diff --git a/pkg/cmd/roachtest/tests/copyfrom.go b/pkg/cmd/roachtest/tests/copyfrom.go
index 5b2a478dc318..64e1ad0608fe 100644
--- a/pkg/cmd/roachtest/tests/copyfrom.go
+++ b/pkg/cmd/roachtest/tests/copyfrom.go
@@ -76,10 +76,10 @@ func initTest(ctx context.Context, t test.Test, c cluster.Cluster, sf int) {
 	); err != nil {
 		t.Fatal(err)
 	}
-	csv := fmt.Sprintf(tpchLineitemFmt, sf)
-	c.Run(ctx, c.Node(1), "rm -f /tmp/lineitem-table.csv")
-	c.Run(ctx, c.Node(1), fmt.Sprintf("curl '%s' -o /tmp/lineitem-table.csv", csv))
 }
+	csv := fmt.Sprintf(tpchLineitemFmt, sf)
+	c.Run(ctx, c.Node(1), "rm -f /tmp/lineitem-table.csv")
+	c.Run(ctx, c.Node(1), fmt.Sprintf("curl '%s' -o /tmp/lineitem-table.csv", csv))
 }

 func runTest(ctx context.Context, t test.Test, c cluster.Cluster, pg string) {
@@ -121,10 +121,15 @@ func runCopyFromPG(ctx context.Context, t test.Test, c cluster.Cluster, sf int)
 	runTest(ctx, t, c, "sudo -i -u postgres psql")
 }

-func runCopyFromCRDB(ctx context.Context, t test.Test, c cluster.Cluster, sf int) {
+func runCopyFromCRDB(ctx context.Context, t test.Test, c cluster.Cluster, sf int, atomic bool) {
 	c.Put(ctx, t.Cockroach(), "./cockroach", c.All())
 	c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings(), c.All())
 	initTest(ctx, t, c, sf)
+	db, err := c.ConnE(ctx, t.L(), 1)
+	require.NoError(t, err)
+	stmt := fmt.Sprintf("ALTER ROLE ALL SET copy_from_atomic_enabled = %t", atomic)
+	_, err = db.ExecContext(ctx, stmt)
+	require.NoError(t, err)
 	urls, err := c.InternalPGUrl(ctx, t.L(), c.Node(1))
 	require.NoError(t, err)
 	m := c.NewMonitor(ctx, c.All())
@@ -150,11 +155,19 @@ func registerCopyFrom(r registry.Registry) {
 	for _, tc := range testcases {
 		tc := tc
 		r.Add(registry.TestSpec{
-			Name:    fmt.Sprintf("copyfrom/crdb/sf=%d/nodes=%d", tc.sf, tc.nodes),
+			Name:    fmt.Sprintf("copyfrom/crdb-atomic/sf=%d/nodes=%d", tc.sf, tc.nodes),
+			Owner:   registry.OwnerKV,
+			Cluster: r.MakeClusterSpec(tc.nodes),
+			Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
+				runCopyFromCRDB(ctx, t, c, tc.sf, true /* atomic */)
+			},
+		})
+		r.Add(registry.TestSpec{
+			Name:    fmt.Sprintf("copyfrom/crdb-nonatomic/sf=%d/nodes=%d", tc.sf, tc.nodes),
 			Owner:   registry.OwnerKV,
 			Cluster: r.MakeClusterSpec(tc.nodes),
 			Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
-				runCopyFromCRDB(ctx, t, c, tc.sf)
+				runCopyFromCRDB(ctx, t, c, tc.sf, false /* atomic */)
 			},
 		})
 		r.Add(registry.TestSpec{
diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go
index c5797c07dd0a..8d3b6437d2ea 100644
--- a/pkg/sql/conn_executor.go
+++ b/pkg/sql/conn_executor.go
@@ -2839,31 +2839,8 @@ func (ex *connExecutor) initPlanner(ctx context.Context, p *planner) {
 func (ex *connExecutor) resetPlanner(
 	ctx context.Context, p *planner, txn *kv.Txn, stmtTS time.Time,
 ) {
-	p.txn = txn
-	p.stmt = Statement{}
-	p.instrumentation = instrumentationHelper{}
-
-	p.cancelChecker.Reset(ctx)
-
-	p.semaCtx = tree.MakeSemaContext()
-	p.semaCtx.SearchPath = &ex.sessionData().SearchPath
-	p.semaCtx.Annotations = nil
-	p.semaCtx.TypeResolver = p
-	p.semaCtx.FunctionResolver = p
-	p.semaCtx.TableNameResolver = p
-	p.semaCtx.DateStyle = ex.sessionData().GetDateStyle()
-	p.semaCtx.IntervalStyle = ex.sessionData().GetIntervalStyle()
-
+	p.resetPlanner(ctx, txn, stmtTS, ex.sessionData())
 	ex.resetEvalCtx(&p.extendedEvalCtx, txn, stmtTS)
-
-	p.autoCommit = false
-	p.isPreparing = false
-
-	p.schemaResolver.txn = txn
-	p.schemaResolver.sessionDataStack = p.EvalContext().SessionDataStack
-	p.evalCatalogBuiltins.Init(p.execCfg.Codec, txn, p.Descriptors())
-	p.skipDescriptorCache = false
-	p.typeResolutionDbID = descpb.InvalidID
 }

 // txnStateTransitionsApplyWrapper is a wrapper on top of Machine built with the
diff --git a/pkg/sql/copy.go b/pkg/sql/copy.go
index 6fca27db980d..6c68436d2b11 100644
--- a/pkg/sql/copy.go
+++ b/pkg/sql/copy.go
@@ -121,9 +121,14 @@ type copyMachine struct {
 	// other things that statements more generally need.
 	parsingEvalCtx *eval.Context

-	processRows func(ctx context.Context) error
+	processRows func(ctx context.Context, finalBatch bool) error

 	scratchRow []tree.Datum
+
+	// For testing, we want to be able to override this at the instance level.
+	copyBatchRowSize int
+
+	implicitTxn bool
 }

 // newCopyMachine creates a new copyMachine.
@@ -147,11 +152,11 @@ func newCopyMachine(
 		csvExpectHeader: n.Options.Header,
 		p:               p,
 		execInsertPlan:  execInsertPlan,
+		implicitTxn:     txnOpt.txn == nil,
 	}
-
 	// We need a planner to do the initial planning, in addition
 	// to those used for the main execution of the COPY afterwards.
-	cleanup := c.p.preparePlannerForCopy(ctx, txnOpt)
+	cleanup := c.p.preparePlannerForCopy(ctx, &c.txnOpt, false /* finalBatch */, c.implicitTxn)
 	defer func() {
 		retErr = cleanup(ctx, retErr)
 	}()
@@ -283,6 +288,7 @@ func (c *copyMachine) initMonitoring(ctx context.Context, parentMon *mon.BytesMo
 	c.copyMon.StartNoReserved(ctx, parentMon)
 	c.bufMemAcc = c.copyMon.MakeBoundAccount()
 	c.rowsMemAcc = c.copyMon.MakeBoundAccount()
+	c.copyBatchRowSize = copyBatchRowSize
 }

 // copyTxnOpt contains information about the transaction in which the copying
@@ -456,10 +462,10 @@ func (c *copyMachine) processCopyData(ctx context.Context, data string, final bo
 		}
 	}
 	// Only do work if we have a full batch of rows or this is the end.
-	if ln := c.rows.Len(); !final && (ln == 0 || ln < copyBatchRowSize) {
+	if ln := c.rows.Len(); !final && (ln == 0 || ln < c.copyBatchRowSize) {
 		return nil
 	}
-	return c.processRows(ctx)
+	return c.processRows(ctx, final)
 }

 func (c *copyMachine) readTextData(ctx context.Context, final bool) (brk bool, err error) {
@@ -582,8 +588,7 @@ func (c *copyMachine) readCSVTuple(ctx context.Context, record []csv.Record) err
 		datums[i] = d
 	}
-	_, err := c.rows.AddRow(ctx, datums)
-	if err != nil {
+	if _, err := c.rows.AddRow(ctx, datums); err != nil {
 		return err
 	}
 	return nil
@@ -715,12 +720,12 @@ func (c *copyMachine) readBinarySignature() ([]byte, error) {
 // an error. If an error is passed in to the cleanup function, the
 // same error is returned.
 func (p *planner) preparePlannerForCopy(
-	ctx context.Context, txnOpt copyTxnOpt,
+	ctx context.Context, txnOpt *copyTxnOpt, finalBatch bool, implicitTxn bool,
 ) func(context.Context, error) error {
 	txn := txnOpt.txn
 	txnTs := txnOpt.txnTimestamp
 	stmtTs := txnOpt.stmtTimestamp
-	autoCommit := false
+	autoCommit := finalBatch && implicitTxn
 	if txn == nil {
 		nodeID, _ := p.execCfg.NodeInfo.NodeID.OptionalNodeID()
 		// The session data stack in the planner is not set up at this point, so use
@@ -728,9 +733,21 @@ func (p *planner) preparePlannerForCopy(
 		txn = kv.NewTxnWithSteppingEnabled(ctx, p.execCfg.DB, nodeID, sessiondatapb.Normal)
 		txnTs = p.execCfg.Clock.PhysicalTime()
 		stmtTs = txnTs
-		autoCommit = true
+
 	}
 	txnOpt.resetPlanner(ctx, p, txn, txnTs, stmtTs)
+	if implicitTxn {
+		// For an atomic implicit COPY, remember the txn for next time so we
+		// don't start a new one.
+		if p.SessionData().CopyFromAtomicEnabled {
+			txnOpt.txn = txn
+			txnOpt.txnTimestamp = txnTs
+			txnOpt.stmtTimestamp = txnTs
+			autoCommit = finalBatch
+		} else {
+			// Keep the original behavior of committing after each batch.
+			autoCommit = true
+		}
+	}
 	p.autoCommit = autoCommit

 	return func(ctx context.Context, prevErr error) (err error) {
@@ -754,14 +771,14 @@ func (p *planner) preparePlannerForCopy(
 }

 // insertRows transforms the buffered rows into an insertNode and executes it.
-func (c *copyMachine) insertRows(ctx context.Context) (retErr error) {
-	if c.rows.Len() == 0 {
-		return nil
-	}
-	cleanup := c.p.preparePlannerForCopy(ctx, c.txnOpt)
+func (c *copyMachine) insertRows(ctx context.Context, finalBatch bool) (retErr error) {
+	cleanup := c.p.preparePlannerForCopy(ctx, &c.txnOpt, finalBatch, c.implicitTxn)
 	defer func() {
 		retErr = cleanup(ctx, retErr)
 	}()
+	if c.rows.Len() == 0 {
+		return nil
+	}
 	numRows := c.rows.Len()
 	copyFastPath := c.p.SessionData().CopyFastPathEnabled
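The commit decision above is easy to misread in diff form. A distilled sketch of what preparePlannerForCopy now decides (the helper name is ours, for illustration; it is not part of the patch):

```go
// shouldAutoCommit mirrors the new autoCommit logic: a COPY inside an
// explicit transaction never auto-commits; an implicit atomic COPY commits
// only when the final batch is flushed; an implicit non-atomic COPY keeps
// the old behavior and commits after every batch.
func shouldAutoCommit(implicitTxn, copyFromAtomicEnabled, finalBatch bool) bool {
	if !implicitTxn {
		return false
	}
	if copyFromAtomicEnabled {
		return finalBatch
	}
	return true
}
```

Note that insertRows now calls preparePlannerForCopy before the early return on an empty batch, so a final, possibly empty, batch still gets a chance to commit the implicit transaction.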
diff --git a/pkg/sql/copy_file_upload.go b/pkg/sql/copy_file_upload.go
index cb7a525973fe..edc7b855fbd1 100644
--- a/pkg/sql/copy_file_upload.go
+++ b/pkg/sql/copy_file_upload.go
@@ -98,7 +98,7 @@ func newFileUploadMachine(

 	// We need a planner to do the initial planning, even if a planner
 	// is not required after that.
-	cleanup := c.p.preparePlannerForCopy(ctx, txnOpt)
+	cleanup := c.p.preparePlannerForCopy(ctx, &txnOpt, false /* finalBatch */, c.implicitTxn)
 	defer func() {
 		retErr = cleanup(ctx, retErr)
 	}()
@@ -191,7 +191,7 @@ func (f *fileUploadMachine) run(ctx context.Context) error {
 	return err
 }

-func (f *fileUploadMachine) writeFile(ctx context.Context) error {
+func (f *fileUploadMachine) writeFile(ctx context.Context, finalBatch bool) error {
 	for i := 0; i < f.c.rows.Len(); i++ {
 		r := f.c.rows.At(i)
 		b := []byte(*r[0].(*tree.DBytes))
diff --git a/pkg/sql/copy_from_test.go b/pkg/sql/copy_from_test.go
index b5ce15d4c827..87f5b774c6fe 100644
--- a/pkg/sql/copy_from_test.go
+++ b/pkg/sql/copy_from_test.go
@@ -17,7 +17,9 @@ import (
 	"sync"
 	"testing"

+	"github.com/cockroachdb/apd/v3"
 	"github.com/cockroachdb/cockroach/pkg/base"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
 	"github.com/cockroachdb/cockroach/pkg/sql"
 	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
@@ -59,6 +61,46 @@ var lineitemSchema string = `CREATE DATABASE c; CREATE TABLE c.lineitem (

 const csvData = `%d|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|egular courts above the `

+func doCopyEx(
+	ctx context.Context,
+	t require.TestingT,
+	s serverutils.TestServerInterface,
+	txn *kv.Txn,
+	rows []string,
+	batchSizeOverride int,
+	atomic bool,
+) {
+	numrows, err := sql.RunCopyFrom(ctx, s, "c", txn, "COPY lineitem FROM STDIN WITH CSV DELIMITER '|';", rows, batchSizeOverride, atomic)
+	require.NoError(t, err)
+	require.Equal(t, len(rows), numrows)
+}
+
+func doCopyImplicit(
+	ctx context.Context, t require.TestingT, s serverutils.TestServerInterface, rows []string,
+) {
+	doCopyEx(ctx, t, s, nil, rows, 0, true)
+}
+
+func doCopyWithTxn(
+	ctx context.Context,
+	t require.TestingT,
+	s serverutils.TestServerInterface,
+	txn *kv.Txn,
+	rows []string,
+) {
+	doCopyEx(ctx, t, s, txn, rows, 0, true)
+}
+
+func doCopyOneRowBatches(
+	ctx context.Context,
+	t require.TestingT,
+	s serverutils.TestServerInterface,
+	rows []string,
+	atomic bool,
+) {
+	doCopyEx(ctx, t, s, nil, rows, 1, atomic)
+}
+
 // TestCopyFrom is a simple test to verify RunCopyFrom works for benchmarking
 // purposes.
 func TestCopyFrom(t *testing.T) {
@@ -74,16 +116,128 @@
 	r := sqlutils.MakeSQLRunner(conn)
 	r.Exec(t, lineitemSchema)
 	rows := []string{fmt.Sprintf(csvData, 1), fmt.Sprintf(csvData, 2)}
-	numrows, err := sql.RunCopyFrom(ctx, s, "c", nil, "COPY lineitem FROM STDIN WITH CSV DELIMITER '|';", rows)
-	require.Equal(t, 2, numrows)
-	require.NoError(t, err)
+	doCopyImplicit(ctx, t, s, rows)
+
+	partKey := 0
+	r.QueryRow(t, "SELECT l_partkey FROM c.lineitem WHERE l_orderkey = 1").Scan(&partKey)
+	require.Equal(t, 155190, partKey)
+}
+
+// TestCopyFromExplicitTransaction tests that copy from rows are written with
+// the same transaction timestamp.
+func TestCopyFromExplicitTransaction(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+
+	s, conn, db := serverutils.StartServer(t, base.TestServerArgs{
+		Settings: cluster.MakeTestingClusterSettings(),
+	})
+	defer s.Stopper().Stop(ctx)
+
+	r := sqlutils.MakeSQLRunner(conn)
+	r.Exec(t, lineitemSchema)
+	rows := []string{fmt.Sprintf(csvData, 1), fmt.Sprintf(csvData, 2)}
+	txn := db.NewTxn(ctx, "test")
+	doCopyWithTxn(ctx, t, s, txn, rows)
+	require.NoError(t, txn.Commit(ctx))
+
+	partKey := 0
+	r.QueryRow(t, "SELECT l_partkey FROM c.lineitem WHERE l_orderkey = 1").Scan(&partKey)
+	require.Equal(t, 155190, partKey)
+
+	sqlRows := r.Query(t, "SELECT crdb_internal_mvcc_timestamp FROM c.lineitem")
+	var lastts float64
+	firstTime := true
+	for sqlRows.Next() {
+		var ts float64
+		err := sqlRows.Scan(&ts)
+		require.NoError(t, err)
+		if !firstTime {
+			require.EqualValues(t, lastts, ts)
+		} else {
+			firstTime = false
+		}
+		lastts = ts
+	}
+}
+
+// TestCopyFromImplicitAtomicTransaction tests that copy from rows are
+// not committed in batches (22.2 default behavior).
+func TestCopyFromImplicitAtomicTransaction(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+
+	s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{
+		Settings: cluster.MakeTestingClusterSettings(),
+	})
+	defer s.Stopper().Stop(ctx)
+
+	r := sqlutils.MakeSQLRunner(conn)
+	r.Exec(t, lineitemSchema)
+	rows := []string{fmt.Sprintf(csvData, 1), fmt.Sprintf(csvData, 2)}
+	doCopyOneRowBatches(ctx, t, s, rows, true /* atomic */)
+
+	partKey := 0
+	r.QueryRow(t, "SELECT l_partkey FROM c.lineitem WHERE l_orderkey = 1").Scan(&partKey)
+	require.Equal(t, 155190, partKey)
+
+	sqlRows := r.Query(t, "SELECT crdb_internal_mvcc_timestamp FROM c.lineitem")
+	var lastts apd.Decimal
+	firstTime := true
+	for sqlRows.Next() {
+		var ts apd.Decimal
+		err := sqlRows.Scan(&ts)
+		require.NoError(t, err)
+		if !firstTime {
+			require.EqualValues(t, lastts, ts)
+		} else {
+			firstTime = false
+		}
+		lastts = ts
+	}
+}
+
+// TestCopyFromImplicitNonAtomicTransaction tests that copy from rows are
+// committed in batches (pre-22.2 default behavior).
+func TestCopyFromImplicitNonAtomicTransaction(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+
+	s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{
+		Settings: cluster.MakeTestingClusterSettings(),
+	})
+	defer s.Stopper().Stop(ctx)
+
+	r := sqlutils.MakeSQLRunner(conn)
+	r.Exec(t, lineitemSchema)
+	rows := []string{fmt.Sprintf(csvData, 1), fmt.Sprintf(csvData, 2)}
+	doCopyOneRowBatches(ctx, t, s, rows, false /* atomic */)

 	partKey := 0
 	r.QueryRow(t, "SELECT l_partkey FROM c.lineitem WHERE l_orderkey = 1").Scan(&partKey)
 	require.Equal(t, 155190, partKey)
+
+	sqlRows := r.Query(t, "SELECT crdb_internal_mvcc_timestamp FROM c.lineitem")
+	var lastts apd.Decimal
+	firstTime := true
+	for sqlRows.Next() {
+		var ts apd.Decimal
+		err := sqlRows.Scan(&ts)
+		require.NoError(t, err)
+		if !firstTime {
+			require.NotEqualValues(t, lastts, ts)
+		} else {
+			firstTime = false
+		}
+		lastts = ts
+	}
 }

-// BenchmarkCopy measures copy performance against a TestServer.
+// BenchmarkCopyFrom measures copy performance against a TestServer.
 func BenchmarkCopyFrom(b *testing.B) {
 	defer leaktest.AfterTest(b)()
 	defer log.Scope(b).Close(b)
@@ -113,9 +267,7 @@ func BenchmarkCopyFrom(b *testing.B) {
 	actualRows := rows[:batchSize]
 	for i := 0; i < b.N; i++ {
 		pprof.Do(ctx, pprof.Labels("run", "copy"), func(ctx context.Context) {
-			rowcount, err := sql.RunCopyFrom(ctx, s, "c", nil, "COPY lineitem FROM STDIN WITH CSV DELIMITER '|';", actualRows)
-			require.NoError(b, err)
-			require.Equal(b, len(actualRows), rowcount)
+			doCopyImplicit(ctx, b, s, actualRows)
 		})
 		b.StopTimer()
 		r.Exec(b, "TRUNCATE TABLE c.lineitem")
@@ -126,6 +278,7 @@ func BenchmarkCopyFrom(b *testing.B) {
 	}
 }

+// BenchmarkParallelCopyFrom benchmarks breaking the copy into separate chunks run in separate goroutines.
 func BenchmarkParallelCopyFrom(b *testing.B) {
 	defer leaktest.AfterTest(b)()
 	defer log.Scope(b).Close(b)
@@ -161,9 +314,7 @@ func BenchmarkParallelCopyFrom(b *testing.B) {
 		wg.Add(1)
 		go func(j int) {
 			defer wg.Done()
-			count, err := sql.RunCopyFrom(ctx, s, "c", nil, "COPY lineitem FROM STDIN WITH CSV DELIMITER '|';", allrows[j])
-			require.NoError(b, err)
-			require.Equal(b, chunk, count)
+			doCopyImplicit(ctx, b, s, allrows[j])
 		}(j)
 	}
 	wg.Wait()
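The three new tests share the same scan-and-compare loop over crdb_internal_mvcc_timestamp. A possible shared helper (our sketch, not code from the patch; it assumes the imports the test file already uses):

```go
// requireMVCCTimestamps scans every row's crdb_internal_mvcc_timestamp and
// asserts that all rows share one timestamp (wantSame, the atomic case) or
// that consecutive rows differ (!wantSame, the segmented case).
func requireMVCCTimestamps(t *testing.T, r *sqlutils.SQLRunner, wantSame bool) {
	sqlRows := r.Query(t, "SELECT crdb_internal_mvcc_timestamp FROM c.lineitem")
	var lastts apd.Decimal
	firstTime := true
	for sqlRows.Next() {
		var ts apd.Decimal
		require.NoError(t, sqlRows.Scan(&ts))
		if !firstTime {
			if wantSame {
				require.EqualValues(t, lastts, ts)
			} else {
				require.NotEqualValues(t, lastts, ts)
			}
		}
		firstTime = false
		lastts = ts
	}
}
```

Note that the explicit-transaction test scans into float64 while the other two scan apd.Decimal; the decimal form is the safer choice, since float64 cannot represent a full MVCC timestamp without precision loss.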
diff --git a/pkg/sql/copyshim.go b/pkg/sql/copyshim.go
index 7c9d0a1e79eb..659e626fbefa 100644
--- a/pkg/sql/copyshim.go
+++ b/pkg/sql/copyshim.go
@@ -81,6 +81,8 @@ func RunCopyFrom(
 	txn *kv.Txn,
 	copySQL string,
 	data []string,
+	copyBatchRowSizeOverride int,
+	atomic bool,
 ) (int, error) {
 	execCfg := s.ExecutorConfig().(ExecutorConfig)
 	dsp := execCfg.DistSQLPlanner
@@ -88,9 +90,6 @@ func RunCopyFrom(
 	if err != nil {
 		return -1, err
 	}
-	if txn == nil {
-		txn = s.DB().NewTxn(ctx, "test")
-	}

 	// TODO(cucaroach): test open transaction and implicit txn, this will require
 	// a real client side/over the wire copy implementation logictest can use.
@@ -98,6 +97,7 @@ func RunCopyFrom(
 	txnOpt.resetPlanner = func(ctx context.Context, p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time) {
 		p.cancelChecker.Reset(ctx)
 		p.optPlanningCtx.init(p)
+		p.resetPlanner(ctx, txn, stmtTS, p.sessionDataMutatorIterator.sds.Top())
 	}

 	p, cleanup := newInternalPlanner("copytest",
 		txn,
@@ -115,6 +115,8 @@ func RunCopyFrom(
 	}
 	defer cleanup()

+	p.SessionData().CopyFromAtomicEnabled = atomic
+
 	// Write what the client side would write into a buffer and then make it the conn's data.
 	var buf []byte
 	for _, d := range data {
@@ -145,16 +147,13 @@ func RunCopyFrom(
 	if err != nil {
 		return -1, err
 	}
+	if copyBatchRowSizeOverride != 0 {
+		c.copyBatchRowSize = copyBatchRowSizeOverride
+	}

 	if err := c.run(ctx); err != nil {
 		return -1, err
 	}

-	if txn != nil {
-		if err := txn.Commit(ctx); err != nil {
-			return -1, err
-		}
-	}
-
 	return rows, nil
 }
diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go
index 5889431fea55..565cde36463e 100644
--- a/pkg/sql/exec_util.go
+++ b/pkg/sql/exec_util.go
@@ -3333,6 +3333,10 @@ func (m *sessionDataMutator) SetCopyFastPathEnabled(val bool) {
 	m.data.CopyFastPathEnabled = val
 }

+func (m *sessionDataMutator) SetCopyFromAtomicEnabled(val bool) {
+	m.data.CopyFromAtomicEnabled = val
+}
+
 // Utility functions related to scrubbing sensitive information on SQL Stats.

 // quantizeCounts ensures that the Count field in the
diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go
index f97a4c67c93f..bd3ebd724d26 100644
--- a/pkg/sql/logictest/logic.go
+++ b/pkg/sql/logictest/logic.go
@@ -2395,7 +2395,7 @@ func (t *logicTest) processSubtest(
 			// sql.DB interface doesn't support COPY so fixing it the right way
 			// that would require major surgery (ie making logictest use libpq
 			// or something low level like that).
-			rows, err := sql.RunCopyFrom(context.Background(), t.cluster.Server(0), "test", nil, query.sql, []string{data.String()})
+			rows, err := sql.RunCopyFrom(context.Background(), t.cluster.Server(0), "test", nil, query.sql, []string{data.String()}, 0 /* rowsPerBatch */, true /* atomic */)
 			result := fmt.Sprintf("%d", rows)
 			if err != nil {
 				if !expectError {
diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema
index 9f873f6f8918..fcd2bfc567d6 100644
--- a/pkg/sql/logictest/testdata/logic_test/information_schema
+++ b/pkg/sql/logictest/testdata/logic_test/information_schema
@@ -4677,6 +4677,7 @@ bytea_output                           hex
 check_function_bodies                  on
 client_encoding                        UTF8
 client_min_messages                    notice
+copy_from_atomic_enabled               on
 cost_scans_with_default_col_size       off
 database                               test
 datestyle                              ISO, MDY
diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog
index 549bea9ccc83..fe49a2501e31 100644
--- a/pkg/sql/logictest/testdata/logic_test/pg_catalog
+++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog
@@ -4161,6 +4161,7 @@ bytea_output hex NULL
 check_function_bodies            on        NULL NULL NULL string
 client_encoding                  UTF8      NULL NULL NULL string
 client_min_messages              notice    NULL NULL NULL string
+copy_from_atomic_enabled         on        NULL NULL NULL string
 cost_scans_with_default_col_size off       NULL NULL NULL string
 database                         test      NULL NULL NULL string
 datestyle                        ISO, MDY  NULL NULL NULL string
@@ -4288,6 +4289,7 @@ bytea_output hex NULL
 check_function_bodies            on        NULL user NULL on        on
 client_encoding                  UTF8      NULL user NULL UTF8      UTF8
 client_min_messages              notice    NULL user NULL notice    notice
+copy_from_atomic_enabled         on        NULL user NULL on        on
 cost_scans_with_default_col_size off       NULL user NULL off       off
 database                         test      NULL user NULL ·         test
 datestyle                        ISO, MDY  NULL user NULL ISO, MDY  ISO, MDY
@@ -4410,6 +4412,7 @@ check_function_bodies NULL NULL NULL
 client_encoding                  NULL NULL NULL NULL NULL
 client_min_messages              NULL NULL NULL NULL NULL
 copy_fast_path_enabled           NULL NULL NULL NULL NULL
+copy_from_atomic_enabled         NULL NULL NULL NULL NULL
 cost_scans_with_default_col_size NULL NULL NULL NULL NULL
 crdb_version                     NULL NULL NULL NULL NULL
 database                         NULL NULL NULL NULL NULL
diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source
index 6764f90d3f45..05a0e6e39efe 100644
--- a/pkg/sql/logictest/testdata/logic_test/show_source
+++ b/pkg/sql/logictest/testdata/logic_test/show_source
@@ -34,6 +34,7 @@ bytea_output hex
 check_function_bodies            on
 client_encoding                  UTF8
 client_min_messages              notice
+copy_from_atomic_enabled         on
 cost_scans_with_default_col_size off
 database                         test
 datestyle                        ISO, MDY
diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go
index 82aea7da7844..047099cfe9de 100644
--- a/pkg/sql/planner.go
+++ b/pkg/sql/planner.go
@@ -1011,3 +1011,31 @@ func (p *planner) WithInternalExecutor(
 	ie := initInternalExecutor(ctx, p)
 	return run(ctx, p.Txn(), ie)
 }
+
+func (p *planner) resetPlanner(
+	ctx context.Context, txn *kv.Txn, stmtTS time.Time, sd *sessiondata.SessionData,
+) {
+	p.txn = txn
+	p.stmt = Statement{}
+	p.instrumentation = instrumentationHelper{}
+
+	p.cancelChecker.Reset(ctx)
+
+	p.semaCtx = tree.MakeSemaContext()
+	p.semaCtx.SearchPath = &sd.SearchPath
+	p.semaCtx.Annotations = nil
+	p.semaCtx.TypeResolver = p
+	p.semaCtx.FunctionResolver = p
+	p.semaCtx.TableNameResolver = p
+	p.semaCtx.DateStyle = sd.GetDateStyle()
+	p.semaCtx.IntervalStyle = sd.GetIntervalStyle()
+
+	p.autoCommit = false
+	p.isPreparing = false
+
+	p.schemaResolver.txn = txn
+	p.schemaResolver.sessionDataStack = p.EvalContext().SessionDataStack
+	p.evalCatalogBuiltins.Init(p.execCfg.Codec, txn, p.Descriptors())
+	p.skipDescriptorCache = false
+	p.typeResolutionDbID = descpb.InvalidID
+}
diff --git a/pkg/sql/sessiondatapb/local_only_session_data.proto b/pkg/sql/sessiondatapb/local_only_session_data.proto
index 452f6a8ded52..272cdd5b1d22 100644
--- a/pkg/sql/sessiondatapb/local_only_session_data.proto
+++ b/pkg/sql/sessiondatapb/local_only_session_data.proto
@@ -281,6 +281,9 @@ message LocalOnlySessionData {
   // disable_hoist_projection_in_join_limitation disables the restrictions
   // placed on projection hoisting during query planning in the optimizer.
   bool disable_hoist_projection_in_join_limitation = 76;
+  // CopyFromAtomicEnabled controls whether implicit txn copy from operations
+  // are atomic or segmented.
+  bool copy_from_atomic_enabled = 77;

   ///////////////////////////////////////////////////////////////////////////
   // WARNING: consider whether a session parameter you're adding needs to  //
diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go
index 05ad8edf3e29..aee9958238b8 100644
--- a/pkg/sql/vars.go
+++ b/pkg/sql/vars.go
@@ -2173,6 +2173,23 @@ var varGen = map[string]sessionVar{
 		},
 		GlobalDefault: globalFalse,
 	},
+
+	// CockroachDB extension.
+	`copy_from_atomic_enabled`: {
+		GetStringVal: makePostgresBoolGetStringValFn(`copy_from_atomic_enabled`),
+		Set: func(_ context.Context, m sessionDataMutator, s string) error {
+			b, err := paramparse.ParseBoolVar("copy_from_atomic_enabled", s)
+			if err != nil {
+				return err
+			}
+			m.SetCopyFromAtomicEnabled(b)
+			return nil
+		},
+		Get: func(evalCtx *extendedEvalContext, _ *kv.Txn) (string, error) {
+			return formatBoolAsPostgresSetting(evalCtx.SessionData().CopyFromAtomicEnabled), nil
+		},
+		GlobalDefault: globalTrue,
+	},
 }

 // We want test coverage for this on and off so make it metamorphic.
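The new variable defaults to on. It can also be flipped per session rather than per role; a minimal sketch using database/sql (`db` is assumed to be an open *sql.DB):

```go
// Opt this session out of atomic COPY, restoring the pre-22.2 behavior of
// committing after every batch of copyBatchRowSize rows. The roachtest above
// instead changes the default for all roles via ALTER ROLE ALL SET.
if _, err := db.ExecContext(ctx, "SET copy_from_atomic_enabled = false"); err != nil {
	return err
}
```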
diff --git a/pkg/storage/metamorphic/BUILD.bazel b/pkg/storage/metamorphic/BUILD.bazel
index f387ffaeed0f..ed99ed417591 100644
--- a/pkg/storage/metamorphic/BUILD.bazel
+++ b/pkg/storage/metamorphic/BUILD.bazel
@@ -9,6 +9,7 @@ go_library(
         "operands.go",
         "operations.go",
         "options.go",
+        "parser.go",
     ],
     importpath = "github.com/cockroachdb/cockroach/pkg/storage/metamorphic",
     visibility = ["//visibility:public"],
@@ -26,6 +27,7 @@ go_library(
         "//pkg/util/uuid",
        "@com_github_cockroachdb_errors//:errors",
        "@com_github_cockroachdb_pebble//:pebble",
+        "@com_github_cockroachdb_pebble//bloom",
        "@com_github_cockroachdb_pebble//vfs",
     ],
 )
@@ -36,7 +38,9 @@ go_test(
     srcs = [
         "main_test.go",
         "meta_test.go",
+        "parser_test.go",
     ],
+    data = glob(["testdata/**"]),
     embed = [":metamorphic"],
     shard_count = 16,
     deps = [
@@ -48,6 +52,7 @@ go_test(
         "//pkg/util/randutil",
         "@com_github_cockroachdb_errors//oserror",
         "@com_github_cockroachdb_pebble//vfs",
+        "@com_github_stretchr_testify//require",
     ],
 )
diff --git a/pkg/storage/metamorphic/generator.go b/pkg/storage/metamorphic/generator.go
index 7adacd23ad28..30578dbf40c4 100644
--- a/pkg/storage/metamorphic/generator.go
+++ b/pkg/storage/metamorphic/generator.go
@@ -124,7 +124,8 @@ func (m *metaTestRunner) init() {
 	var err error
 	m.engine, err = m.engineSeq.configs[0].create(m.path, m.engineFS)
-	m.printComment(fmt.Sprintf("engine options: %s", m.engineSeq.configs[0].opts.String()))
+	m.printComment(fmt.Sprintf("name: %s", m.engineSeq.configs[0].name))
+	m.printComment(fmt.Sprintf("engine options:\n%s", m.engineSeq.configs[0].opts.String()))
 	if err != nil {
 		m.engine = nil
 		m.t.Fatal(err)
diff --git a/pkg/storage/metamorphic/meta_test.go b/pkg/storage/metamorphic/meta_test.go
index b9f906fd91a8..063896b4e37d 100644
--- a/pkg/storage/metamorphic/meta_test.go
+++ b/pkg/storage/metamorphic/meta_test.go
@@ -17,6 +17,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"

 	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
@@ -30,9 +31,11 @@ import (
 )

 var (
-	keep    = flag.Bool("keep", false, "keep temp directories after test")
-	check   = flag.String("check", "", "run operations in specified file and check output for equality")
-	opCount = flag.Int("operations", 20000, "number of MVCC operations to generate and run")
+	keep         = flag.Bool("keep", false, "keep temp directories after test")
+	check        = flag.String("check", "", "run operations in specified file and check output for equality")
+	inMem        = flag.Bool("in-mem", false, "use an in-memory filesystem")
+	compareFiles = flag.String("compare-files", "", "comma-separated list of output files to compare; used by TestCompareFiles")
+	opCount      = flag.Int("operations", 20000, "number of MVCC operations to generate and run")
 )

 type testRun struct {
@@ -269,43 +272,120 @@ func TestPebbleCheck(t *testing.T) {

 	ctx := context.Background()

-	if *check != "" {
-		if _, err := os.Stat(*check); oserror.IsNotExist(err) {
-			t.Fatal(err)
 		}
+	if *check == "" {
+		skip.IgnoreLint(t, "Skipping; no check file provided via --check")
+		return
+	}
+	if _, err := os.Stat(*check); oserror.IsNotExist(err) {
+		t.Fatal(err)
+	}
+
+	engineSeqs := make([]engineSequence, 0, numStandardOptions+numRandomOptions)
+
+	for i := 0; i < numStandardOptions; i++ {
+		engineSeq := engineSequence{
+			configs: []engineConfig{{
+				name: fmt.Sprintf("standard=%d", i),
+				opts: standardOptions(i),
+			}},
 		}
+		engineSeq.name = engineSeq.configs[0].name
+		engineSeqs = append(engineSeqs, engineSeq)
+	}

-		engineSeqs := make([]engineSequence, 0, numStandardOptions+numRandomOptions)
+	for i := 0; i < numRandomOptions; i++ {
+		engineSeq := engineSequence{
+			configs: []engineConfig{{
+				name: fmt.Sprintf("random=%d", i),
+				opts: randomOptions(),
+			}},
+		}
+		engineSeq.name = engineSeq.configs[0].name
+		engineSeqs = append(engineSeqs, engineSeq)
+	}

-		for i := 0; i < numStandardOptions; i++ {
-			engineSeq := engineSequence{
-				configs: []engineConfig{{
-					name: fmt.Sprintf("standard=%d", i),
-					opts: standardOptions(i),
-				}},
-			}
-			engineSeq.name = engineSeq.configs[0].name
-			engineSeqs = append(engineSeqs, engineSeq)
+	run := testRun{
+		ctx:             ctx,
+		t:               t,
+		checkFile:       *check,
+		restarts:        true,
+		inMem:           *inMem,
+		engineSequences: engineSeqs,
+	}
+	runMetaTest(run)
+}
+
+// TestCompareFiles takes a comma-separated list of output files through the
+// `--compare-files` command-line parameter. The output files should originate
+// from the same run and have matching operations. TestCompareFiles takes the
+// operations from the provided `--check` file and runs all the compare-files
+// configurations against them, checking for equality.
+//
+// For example, suppose a nightly discovers a metamorphic failure where the
+// random-008 run diverges. You can download 'output.meta', the first run with
+// the standard options, and the output file for the random run. Pass the
+// output.meta to `--check` and the diverging run's output.meta to
+// `--compare-files`:
+//
+//	./dev test -v ./pkg/storage/metamorphic -f TestCompareFiles --ignore-cache \
+//	  --test-args '--in-mem' \
+//	  --test-args '--check=/Users/craig/archive/output.meta' \
+//	  --test-args '--compare-files=/Users/craig/archive/random8.meta'
+//
+// The above example supplies `--in-mem`. This may be useful to produce quick
+// reproductions, but if you want to dig through the data directory, omit it.
+func TestCompareFiles(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+
+	if *check == "" {
+		skip.IgnoreLint(t, "Skipping; no check file provided via --check")
+		return
+	}
+	if *compareFiles == "" {
+		skip.IgnoreLint(t, "Skipping; no files to compare provided via --compare-files")
+		return
+	}
+
+	// Check that all the referenced files exist.
+	if _, err := os.Stat(*check); oserror.IsNotExist(err) {
+		t.Fatal(err)
+	}
+	files := strings.Split(*compareFiles, ",")
+	for _, f := range files {
+		if _, err := os.Stat(f); oserror.IsNotExist(err) {
+			t.Fatal(err)
 		}
+	}

-		for i := 0; i < numRandomOptions; i++ {
-			engineSeq := engineSequence{
-				configs: []engineConfig{{
-					name: fmt.Sprintf("random=%d", i),
-					opts: randomOptions(),
-				}},
+	engineSeqs := make([]engineSequence, 0, len(files))
+	for _, f := range files {
+		cfg, seed, err := func() (engineConfig, int64, error) {
+			r, err := os.Open(f)
+			if err != nil {
+				return engineConfig{}, 0, err
 			}
-			engineSeq.name = engineSeq.configs[0].name
-			engineSeqs = append(engineSeqs, engineSeq)
+			defer r.Close()
+			return parseOutputPreamble(r)
+		}()
+		if err != nil {
+			t.Fatalf("parsing file %q: %s", f, err)
 		}
+		engineSeqs = append(engineSeqs, engineSequence{
+			name:    fmt.Sprintf("%s_%d", filepath.Base(f), seed),
+			configs: []engineConfig{cfg},
+		})
+	}

-		run := testRun{
-			ctx:             ctx,
-			t:               t,
-			checkFile:       *check,
-			restarts:        true,
-			inMem:           false,
-			engineSequences: engineSeqs,
-		}
-		runMetaTest(run)
+	run := testRun{
+		ctx:             ctx,
+		t:               t,
+		checkFile:       *check,
+		restarts:        true,
+		inMem:           *inMem,
+		engineSequences: engineSeqs,
 	}
+	runMetaTest(run)
 }
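parseOutputPreamble, added below, expects exactly the preamble that generator.go now emits: a seed comment, a name comment, then the engine options block. A package-internal sketch of feeding it a canned preamble (the constant is illustrative, assumes a `strings` import, and elides the full options body):

```go
// cannedPreamble mimics the first lines of an output.meta file.
const cannedPreamble = `# seed: 123
# name: standard=0
# engine options:
# [Version]
# pebble_version=0.1
`

// parseCanned shows the parser's contract: it returns the engine config
// named in the preamble plus the run's seed.
func parseCanned() (engineConfig, int64, error) {
	return parseOutputPreamble(strings.NewReader(cannedPreamble))
}
```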
diff --git a/pkg/storage/metamorphic/parser.go b/pkg/storage/metamorphic/parser.go
new file mode 100644
index 000000000000..42ebec5257e2
--- /dev/null
+++ b/pkg/storage/metamorphic/parser.go
@@ -0,0 +1,107 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package metamorphic
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/cockroachdb/cockroach/pkg/storage"
+	"github.com/cockroachdb/pebble"
+	"github.com/cockroachdb/pebble/bloom"
+)
+
+// parseOutputPreamble reads the commented preamble of an output.meta file,
+// parsing out the engine configuration.
+func parseOutputPreamble(f io.Reader) (cfg engineConfig, seed int64, err error) {
+	r := bufio.NewReader(f)
+
+	seed, err = readCommentInt64(r, "seed:")
+	if err != nil {
+		return cfg, seed, err
+	}
+	cfg.name, err = readCommentString(r, "name:")
+	if err != nil {
+		return cfg, seed, err
+	}
+	if _, err = readCommentString(r, "engine options:"); err != nil {
+		return cfg, seed, err
+	}
+
+	var optsBuf bytes.Buffer
+	for {
+		// Read the first byte to check if this line is a comment.
+		if firstByte, err := r.ReadByte(); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return cfg, seed, err
+		} else if firstByte != '#' {
+			// The end of the comment preamble.
+			break
+		}
+
+		b, err := r.ReadBytes('\n')
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return cfg, seed, err
+		}
+		optsBuf.Write(b)
+	}
+	cfg.opts = storage.DefaultPebbleOptions()
+	err = cfg.opts.Parse(optsBuf.String(), &pebble.ParseHooks{
+		NewFilterPolicy: func(name string) (pebble.FilterPolicy, error) {
+			switch name {
+			case "none":
+				return nil, nil
+			case "rocksdb.BuiltinBloomFilter":
+				return bloom.FilterPolicy(10), nil
+			}
+			return nil, nil
+		},
+	})
+	return cfg, seed, err
+}
+
+func readCommentString(r *bufio.Reader, prefix string) (string, error) {
+	firstByte, err := r.ReadByte()
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		return "", err
+	}
+	if firstByte != '#' {
+		return "", fmt.Errorf("expected comment with prefix %q, but not a comment", prefix)
+	}
+	s, err := r.ReadString('\n')
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, prefix)
+	s = strings.TrimSpace(s)
+	return s, err
+}
+
+func readCommentInt64(r *bufio.Reader, prefix string) (int64, error) {
+	s, err := readCommentString(r, prefix)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(s, 10, 64)
+}
diff --git a/pkg/storage/metamorphic/parser_test.go b/pkg/storage/metamorphic/parser_test.go
new file mode 100644
index 000000000000..ad555fc72036
--- /dev/null
+++ b/pkg/storage/metamorphic/parser_test.go
@@ -0,0 +1,34 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package metamorphic
+
+import (
+	"os"
+	"testing"
+
+	"github.com/cockroachdb/cockroach/pkg/testutils"
+	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
+	"github.com/stretchr/testify/require"
+)
+
+func TestParseOutputPreamble(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+
+	f, err := os.Open(testutils.TestDataPath(t, "sample.meta"))
+	require.NoError(t, err)
+
+	cfg, seed, err := parseOutputPreamble(f)
+	require.NoError(t, err)
+	require.Equal(t, seed, int64(7375396416917217630))
+	require.Equal(t, cfg.name, "random-007")
+	// TODO(jackson): Assert roundtrip equality.
+ t.Log(cfg.opts.EnsureDefaults().String()) +} diff --git a/pkg/storage/metamorphic/testdata/sample.meta b/pkg/storage/metamorphic/testdata/sample.meta new file mode 100644 index 000000000000..cf12094e7f1f --- /dev/null +++ b/pkg/storage/metamorphic/testdata/sample.meta @@ -0,0 +1,256 @@ +# seed: 7375396416917217630 +# name: random-007 +# engine options: +# [Version] +# pebble_version=0.1 +# +# [Options] +# bytes_per_sync=524288 +# cache_size=1048576 +# cleaner=delete +# compaction_debt_concurrency=1073741824 +# comparer=cockroach_comparator +# delete_range_flush_delay=10s +# disable_wal=false +# flush_split_bytes=4194304 +# format_major_version=8 +# l0_compaction_concurrency=10 +# l0_compaction_file_threshold=500 +# l0_compaction_threshold=2 +# l0_stop_writes_threshold=1000 +# lbase_max_bytes=67108864 +# max_concurrent_compactions=3 +# max_manifest_file_size=134217728 +# max_open_files=1000 +# mem_table_size=67108864 +# mem_table_stop_writes_threshold=4 +# min_deletion_rate=134217728 +# merger=cockroach_merge_operator +# read_compaction_rate=16000 +# read_sampling_multiplier=16 +# strict_wal_tail=true +# table_cache_shards=8 +# table_property_collectors=[] +# validate_on_ingest=false +# wal_dir= +# wal_bytes_per_sync=0 +# max_writer_concurrency=0 +# force_writer_parallelism=false +# +# [Level "0"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=2097152 +# +# [Level "1"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=4194304 +# +# [Level "2"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=8388608 +# +# [Level "3"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=16777216 +# +# [Level "4"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=33554432 +# +# [Level "5"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=67108864 +# +# [Level "6"] +# block_restart_interval=16 +# block_size=32768 +# compression=Snappy +# filter_policy=rocksdb.BuiltinBloomFilter +# filter_type=table +# index_block_size=262144 +# target_file_size=134217728 +# +txn_open(1, t1) -> t1 +mvcc_reverse_scan("vpdhnkpam"/0, "vpdhnkpam"/0, t1, 0.4791, 0.5319, false, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_scan("coivcnwg"/2, "coivcnwg"/2, t1, 0.4481, 0.6505, true, true) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +iterator_open(engine, "coivcnwg"/2, "ineoliklwoegq"/2, iter1) -> iter1 +iterator_nextkey(iter1) -> valid = false +mvcc_inconsistent_get(engine, "vpdhnkpam"/0, 1) -> val = , intent = +txn_commit(t1) -> ok +mvcc_inconsistent_scan("coivcnwg"/2, "ineoliklwoegq"/2, 1, 0.7864, 0.9047, false, true) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +txn_open(3, t2) -> t2 +mvcc_init_put(engine, t2, "tktluohcljr"/4, umqtkwuqlp) -> ok 
+compact("coivcnwg"/2, "ineoliklwoegq"/2) -> ok +mvcc_put(engine, t2, "ineoliklwoegq"/2, bocie) -> ok +iterator_next(iter1) -> valid = false +mvcc_delete(engine, t2, "gukoaxqohrakhx"/4) -> ok +mvcc_delete(engine, t2, "gukoaxqohrakhx"/4) -> ok +iterator_seeklt(iter1, "gukoaxqohrakhx"/4) -> valid = false +mvcc_inconsistent_scan("qxaisbnoe"/4, "coivcnwg"/2, 3, 0.8319, 0.3660, true, true) -> kvs = [], intents = [{{"gukoaxqohrakhx"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=4} {{"ineoliklwoegq"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=2}], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_reverse_scan("gukoaxqohrakhx"/4, "gukoaxqohrakhx"/4, t2, 0.7234, 0.1763, false, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_inconsistent_scan("gukoaxqohrakhx"/4, "ineoliklwoegq"/2, 3, 0.3394, 0.8966, true, false) -> kvs = [], intents = [{{"gukoaxqohrakhx"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=4}], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_get(engine, "aoclxttgscj"/4, t2) -> val = , intent = +iterator_close(iter1) -> ok +txn_open(5, t3) -> t3 +iterator_open(engine, "gukoaxqohrakhx"/4, "yggswhfeyqv"/6, iter2) -> iter2 +iterator_prev(iter2) -> valid = false +batch_commit(engine) -> noop +iterator_seekge(iter2, "tktluohcljr"/4) -> key = "tktluohcljr"/0.000000003,0 +mvcc_reverse_scan("dowuentqinoinc"/6, "aoclxttgscj"/4, t2, 0.0091, 0.1843, true, true) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +iterator_open(engine, "xepnphkbkbykq"/6, "aoclxttgscj"/4, iter3) -> iter3 +mvcc_scan("qpdaulhik"/6, "rfdgrsmg"/6, t2, 0.5591, 0.5158, false, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_reverse_scan("ineoliklwoegq"/2, "qxaisbnoe"/4, t3, 0.8625, 0.1976, true, false) -> error: conflicting intents on "ineoliklwoegq" +mvcc_put(engine, t2, "rfdgrsmg"/6, jonwsmbwhx) -> ok +mvcc_put(engine, t3, "gqpapurgp"/6, rwgyypofud) -> ok +iterator_nextkey(iter2) -> valid = false +mvcc_put(engine, t2, "aoclxttgscj"/4, dmcppruyjxusum) -> ok +mvcc_reverse_scan("pkgerlaju"/6, "ineoliklwoegq"/2, t2, 0.7722, 0.0298, false, true) -> kvs = [{"ineoliklwoegq" {[0 0 0 0 3 98 111 99 105 101] 0.000000003,0}}], intents = [], resumeSpan = , numBytes = 41, numKeys = 1 +mvcc_inconsistent_scan("vpdhnkpam"/0, "coivcnwg"/2, 6, 0.8474, 0.1611, false, true) -> kvs = [], intents = [{{"gqpapurgp"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=1} {{"gukoaxqohrakhx"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=4} {{"ineoliklwoegq"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=2} {{"rfdgrsmg"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=5} {{"tktluohcljr"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=1}], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_put(engine, t2, "coivcnwg"/2, klltabhwe) -> ok +mvcc_inconsistent_scan("vpdhnkpam"/0, "xepnphkbkbykq"/6, 5, 0.2138, 0.5211, false, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_put(engine, t2, "wbpcepef"/6, hhgldqltudmgr) -> ok +mvcc_delete(engine, t3, "leyxhmxqrcbxsm"/6) -> ok +mvcc_reverse_scan("tktluohcljr"/4, "ycqyfkeom"/6, t2, 0.6096, 0.8422, true, true) -> kvs = [{"wbpcepef" {[0 0 0 0 3 104 104 103 108 100 113 108 116 117 100 109 103 114] 0.000000003,0}} {"tktluohcljr" {[0 0 0 0 3 117 109 113 116 107 119 117 113 108 112] 0.000000003,0}}], intents = [], resumeSpan 
= , numBytes = 88, numKeys = 2 +iterator_nextkey(iter2) -> valid = false +mvcc_scan("thqatgbqt"/6, "tktluohcljr"/4, t2, 0.2758, 0.6241, true, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_put(engine, t3, "mrxatnht"/6, uooxrnfmynbrhmq) -> ok +mvcc_inconsistent_scan("tjbotfrbtxoyta"/6, "leyxhmxqrcbxsm"/6, 5, 0.4386, 0.9055, true, false) -> kvs = [], intents = [{{"leyxhmxqrcbxsm"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=2} {{"mrxatnht"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=3} {{"rfdgrsmg"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=5}], resumeSpan = , numBytes = 0, numKeys = 0 +iterator_seekge(iter2, "dowuentqinoinc"/6) -> key = "gukoaxqohrakhx"/0.000000003,0 +iterator_seekge(iter2, "vpdhnkpam"/0) -> valid = false +mvcc_put(engine, t3, "yggswhfeyqv"/6, etewicdmbq) -> ok +iterator_seekge(iter2, "wbpcepef"/6) -> valid = false +mvcc_get(engine, "uwdvyohbpsgcr"/6, t2) -> val = , intent = +ingest("rfdgrsmg"/6, "gukoaxqohrakhx"/4, "tjbotfrbtxoyta"/6, "tjbotfrbtxoyta"/6, "snoojmhoaqte"/6) -> ok +mvcc_put(engine, t3, "tjbotfrbtxoyta"/6, svyqs) -> error: WriteTooOldError: write for key "tjbotfrbtxoyta" at timestamp 0.000000005,0 too old; wrote at 0.000000006,1 +mvcc_put(engine, t2, "gukoaxqohrakhx"/4, edqawhknloa) -> ok +mvcc_put(engine, t3, "qpdaulhik"/6, rkuhimkl) -> ok +mvcc_reverse_scan("snoojmhoaqte"/6, "yggswhfeyqv"/6, t2, 0.5142, 0.1074, false, true) -> kvs = [{"wbpcepef" {[0 0 0 0 3 104 104 103 108 100 113 108 116 117 100 109 103 114] 0.000000003,0}} {"tktluohcljr" {[0 0 0 0 3 117 109 113 116 107 119 117 113 108 112] 0.000000003,0}}], intents = [], resumeSpan = , numBytes = 88, numKeys = 2 +iterator_prev(iter2) -> valid = false +batch_commit(engine) -> noop +mvcc_put(engine, t2, "tktluohcljr"/4, mbiyym) -> ok +iterator_nextkey(iter3) -> valid = false +mvcc_put(engine, t3, "thqatgbqt"/6, idnmxjlvsw) -> ok +mvcc_inconsistent_scan("ycqyfkeom"/6, "wbpcepef"/6, 5, 0.0160, 0.1770, true, true) -> kvs = [], intents = [{{"wbpcepef"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=8}], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_get(engine, "gukoaxqohrakhx"/4, t3) -> error: conflicting intents on "gukoaxqohrakhx" +iterator_prev(iter2) -> valid = false +mvcc_conditional_put(engine, t3, "yhegqgfufrmod"/6, jtvyp, blbkqemlet) -> ok +mvcc_reverse_scan("rfdgrsmg"/6, "snoojmhoaqte"/6, t3, 0.6166, 0.7049, false, true) -> error: conflicting intents on "rfdgrsmg" +txn_create_savepoint(t2, 0) -> savepoint 0 +iterator_next(iter2) -> valid = false +mvcc_reverse_scan("epqbxyaxptyypsf"/6, "xqylijbfdvmcqh"/6, t3, 0.5848, 0.6004, false, false) -> error: conflicting intents on "wbpcepef", "tktluohcljr", "rfdgrsmg", "ineoliklwoegq", "gukoaxqohrakhx" +mvcc_inconsistent_scan("xqylijbfdvmcqh"/6, "gqpapurgp"/6, 5, 0.7619, 0.3199, true, true) -> kvs = [], intents = [{{"gqpapurgp"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=1} {{"gukoaxqohrakhx"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=9} {{"ineoliklwoegq"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=2} {{"leyxhmxqrcbxsm"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=2} {{"mrxatnht"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000005,0 min=0,0 seq=3} {{"rfdgrsmg"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=5} {{"tktluohcljr"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 
min=0,0 seq=10} {{"wbpcepef"} id=00000000 key=/Min pri=0.00000000 epo=0 ts=0.000000003,0 min=0,0 seq=8}], resumeSpan = , numBytes = 0, numKeys = 0 +mvcc_put(engine, t3, "fphfjiotu"/6, kfsbxc) -> ok +mvcc_get(engine, "uwdvyohbpsgcr"/6, t3) -> val = , intent = +batch_commit(engine) -> noop +mvcc_inconsistent_get(engine, "pkgerlaju"/6, 5) -> val = , intent = +mvcc_put(engine, t2, "rqkudfmgjsoa"/6, kncf) -> ok +iterator_prev(iter2) -> valid = false +iterator_close(iter3) -> ok +mvcc_delete(engine, t2, "fnxretxlaqdcc"/6) -> ok +iterator_prev(iter2) -> valid = false +mvcc_put(engine, t3, "lfegihqn"/6, hulqxdderxj) -> ok +iterator_close(iter2) -> ok +mvcc_put(engine, t3, "uwdvyohbpsgcr"/6, cthhjldrof) -> ok +batch_open(batch1) -> batch1 +delete_range("clpjfaoch"/6, "uwdvyohbpsgcr"/6) -> deleted range = "clpjfaoch" - "coivcnwg" +txn_commit(t2) -> ok +iterator_open(engine, "tktluohcljr"/4, "rfdgrsmg"/6, iter4) -> iter4 +iterator_seeklt(iter4, "ojptdhamxv"/6) -> valid = false +txn_open(7, t4) -> t4 +iterator_nextkey(iter4) -> valid = false +mvcc_inconsistent_get(batch1, "pkgerlaju"/6, 7) -> val = , intent = +mvcc_scan("ycqyfkeom"/6, "tjbotfrbtxoyta"/6, t3, 0.9417, 0.7566, false, false) -> kvs = [{"tjbotfrbtxoyta" {[0 0 0 0 3 115 118 121 113 115] 0.000000006,1}} {"tktluohcljr" {[0 0 0 0 3 109 98 105 121 121 109] 0.000000003,0}} {"uwdvyohbpsgcr" {[0 0 0 0 3 99 116 104 104 106 108 100 114 111 102] 0.000000006,1}} {"wbpcepef" {[0 0 0 0 3 104 104 103 108 100 113 108 116 117 100 109 103 114] 0.000000003,0}}], intents = [], resumeSpan = , numBytes = 180, numKeys = 4 +mvcc_put(engine, t3, "tjbotfrbtxoyta"/6, lgkgkmmnyrrwmsb) -> ok +mvcc_put(batch1, t4, "clpjfaoch"/6, vmlss) -> ok +mvcc_delete(batch1, t4, "ycqyfkeom"/6) -> ok +iterator_prev(iter4) -> valid = false +mvcc_inconsistent_get(batch1, "dnxxqhxqag"/8, 7) -> val = , intent = +mvcc_put(batch1, t4, "bqjoxurxsucmw"/8, rqgfelqeemt) -> ok +mvcc_put(batch1, t4, "rfdgrsmg"/6, ficwvs) -> ok +txn_commit(t3) -> ok +mvcc_conditional_put(batch1, t4, "qpdaulhik"/6, xtubhpk, crgdj) -> error: conflicting intents on "qpdaulhik" +mvcc_delete(batch1, t4, "hselycajkto"/8) -> ok +mvcc_find_split_key("fphfjiotu"/6, "qxpkylrgfctkw"/8) -> ok, splitSize = 1024, splitKey = "qpdaulhik" +mvcc_put(batch1, t4, "fwgoimvprh"/8, djcxafrt) -> ok +iterator_prev(iter4) -> valid = false +mvcc_inconsistent_scan("clpjfaoch"/6, "leyxhmxqrcbxsm"/6, 7, 0.4131, 0.7189, true, true) -> kvs = [{"coivcnwg" {[0 0 0 0 3 107 108 108 116 97 98 104 119 101] 0.000000003,0}} {"fnxretxlaqdcc" {[] 0.000000003,0}} {"fphfjiotu" {[0 0 0 0 3 107 102 115 98 120 99] 0.000000006,1}} {"gqpapurgp" {[0 0 0 0 3 114 119 103 121 121 112 111 102 117 100] 0.000000006,1}} {"gukoaxqohrakhx" {[105 110 103 101 115 116 101 100] 0.000000004,0}} {"ineoliklwoegq" {[0 0 0 0 3 98 111 99 105 101] 0.000000003,0}}], intents = [], resumeSpan = , numBytes = 240, numKeys = 6 +mvcc_scan("xepnphkbkbykq"/6, "ydyviwcqscdnnnl"/8, t4, 0.7550, 0.1077, false, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +iterator_nextkey(iter4) -> valid = false +mvcc_scan("epqbxyaxptyypsf"/6, "coivcnwg"/2, t4, 0.5869, 0.7959, false, true) -> kvs = [{"coivcnwg" {[0 0 0 0 3 107 108 108 116 97 98 104 119 101] 0.000000003,0}}], intents = [], resumeSpan = , numBytes = 40, numKeys = 1 +mvcc_put(batch1, t4, "qxpkylrgfctkw"/8, igvhiyljnpt) -> ok +mvcc_put(batch1, t4, "hselycajkto"/8, xxayunqaiaga) -> ok +iterator_prev(iter4) -> valid = false +batch_commit(engine) -> noop +mvcc_get(batch1, "epqbxyaxptyypsf"/6, t4) -> val = , intent = 
+iterator_prev(iter4) -> valid = false +mvcc_put(batch1, t4, "tktluohcljr"/4, vigyna) -> ok +mvcc_put(batch1, t4, "mjyhmehskkoqwr"/8, mtxumlmbnaqtt) -> ok +mvcc_put(batch1, t4, "tjbotfrbtxoyta"/6, bdolflbnpr) -> error: conflicting intents on "tjbotfrbtxoyta" +mvcc_inconsistent_get(batch1, "lxuxacyiokrdt"/8, 7) -> val = , intent = +mvcc_put(batch1, t4, "epqbxyaxptyypsf"/6, brcxjmpwec) -> ok +batch_commit(batch1) -> ok +txn_commit(t4) -> ok +txn_open(9, t5) -> t5 +mvcc_put(engine, t5, "gqpapurgp"/6, paorv) -> ok +mvcc_scan("wgtnpnykjyiqjha"/10, "epqbxyaxptyypsf"/6, t5, 0.5909, 0.4375, true, true) -> kvs = [{"epqbxyaxptyypsf" {[0 0 0 0 3 98 114 99 120 106 109 112 119 101 99] 0.000000007,0}} {"fnxretxlaqdcc" {[] 0.000000003,0}} {"fphfjiotu" {[0 0 0 0 3 107 102 115 98 120 99] 0.000000006,1}} {"fwgoimvprh" {[0 0 0 0 3 100 106 99 120 97 102 114 116] 0.000000007,0}} {"gqpapurgp" {[0 0 0 0 3 112 97 111 114 118] 0.000000009,0}} {"gukoaxqohrakhx" {[105 110 103 101 115 116 101 100] 0.000000004,0}} {"hselycajkto" {[0 0 0 0 3 120 120 97 121 117 110 113 97 105 97 103 97] 0.000000007,0}} {"ineoliklwoegq" {[0 0 0 0 3 98 111 99 105 101] 0.000000003,0}} {"leyxhmxqrcbxsm" {[] 0.000000006,1}} {"lfegihqn" {[0 0 0 0 3 104 117 108 113 120 100 100 101 114 120 106] 0.000000006,1}} {"mjyhmehskkoqwr" {[0 0 0 0 3 109 116 120 117 109 108 109 98 110 97 113 116 116] 0.000000007,0}} {"mrxatnht" {[0 0 0 0 3 117 111 111 120 114 110 102 109 121 110 98 114 104 109 113] 0.000000006,1}} {"qpdaulhik" {[0 0 0 0 3 114 107 117 104 105 109 107 108] 0.000000006,1}} {"qxpkylrgfctkw" {[0 0 0 0 3 105 103 118 104 105 121 108 106 110 112 116] 0.000000007,0}} {"rfdgrsmg" {[0 0 0 0 3 102 105 99 119 118 115] 0.000000007,0}} {"rqkudfmgjsoa" {[0 0 0 0 3 107 110 99 102] 0.000000003,0}} {"snoojmhoaqte" {[105 110 103 101 115 116 101 100] 0.000000006,0}} {"thqatgbqt" {[0 0 0 0 3 105 100 110 109 120 106 108 118 115 119] 0.000000006,1}}], intents = [], resumeSpan = {tjbotfrbtxoyta-wgtnpnykjyiqjha}, numBytes = 759, numKeys = 18 +iterator_nextkey(iter4) -> valid = false +mvcc_reverse_scan("xqylijbfdvmcqh"/6, "leyxhmxqrcbxsm"/6, t5, 0.8436, 0.6321, true, true) -> kvs = [{"wbpcepef" {[0 0 0 0 3 104 104 103 108 100 113 108 116 117 100 109 103 114] 0.000000003,0}} {"uwdvyohbpsgcr" {[0 0 0 0 3 99 116 104 104 106 108 100 114 111 102] 0.000000006,1}} {"tktluohcljr" {[0 0 0 0 3 118 105 103 121 110 97] 0.000000007,0}} {"tjbotfrbtxoyta" {[0 0 0 0 3 108 103 107 103 107 109 109 110 121 114 114 119 109 115 98] 0.000000006,1}} {"thqatgbqt" {[0 0 0 0 3 105 100 110 109 120 106 108 118 115 119] 0.000000006,1}} {"snoojmhoaqte" {[105 110 103 101 115 116 101 100] 0.000000006,0}} {"rqkudfmgjsoa" {[0 0 0 0 3 107 110 99 102] 0.000000003,0}} {"rfdgrsmg" {[0 0 0 0 3 102 105 99 119 118 115] 0.000000007,0}} {"qxpkylrgfctkw" {[0 0 0 0 3 105 103 118 104 105 121 108 106 110 112 116] 0.000000007,0}} {"qpdaulhik" {[0 0 0 0 3 114 107 117 104 105 109 107 108] 0.000000006,1}} {"mrxatnht" {[0 0 0 0 3 117 111 111 120 114 110 102 109 121 110 98 114 104 109 113] 0.000000006,1}} {"mjyhmehskkoqwr" {[0 0 0 0 3 109 116 120 117 109 108 109 98 110 97 113 116 116] 0.000000007,0}} {"lfegihqn" {[0 0 0 0 3 104 117 108 113 120 100 100 101 114 120 106] 0.000000006,1}} {"leyxhmxqrcbxsm" {[] 0.000000006,1}}], intents = [], resumeSpan = , numBytes = 623, numKeys = 14 +txn_abort(t5) -> ok +mvcc_inconsistent_scan("whgfiungmxkre"/10, "xqylijbfdvmcqh"/6, 9, 0.3792, 0.4962, true, false) -> kvs = [], intents = [], resumeSpan = , numBytes = 0, numKeys = 0 +txn_open(11, t6) -> t6 +mvcc_put(engine, t6, 
"lfegihqn"/6, vswrobtnuc) -> ok +batch_commit(engine) -> noop +mvcc_conditional_put(engine, t6, "ycqyfkeom"/6, pemqu, cjcvouoyheugo) -> ok +mvcc_inconsistent_get(engine, "rqkudfmgjsoa"/6, 11) -> val = raw_bytes:"\000\000\000\000\003kncf" timestamp: , intent = +delete_range("rqkudfmgjsoa"/6, "tktluohcljr"/4) -> deleted range = "rqkudfmgjsoa" - "tktluohcljr" +mvcc_put(engine, t6, "qxaisbnoe"/4, olos) -> ok +mvcc_reverse_scan("gukoaxqohrakhx"/4, "mrxatnht"/6, t6, 0.3089, 0.2601, false, true) -> kvs = [{"mjyhmehskkoqwr" {[0 0 0 0 3 109 116 120 117 109 108 109 98 110 97 113 116 116] 0.000000007,0}} {"lfegihqn" {[0 0 0 0 3 118 115 119 114 111 98 116 110 117 99] 0.000000011,0}} {"leyxhmxqrcbxsm" {[] 0.000000006,1}} {"ineoliklwoegq" {[0 0 0 0 3 98 111 99 105 101] 0.000000003,0}} {"hselycajkto" {[0 0 0 0 3 120 120 97 121 117 110 113 97 105 97 103 97] 0.000000007,0}} {"gukoaxqohrakhx" {[105 110 103 101 115 116 101 100] 0.000000004,0}}], intents = [], resumeSpan = , numBytes = 254, numKeys = 6 +batch_open(batch2) -> batch2 +mvcc_put(batch2, t6, "gqpapurgp"/6, nuqrrj) -> ok +iterator_open(batch2, "ojptdhamxv"/6, "fnxretxlaqdcc"/6, iter5) -> iter5 +mvcc_init_put(batch2, t6, "pkgerlaju"/6, tbkyqdxpkypv) -> ok +iterator_seeklt(iter5, "jcxainusdg"/12) -> noop due to missing seekLT support in rocksdb batch iterators +batch_open(batch3) -> batch3 +mvcc_inconsistent_scan("fwgoimvprh"/8, "rfdgrsmg"/6, 11, 0.2005, 0.3658, false, true) -> kvs = [{"fwgoimvprh" {[0 0 0 0 3 100 106 99 120 97 102 114 116] 0.000000007,0}} {"gqpapurgp" {[0 0 0 0 3 114 119 103 121 121 112 111 102 117 100] 0.000000006,1}} {"gukoaxqohrakhx" {[105 110 103 101 115 116 101 100] 0.000000004,0}} {"hselycajkto" {[0 0 0 0 3 120 120 97 121 117 110 113 97 105 97 103 97] 0.000000007,0}} {"ineoliklwoegq" {[0 0 0 0 3 98 111 99 105 101] 0.000000003,0}} {"leyxhmxqrcbxsm" {[] 0.000000006,1}}], intents = [], resumeSpan = {lfegihqn-rfdgrsmg}, numBytes = 250, numKeys = 6 +mvcc_conditional_put(batch2, t6, "trjspvvymm"/12, cnbsn, bckhf) -> ok +mvcc_put(batch2, t6, "trjspvvymm"/12, uihihxflpydisaw) -> ok +mvcc_put(engine, t6, "ywpdvttyiesg"/12, ghxrcxjeajt) -> ok +txn_create_savepoint(t6, 0) -> savepoint 0 +mvcc_put(batch3, t6, "tktluohcljr"/4, awfwesv) -> ok +mvcc_delete(batch3, t6, "yhjnmapl"/12) -> ok +batch_open(batch4) -> batch4 +iterator_close(iter5) -> ok +batch_commit(batch2) -> ok +batch_commit(batch3) -> ok +txn_abort(t6) -> ok + diff --git a/pkg/ui/workspaces/cluster-ui/src/api/index.ts b/pkg/ui/workspaces/cluster-ui/src/api/index.ts index 8e97efd8eb46..7f7bf1678b40 100644 --- a/pkg/ui/workspaces/cluster-ui/src/api/index.ts +++ b/pkg/ui/workspaces/cluster-ui/src/api/index.ts @@ -16,3 +16,4 @@ export * from "./nodesApi"; export * from "./clusterLocksApi"; export * from "./insightsApi"; export * from "./indexActionsApi"; +export * from "./schemaInsightsApi"; diff --git a/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts new file mode 100644 index 000000000000..e53359869262 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/api/schemaInsightsApi.ts @@ -0,0 +1,195 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+import { executeSql, SqlExecutionRequest, SqlTxnResult } from "./sqlApi";
+import {
+  InsightRecommendation,
+  InsightType,
+  recommendDropUnusedIndex,
+} from "../insights";
+import { HexStringToInt64String } from "../util";
+
+// Export for db-console import from clusterUiApi.
+export type { InsightRecommendation } from "../insights";
+
+export type ClusterIndexUsageStatistic = {
+  table_id: number;
+  index_id: number;
+  last_read?: string;
+  created_at?: string;
+  index_name: string;
+  table_name: string;
+  database_id: number;
+  database_name: string;
+  unused_threshold: string;
+};
+
+type CreateIndexRecommendationsResponse = {
+  fingerprint_id: string;
+  db: string;
+  query: string;
+  querysummary: string;
+  implicittxn: boolean;
+  index_recommendations: string[];
+};
+
+type SchemaInsightResponse =
+  | ClusterIndexUsageStatistic
+  | CreateIndexRecommendationsResponse;
+type SchemaInsightQuery<RowType> = {
+  name: InsightType;
+  query: string;
+  toSchemaInsight: (response: SqlTxnResult<RowType>) => InsightRecommendation[];
+};
+
+function clusterIndexUsageStatsToSchemaInsight(
+  txn_result: SqlTxnResult<ClusterIndexUsageStatistic>,
+): InsightRecommendation[] {
+  const results: Record<string, InsightRecommendation> = {};
+
+  txn_result.rows.forEach(row => {
+    const result = recommendDropUnusedIndex(row);
+    if (result.recommend) {
+      const key = row.table_id.toString() + row.index_id.toString();
+      if (!results[key]) {
+        results[key] = {
+          type: "DROP_INDEX",
+          database: row.database_name,
+          query: `DROP INDEX ${row.table_name}@${row.index_name};`,
+          indexDetails: {
+            table: row.table_name,
+            indexID: row.index_id,
+            indexName: row.index_name,
+            lastUsed: result.reason,
+          },
+        };
+      }
+    }
+  });
+
+  return Object.values(results);
+}
+
+function createIndexRecommendationsToSchemaInsight(
+  txn_result: SqlTxnResult<CreateIndexRecommendationsResponse>,
+): InsightRecommendation[] {
+  const results: InsightRecommendation[] = [];
+
+  txn_result.rows.forEach(row => {
+    row.index_recommendations.forEach(rec => {
+      const recSplit = rec.split(" : ");
+      const recType = recSplit[0];
+      const recQuery = recSplit[1];
+      let idxType: InsightType;
+      switch (recType) {
+        case "creation":
+          idxType = "CREATE_INDEX";
+          break;
+        case "replacement":
+          idxType = "REPLACE_INDEX";
+          break;
+        case "drop":
+          idxType = "DROP_INDEX";
+          break;
+      }
+
+      results.push({
+        type: idxType,
+        database: row.db,
+        execution: {
+          statement: row.query,
+          summary: row.querysummary,
+          fingerprintID: HexStringToInt64String(row.fingerprint_id),
+          implicit: row.implicittxn,
+        },
+        query: recQuery,
+      });
+    });
+  });
+  return results;
+}
+
+const dropUnusedIndexQuery: SchemaInsightQuery<ClusterIndexUsageStatistic> = {
+  name: "DROP_INDEX",
+  query: `SELECT
+    us.table_id,
+    us.index_id,
+    us.last_read,
+    ti.created_at,
+    ti.index_name,
+    t.name as table_name,
+    t.parent_id as database_id,
+    t.database_name,
+    (SELECT value FROM crdb_internal.cluster_settings WHERE variable = 'sql.index_recommendation.drop_unused_duration') AS unused_threshold
+    FROM "".crdb_internal.index_usage_statistics AS us
+    JOIN "".crdb_internal.table_indexes as ti ON us.index_id = ti.index_id AND us.table_id = ti.descriptor_id
+    JOIN "".crdb_internal.tables as t ON t.table_id = ti.descriptor_id and t.name = ti.descriptor_name
+    WHERE t.database_name != 'system' AND ti.index_type != 'primary';`,
+  toSchemaInsight: clusterIndexUsageStatsToSchemaInsight,
+};
+
+const createIndexRecommendationsQuery: SchemaInsightQuery<CreateIndexRecommendationsResponse> =
+  {
+    name: "CREATE_INDEX",
+    query: `SELECT
+      encode(fingerprint_id, 'hex') AS fingerprint_id,
+      metadata ->> 'db' AS db,
+      metadata ->> 'query' AS query,
+      metadata ->> 'querySummary' as querySummary,
+      metadata ->> 'implicitTxn' AS implicitTxn,
+      index_recommendations
+    FROM (
+      SELECT
+        fingerprint_id,
+        statistics -> 'statistics' ->> 'lastExecAt' as lastExecAt,
+        metadata,
+        index_recommendations,
+        row_number() over(
+          PARTITION BY
+            fingerprint_id
+          ORDER BY statistics -> 'statistics' ->> 'lastExecAt' DESC
+        ) AS rank
+      FROM crdb_internal.statement_statistics WHERE aggregated_ts >= now() - INTERVAL '1 week')
+    WHERE rank=1 AND array_length(index_recommendations,1) > 0;`,
+    toSchemaInsight: createIndexRecommendationsToSchemaInsight,
+  };
+
+const schemaInsightQueries: SchemaInsightQuery<SchemaInsightResponse>[] = [
+  dropUnusedIndexQuery,
+  createIndexRecommendationsQuery,
+];
+
+// getSchemaInsights makes requests over the SQL API and transforms the
+// corresponding SQL responses into schema insights.
+export function getSchemaInsights(): Promise<InsightRecommendation[]> {
+  const request: SqlExecutionRequest = {
+    statements: schemaInsightQueries.map(insightQuery => ({
+      sql: insightQuery.query,
+    })),
+    execute: true,
+  };
+  return executeSql<SchemaInsightResponse>(request).then(result => {
+    const results: InsightRecommendation[] = [];
+    if (result.execution.txn_results.length === 0) {
+      // No data.
+      return results;
+    }
+
+    result.execution.txn_results.map(txn_result => {
+      // Note: txn_result.statement values begin at 1, not 0.
+      const insightQuery: SchemaInsightQuery<SchemaInsightResponse> =
+        schemaInsightQueries[txn_result.statement - 1];
+      if (txn_result.rows) {
+        results.push(...insightQuery.toSchemaInsight(txn_result));
+      }
+    });
+    return results;
+  });
+}
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/index.ts b/pkg/ui/workspaces/cluster-ui/src/insights/index.ts
index 6bccb1327b89..c16a8431c2cf 100644
--- a/pkg/ui/workspaces/cluster-ui/src/insights/index.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/index.ts
@@ -10,5 +10,7 @@
 export * from "./workloadInsights";
 export * from "./workloadInsightDetails";
+export * from "./schemaInsights";
 export * from "./utils";
 export * from "./types";
+export * from "./insightsErrorComponent";
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx
index 4d9984faebe8..6b081b8ad6b6 100644
--- a/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/indexActionBtn.tsx
@@ -11,7 +11,6 @@
 import React, { useCallback, useState } from "react";
 import { Modal } from "../modal";
 import { Text, TextTypes } from "../text";
-import { InsightType } from "../insightsTable/insightsTable";
 import { Button } from "../button";
 import { executeIndexRecAction, IndexActionResponse } from "../api";
 import { createIndex, dropIndex, onlineSchemaChanges } from "../util";
@@ -19,6 +18,7 @@
 import { Anchor } from "../anchor";
 import { InlineAlert } from "@cockroachlabs/ui-components";
 import classNames from "classnames/bind";
 import styles from "./indexActionBtn.module.scss";
+import { InsightType } from "./types";
 
 const cx = classNames.bind(styles);
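Note on the batching above: `getSchemaInsights` sends both insight queries in a single SQL-over-HTTP request, then pairs each transaction result back to the query that produced it via the 1-based `statement` index. A condensed sketch of that pairing logic (the types here are simplified stand-ins, not the real `sqlApi` shapes):

```ts
// Simplified stand-ins for SqlTxnResult and the query table in the patch.
type TxnResult = { statement: number; rows?: { msg: string }[] };
type Query = { name: string; toInsight: (r: TxnResult) => string[] };

const queries: Query[] = [
  { name: "DROP_INDEX", toInsight: r => r.rows.map(x => x.msg) },
  { name: "CREATE_INDEX", toInsight: r => r.rows.map(x => x.msg) },
];

// txn_results come back in statement order, 1-based, so the query that
// produced result r is queries[r.statement - 1]; results with no rows skip.
const txnResults: TxnResult[] = [{ statement: 2, rows: [{ msg: "add idx" }] }];
const insights: string[] = [];
for (const r of txnResults) {
  const q = queries[r.statement - 1];
  if (r.rows) {
    insights.push(...q.toInsight(r));
  }
}
console.log(insights); // ["add idx"]
```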
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/workloadInsightsError.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/insightsErrorComponent.tsx
similarity index 89%
rename from pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/workloadInsightsError.tsx
rename to pkg/ui/workspaces/cluster-ui/src/insights/insightsErrorComponent.tsx
index ec74ae802229..2ff7abbbe9b6 100644
--- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/workloadInsightsError.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/insightsErrorComponent.tsx
@@ -10,7 +10,7 @@
 
 import React from "react";
 import classNames from "classnames/bind";
 
-import styles from "./workloadInsights.module.scss";
+import styles from "./workloadInsights/util/workloadInsights.module.scss";
 
 const cx = classNames.bind(styles);
@@ -18,7 +18,7 @@
 type SQLInsightsErrorProps = {
   execType: string;
 };
 
-export const WorkloadInsightsError = (
+export const InsightsError = (
   props: SQLInsightsErrorProps,
 ): React.ReactElement => {
   return (
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/emptySchemaInsightsTablePlaceholder.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/emptySchemaInsightsTablePlaceholder.tsx
new file mode 100644
index 000000000000..9c6ac0bbf42d
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/emptySchemaInsightsTablePlaceholder.tsx
@@ -0,0 +1,32 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import React from "react";
+import { EmptyTable, EmptyTableProps } from "src/empty";
+import magnifyingGlassImg from "src/assets/emptyState/magnifying-glass.svg";
+import emptyTableResultsImg from "src/assets/emptyState/empty-table-results.svg";
+
+const emptySearchResults = {
+  title: "No schema insight match your search.",
+  icon: magnifyingGlassImg,
+};
+
+export const EmptySchemaInsightsTablePlaceholder: React.FC<{
+  isEmptySearchResults: boolean;
+}> = props => {
+  const emptyPlaceholderProps: EmptyTableProps = props.isEmptySearchResults
+    ? emptySearchResults
+    : {
+        title: "No schema insight since this page was last refreshed.",
+        icon: emptyTableResultsImg,
+      };
+
+  return <EmptyTable {...emptyPlaceholderProps} />;
+};
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/index.ts b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/index.ts
new file mode 100644
index 000000000000..45c5dbc2f62a
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/index.ts
@@ -0,0 +1,13 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+export * from "./indexUsageStatsRec";
+export * from "./schemaInsightsView";
+export * from "./emptySchemaInsightsTablePlaceholder";
diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.spec.ts b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.spec.ts
new file mode 100644
index 000000000000..14e176f4b2e6
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.spec.ts
@@ -0,0 +1,130 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { + formatMomentDuration, + indexNeverUsedReason, + recommendDropUnusedIndex, +} from "./indexUsageStatsRec"; +import { ClusterIndexUsageStatistic } from "../../api"; +import moment from "moment"; + +describe("recommendDropUnusedIndex", () => { + const mockCurrentTime = moment(); + const oneHourAgo: moment.Moment = moment(mockCurrentTime).subtract(1, "hour"); + + describe("Recently Used Index", () => { + const recentlyUsedIndex: ClusterIndexUsageStatistic = { + table_id: 1, + index_id: 1, + last_read: moment.utc(oneHourAgo, "X").format(), + created_at: null, + index_name: "recent_index", + table_name: "test_table", + database_id: 1, + database_name: "test_db", + unused_threshold: "10h0m0s", + }; + it("should not recommend index to be dropped", () => { + expect(recommendDropUnusedIndex(recentlyUsedIndex)).toEqual({ + recommend: false, + reason: "", + }); + }); + }); + describe("Never Used Index", () => { + const neverUsedIndex: ClusterIndexUsageStatistic = { + table_id: 1, + index_id: 1, + last_read: null, + created_at: null, + index_name: "recent_index", + table_name: "test_table", + database_id: 1, + database_name: "test_db", + unused_threshold: "10h0m0s", + }; + it("should recommend index to be dropped with the reason that the index is never used", () => { + expect(recommendDropUnusedIndex(neverUsedIndex)).toEqual({ + recommend: true, + reason: indexNeverUsedReason, + }); + }); + }); + describe("Index Last Use Exceeds Duration Threshold", () => { + const exceedsDurationIndex: ClusterIndexUsageStatistic = { + table_id: 1, + index_id: 1, + last_read: moment.utc(oneHourAgo, "X").format(), + created_at: null, + index_name: "recent_index", + table_name: "test_table", + database_id: 1, + database_name: "test_db", + unused_threshold: "0h30m0s", + }; + it("should recommend index to be dropped with the reason that it has exceeded the configured index unuse duration", () => { + expect(recommendDropUnusedIndex(exceedsDurationIndex)).toEqual({ + recommend: true, + reason: `This index has not been used in over ${formatMomentDuration( + moment.duration( + "PT" + exceedsDurationIndex.unused_threshold.toUpperCase(), + ), + )} and can be removed for better write performance.`, + }); + }); + }); + describe("Index Created But Never Read", () => { + describe("creation date does not exceed unuse duration", () => { + const createdNeverReadIndexNoExceed: ClusterIndexUsageStatistic = { + table_id: 1, + index_id: 1, + last_read: null, + created_at: moment.utc(oneHourAgo, "X").format(), + index_name: "recent_index", + table_name: "test_table", + database_id: 1, + database_name: "test_db", + unused_threshold: "10h0m0s", + }; + it("should not recommend index to be dropped", () => { + expect(recommendDropUnusedIndex(createdNeverReadIndexNoExceed)).toEqual( + { + recommend: false, + reason: "", + }, + ); + }); + }); + describe("creation date exceeds unuse duration", () => { + const createdNeverReadIndexExceed: ClusterIndexUsageStatistic = { + table_id: 1, + index_id: 1, + last_read: null, + created_at: moment.utc(oneHourAgo, "X").format(), + index_name: "recent_index", + table_name: "test_table", + database_id: 1, + database_name: "test_db", + unused_threshold: "0h30m0s", + }; + it("should recommend index to be dropped with the reason that it has exceeded the configured index unuse 
duration", () => { + expect(recommendDropUnusedIndex(createdNeverReadIndexExceed)).toEqual({ + recommend: true, + reason: `This index has not been used in over ${formatMomentDuration( + moment.duration( + "PT" + createdNeverReadIndexExceed.unused_threshold.toUpperCase(), + ), + )} and can be removed for better write performance.`, + }); + }); + }); + }); +}); diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.ts b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.ts new file mode 100644 index 000000000000..516dc6e6be7c --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/indexUsageStatsRec.ts @@ -0,0 +1,73 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { ClusterIndexUsageStatistic } from "../../api/schemaInsightsApi"; +import moment from "moment"; + +export const indexNeverUsedReason = + "This index has not been used and can be removed for better write performance."; + +const minDate = moment.utc("0001-01-01"); // minimum value as per UTC. + +type dropIndexRecommendation = { + recommend: boolean; + reason: string; +}; + +export function recommendDropUnusedIndex( + clusterIndexUsageStat: ClusterIndexUsageStatistic, +): dropIndexRecommendation { + const createdAt = clusterIndexUsageStat.created_at + ? moment.utc(clusterIndexUsageStat.created_at) + : minDate; + const lastRead = clusterIndexUsageStat.last_read + ? moment.utc(clusterIndexUsageStat.last_read) + : minDate; + let lastActive = createdAt; + if (lastActive.isSame(minDate) && !lastRead.isSame(minDate)) { + lastActive = lastRead; + } + + if (lastActive.isSame(minDate)) { + return { recommend: true, reason: indexNeverUsedReason }; + } + + const duration = moment.duration(moment().diff(lastActive)); + const unusedThreshold = moment.duration( + "PT" + clusterIndexUsageStat.unused_threshold.toUpperCase(), + ); + if (duration >= unusedThreshold) { + return { + recommend: true, + reason: `This index has not been used in over ${formatMomentDuration( + unusedThreshold, + )} and can be removed for better write performance.`, + }; + } + return { recommend: false, reason: "" }; +} + +export function formatMomentDuration(duration: moment.Duration): string { + const numSecondsInMinute = 60; + const numMinutesInHour = 60; + const numHoursInDay = 24; + + const seconds = Math.floor(duration.as("s")) % numSecondsInMinute; + const minutes = Math.floor(duration.as("m")) % numMinutesInHour; + const hours = Math.floor(duration.as("h")) % numHoursInDay; + const days = Math.floor(duration.as("d")); + + const daysSubstring = days > 0 ? `${days} days, ` : ""; + const hoursSubstring = hours > 0 ? `${hours} hours, ` : ""; + const minutesSubstring = minutes > 0 ? `${minutes} minutes, ` : ""; + const secondsSubstring = seconds > 0 ? 
`${seconds} seconds, ` : ""; + + return `${daysSubstring}${hoursSubstring}${minutesSubstring}${secondsSubstring}`; +} diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsights.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsights.fixture.ts new file mode 100644 index 000000000000..ff49590043cc --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsights.fixture.ts @@ -0,0 +1,74 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { SchemaInsightsViewProps } from "./schemaInsightsView"; + +export const SchemaInsightsPropsFixture: SchemaInsightsViewProps = { + schemaInsights: [ + { + type: "DROP_INDEX", + database: "db_name", + indexDetails: { + table: "table_name", + indexID: 1, + indexName: "index_name", + lastUsed: + "This index has not been used and can be removed for better write performance.", + }, + }, + { + type: "DROP_INDEX", + database: "db_name2", + indexDetails: { + table: "table_name2", + indexID: 2, + indexName: "index_name2", + lastUsed: + "This index has not been used in over 9 days, 5 hours, and 3 minutes and can be removed for better write performance.", + }, + }, + { + type: "CREATE_INDEX", + database: "db_name", + query: "CREATE INDEX ON test_table (another_num) STORING (num);", + execution: { + statement: "SELECT * FROM test_table WHERE another_num > _", + summary: "SELECT * FROM test_table", + fingerprintID: "\\xc093e4523ab0bd3e", + implicit: true, + }, + }, + { + type: "CREATE_INDEX", + database: "db_name", + query: "CREATE INDEX ON test_table (yet_another_num) STORING (num);", + execution: { + statement: "SELECT * FROM test_table WHERE yet_another_num > _", + summary: "SELECT * FROM test_table", + fingerprintID: "\\xc093e4523ab0db9o", + implicit: false, + }, + }, + ], + schemaInsightsDatabases: ["db_name", "db_name2"], + schemaInsightsTypes: ["DROP_INDEX", "CREATE_INDEX"], + schemaInsightsError: null, + sortSetting: { + ascending: false, + columnTitle: "insights", + }, + filters: { + database: "", + schemaInsightType: "", + }, + refreshSchemaInsights: () => {}, + onSortChange: () => {}, + onFiltersChange: () => {}, +}; diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsightsView.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsightsView.tsx new file mode 100644 index 000000000000..0eced088f120 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/insights/schemaInsights/schemaInsightsView.tsx @@ -0,0 +1,251 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
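One detail worth calling out from `indexUsageStatsRec.ts` above: `unused_threshold` arrives as a Go-style duration string from the cluster setting (for example `10h0m0s`), and prefixing `PT` plus upper-casing turns it into a valid ISO-8601 duration that moment can parse. A minimal standalone sketch of that conversion and the comparison it feeds (assuming only `moment` is installed):

```ts
import moment from "moment";

// "10h0m0s" (Go duration from the cluster setting) -> "PT10H0M0S" (ISO-8601).
const unusedThreshold = "10h0m0s";
const threshold = moment.duration("PT" + unusedThreshold.toUpperCase());
console.log(threshold.asHours()); // 10

// An index is flagged when (now - lastActive) >= threshold.
const lastActive = moment.utc().subtract(12, "hours");
console.log(
  moment.duration(moment().diff(lastActive)).asMilliseconds() >=
    threshold.asMilliseconds(),
); // true
```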
+ +import React, { useContext } from "react"; +import styles from "src/statementsPage/statementsPage.module.scss"; +import sortableTableStyles from "src/sortedtable/sortedtable.module.scss"; +import { ISortedTablePagination, SortSetting } from "../../sortedtable"; +import classNames from "classnames/bind"; +import { PageConfig, PageConfigItem } from "../../pageConfig"; +import { Loading } from "../../loading"; +import { useEffect, useState } from "react"; +import { useHistory } from "react-router-dom"; +import { + InsightsSortedTable, + makeInsightsColumns, +} from "../../insightsTable/insightsTable"; +import { + calculateActiveFilters, + defaultFilters, + Filter, + getFullFiltersAsStringRecord, +} from "../../queryFilter"; +import { queryByName, syncHistory } from "../../util"; +import { getTableSortFromURL } from "../../sortedtable/getTableSortFromURL"; +import { TableStatistics } from "../../tableStatistics"; +import { InsightRecommendation, SchemaInsightEventFilters } from "../types"; +import { getSchemaInsightEventFiltersFromURL } from "../../queryFilter/utils"; +import { filterSchemaInsights } from "../utils"; +import { Search } from "../../search"; +import { InsightsError } from "../insightsErrorComponent"; +import { Pagination } from "../../pagination"; +import { EmptySchemaInsightsTablePlaceholder } from "./emptySchemaInsightsTablePlaceholder"; +import { CockroachCloudContext } from "../../contexts"; +const cx = classNames.bind(styles); +const sortableTableCx = classNames.bind(sortableTableStyles); + +export type SchemaInsightsViewStateProps = { + schemaInsights: InsightRecommendation[]; + schemaInsightsDatabases: string[]; + schemaInsightsTypes: string[]; + schemaInsightsError: Error | null; + filters: SchemaInsightEventFilters; + sortSetting: SortSetting; +}; + +export type SchemaInsightsViewDispatchProps = { + onFiltersChange: (filters: SchemaInsightEventFilters) => void; + onSortChange: (ss: SortSetting) => void; + refreshSchemaInsights: () => void; +}; + +export type SchemaInsightsViewProps = SchemaInsightsViewStateProps & + SchemaInsightsViewDispatchProps; + +const SCHEMA_INSIGHT_SEARCH_PARAM = "q"; + +export const SchemaInsightsView: React.FC = ({ + sortSetting, + schemaInsights, + schemaInsightsDatabases, + schemaInsightsTypes, + schemaInsightsError, + filters, + refreshSchemaInsights, + onFiltersChange, + onSortChange, +}: SchemaInsightsViewProps) => { + const isCockroachCloud = useContext(CockroachCloudContext); + const [pagination, setPagination] = useState({ + current: 1, + pageSize: 10, + }); + const history = useHistory(); + const [search, setSearch] = useState( + queryByName(history.location, SCHEMA_INSIGHT_SEARCH_PARAM), + ); + + useEffect(() => { + // Refresh every 5mins. + refreshSchemaInsights(); + const interval = setInterval(refreshSchemaInsights, 60 * 1000 * 5); + return () => { + clearInterval(interval); + }; + }, [refreshSchemaInsights]); + + useEffect(() => { + // We use this effect to sync settings defined on the URL (sort, filters), + // with the redux store. The only time we do this is when the user navigates + // to the page directly via the URL and specifies settings in the query string. + // Note that the desired behaviour is currently that the user is unable to + // clear filters via the URL, and must do so with page controls. 
+    const sortSettingURL = getTableSortFromURL(history.location);
+    const filtersFromURL = getSchemaInsightEventFiltersFromURL(
+      history.location,
+    );
+
+    if (sortSettingURL) {
+      onSortChange(sortSettingURL);
+    }
+    if (filtersFromURL) {
+      onFiltersChange(filtersFromURL);
+    }
+  }, [history, onFiltersChange, onSortChange]);
+
+  useEffect(() => {
+    // This effect runs when the filters or sort settings received from
+    // redux changes and syncs the URL params with redux.
+    syncHistory(
+      {
+        ascending: sortSetting.ascending.toString(),
+        columnTitle: sortSetting.columnTitle,
+        ...getFullFiltersAsStringRecord(filters),
+        [SCHEMA_INSIGHT_SEARCH_PARAM]: search,
+      },
+      history,
+    );
+  }, [
+    history,
+    filters,
+    sortSetting.ascending,
+    sortSetting.columnTitle,
+    search,
+  ]);
+
+  const onChangePage = (current: number): void => {
+    setPagination({
+      current: current,
+      pageSize: 10,
+    });
+  };
+
+  const resetPagination = () => {
+    setPagination({
+      current: 1,
+      pageSize: 10,
+    });
+  };
+
+  const onChangeSortSetting = (ss: SortSetting): void => {
+    onSortChange(ss);
+    resetPagination();
+  };
+
+  const onSubmitSearch = (newSearch: string) => {
+    if (newSearch === search) return;
+    setSearch(newSearch);
+    resetPagination();
+  };
+
+  const clearSearch = () => onSubmitSearch("");
+
+  const onSubmitFilters = (selectedFilters: SchemaInsightEventFilters) => {
+    onFiltersChange(selectedFilters);
+    resetPagination();
+  };
+
+  const clearFilters = () =>
+    onSubmitFilters({
+      database: defaultFilters.database,
+      schemaInsightType: defaultFilters.schemaInsightType,
+    });
+
+  const countActiveFilters = calculateActiveFilters(filters);
+
+  const filteredSchemaInsights = filterSchemaInsights(
+    schemaInsights,
+    filters,
+    search,
+  );
+
+  return (
+    <div className={cx("root")}>
+      <PageConfig>
+        <PageConfigItem>
+          <Search
+            placeholder="Search Schema Insights"
+            onSubmit={onSubmitSearch}
+            onClear={clearSearch}
+            defaultValue={search}
+          />
+        </PageConfigItem>
+        <PageConfigItem>
+          <Filter
+            activeFilters={countActiveFilters}
+            onSubmitFilters={onSubmitFilters}
+            filters={filters}
+            hideAppNames={true}
+            dbNames={schemaInsightsDatabases}
+            schemaInsightTypes={schemaInsightsTypes}
+            showDB={true}
+            showSchemaInsightTypes={true}
+          />
+        </PageConfigItem>
+      </PageConfig>
+      <div className={cx("table-area")}>
+        <Loading
+          loading={schemaInsights === null}
+          page="schema insights"
+          error={schemaInsightsError}
+          renderError={() =>
+            InsightsError({
+              execType: "schema insights",
+            })
+          }
+        >
+          <div>
+            <section className={sortableTableCx("cl-table-container")}>
+              <div>
+                <TableStatistics
+                  pagination={pagination}
+                  search={search}
+                  totalCount={filteredSchemaInsights?.length}
+                  arrayItemName="schema insights"
+                  activeFilters={countActiveFilters}
+                  onClearFilters={clearFilters}
+                />
+              </div>
+              <InsightsSortedTable
+                columns={makeInsightsColumns(isCockroachCloud)}
+                data={filteredSchemaInsights}
+                sortSetting={sortSetting}
+                onChangeSortSetting={onChangeSortSetting}
+                renderNoResult={
+                  <EmptySchemaInsightsTablePlaceholder
+                    isEmptySearchResults={
+                      search?.length > 0 && filteredSchemaInsights?.length == 0
+                    }
+                  />
+                }
+              />
+            </section>
+            <Pagination
+              pageSize={pagination.pageSize}
+              current={pagination.current}
+              total={filteredSchemaInsights?.length}
+              onChange={onChangePage}
+            />
+          </div>
+        </Loading>
+      </div>
+    </div>
+ ); +}; diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/types.ts b/pkg/ui/workspaces/cluster-ui/src/insights/types.ts index 7cc5f031b40e..0685f02c7f19 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/types.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/types.ts @@ -131,3 +131,47 @@ export type InsightEventFilters = Omit< | "timeNumber" | "timeUnit" >; + +export type SchemaInsightEventFilters = Pick< + Filters, + "database" | "schemaInsightType" +>; + +export type InsightType = + | "DROP_INDEX" + | "CREATE_INDEX" + | "REPLACE_INDEX" + | "HIGH_WAIT_TIME" + | "HIGH_RETRIES" + | "SUBOPTIMAL_PLAN" + | "FAILED"; + +export interface InsightRecommendation { + type: InsightType; + database?: string; + query?: string; + indexDetails?: indexDetails; + execution?: executionDetails; + details?: insightDetails; +} + +export interface indexDetails { + table: string; + indexID: number; + indexName: string; + lastUsed?: string; +} + +export interface executionDetails { + statement?: string; + summary?: string; + fingerprintID?: string; + implicit?: boolean; + retries?: number; + indexRecommendations?: string[]; +} + +export interface insightDetails { + duration: number; + description: string; +} diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts b/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts index 2eb2d6f8e92a..00022fd27b11 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/utils.ts @@ -22,6 +22,9 @@ import { InsightEvent, InsightEventFilters, InsightEventDetails, + SchemaInsightEventFilters, + InsightType, + InsightRecommendation, } from "./types"; export const getInsights = ( @@ -145,3 +148,76 @@ export function getAppsFromTransactionInsights( return Array.from(uniqueAppNames).sort(); } + +export const filterSchemaInsights = ( + schemaInsights: InsightRecommendation[], + filters: SchemaInsightEventFilters, + search?: string, +): InsightRecommendation[] => { + if (schemaInsights == null) return []; + + let filteredSchemaInsights = schemaInsights; + + if (filters.database) { + const databases = + filters.database.toString().length > 0 + ? filters.database.toString().split(",") + : []; + if (databases.includes(unset)) { + databases.push(""); + } + filteredSchemaInsights = filteredSchemaInsights.filter( + schemaInsight => + databases.length === 0 || databases.includes(schemaInsight.database), + ); + } + + if (filters.schemaInsightType) { + const schemaInsightTypes = + filters.schemaInsightType.toString().length > 0 + ? 
filters.schemaInsightType.toString().split(",") + : []; + if (schemaInsightTypes.includes(unset)) { + schemaInsightTypes.push(""); + } + filteredSchemaInsights = filteredSchemaInsights.filter( + schemaInsight => + schemaInsightTypes.length === 0 || + schemaInsightTypes.includes(insightType(schemaInsight.type)), + ); + } + + if (search) { + search = search.toLowerCase(); + filteredSchemaInsights = filteredSchemaInsights.filter( + schemaInsight => + schemaInsight.query?.toLowerCase().includes(search) || + schemaInsight.indexDetails?.indexName?.toLowerCase().includes(search) || + schemaInsight.execution?.statement.toLowerCase().includes(search) || + schemaInsight.execution?.summary.toLowerCase().includes(search) || + schemaInsight.execution?.fingerprintID.toLowerCase().includes(search), + ); + } + return filteredSchemaInsights; +}; + +export function insightType(type: InsightType): string { + switch (type) { + case "CREATE_INDEX": + return "Create New Index"; + case "DROP_INDEX": + return "Drop Unused Index"; + case "REPLACE_INDEX": + return "Replace Index"; + case "HIGH_WAIT_TIME": + return "High Wait Time"; + case "HIGH_RETRIES": + return "High Retry Counts"; + case "SUBOPTIMAL_PLAN": + return "Sub-Optimal Plan"; + case "FAILED": + return "Failed Execution"; + default: + return "Insight"; + } +} diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsightDetails/transactionInsightDetails.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsightDetails/transactionInsightDetails.tsx index 8fc9965db5e3..c364f7a50e92 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsightDetails/transactionInsightDetails.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsightDetails/transactionInsightDetails.tsx @@ -32,19 +32,18 @@ import { InsightEventDetailsResponse, } from "src/api"; import { - InsightRecommendation, InsightsSortedTable, makeInsightsColumns, } from "src/insightsTable/insightsTable"; import { WaitTimeDetailsTable } from "./insightDetailsTables"; import { getInsightEventDetailsFromState } from "../utils"; -import { EventExecution } from "../types"; -import { WorkloadInsightsError } from "../workloadInsights/util"; +import { EventExecution, InsightRecommendation } from "../types"; import classNames from "classnames/bind"; import { commonStyles } from "src/common"; import insightTableStyles from "src/insightsTable/insightsTable.module.scss"; import { CockroachCloudContext } from "../../contexts"; +import { InsightsError } from "../insightsErrorComponent"; const tableCx = classNames.bind(insightTableStyles); @@ -227,7 +226,7 @@ export class InsightDetails extends React.Component { error={this.props.insightError} render={this.renderContent} renderError={() => - WorkloadInsightsError({ + InsightsError({ execType: "transaction insights", }) } diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx index 7ac0463e3eb6..e834243a84b3 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/transactionInsights/transactionInsightsView.tsx @@ -37,8 +37,9 @@ import { getInsightsFromState, InsightEventFilters, } from "src/insights"; -import { EmptyInsightsTablePlaceholder, WorkloadInsightsError } from "../util"; +import { EmptyInsightsTablePlaceholder } from "../util"; 
import { TransactionInsightsTable } from "./transactionInsightsTable"; +import { InsightsError } from "../../insightsErrorComponent"; import styles from "src/statementsPage/statementsPage.module.scss"; import sortableTableStyles from "src/sortedtable/sortedtable.module.scss"; @@ -214,7 +215,7 @@ export const TransactionInsightsView: React.FC< page="transaction insights" error={transactionsError} renderError={() => - WorkloadInsightsError({ + InsightsError({ execType: "transaction insights", }) } diff --git a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/index.ts b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/index.ts index 3bbb3d532f1e..67f1409e71c9 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/index.ts +++ b/pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/util/index.ts @@ -13,4 +13,3 @@ export * from "./queriesCell"; export * from "./emptyInsightsTablePlaceholder"; export * from "./insightsColumns"; export * from "./dropDownSelect"; -export * from "./workloadInsightsError"; diff --git a/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.module.scss b/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.module.scss index 37283c9f8c51..338a3b2714fd 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.module.scss +++ b/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.module.scss @@ -14,6 +14,10 @@ .description-item { margin-bottom: 5px; + + .table-link { + color: $colors--link + } } .margin-bottom { diff --git a/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.tsx b/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.tsx index c8433f634664..78b1489d34eb 100644 --- a/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/insightsTable/insightsTable.tsx @@ -17,42 +17,12 @@ import { StatementLink } from "../statementsTable"; import IdxRecAction from "../insights/indexActionBtn"; import { Duration, statementsRetries } from "../util"; import { Anchor } from "../anchor"; +import { Link } from "react-router-dom"; +import { performanceTuningRecipes } from "../util"; +import { InsightRecommendation, insightType } from "../insights"; const cx = classNames.bind(styles); -export type InsightType = - | "DROP_INDEX" - | "CREATE_INDEX" - | "REPLACE_INDEX" - | "HIGH_WAIT_TIME" - | "HIGH_RETRIES" - | "SUBOPTIMAL_PLAN" - | "FAILED"; - -export interface InsightRecommendation { - type: InsightType; - database?: string; - table?: string; - indexID?: number; - query?: string; - execution?: executionDetails; - details?: insightDetails; -} - -export interface executionDetails { - statement?: string; - summary?: string; - fingerprintID?: string; - implicit?: boolean; - retries?: number; - indexRecommendations?: string[]; -} - -export interface insightDetails { - duration: number; - description: string; -} - export class InsightsSortedTable extends SortedTable {} const insightColumnLabels = { @@ -94,27 +64,6 @@ export const insightsTableTitles: InsightsTableTitleType = { }, }; -function insightType(type: InsightType): string { - switch (type) { - case "CREATE_INDEX": - return "Create New Index"; - case "DROP_INDEX": - return "Drop Unused Index"; - case "REPLACE_INDEX": - return "Replace Index"; - case "HIGH_WAIT_TIME": - return "High Wait Time"; - case "HIGH_RETRIES": - return "High Retry Counts"; - case "SUBOPTIMAL_PLAN": - return "Sub-Optimal Plan"; - case "FAILED": - return "Failed Execution"; - default: - return 
"Insight"; - } -} - function typeCell(value: string): React.ReactElement { return
{value}
; } @@ -144,7 +93,32 @@ function descriptionCell( ); case "DROP_INDEX": - return <>{`Index ${insightRec.indexID}`}; + return ( + <> +
+ Index: {" "} + + {insightRec.indexDetails.indexName} + +
+
+ Description: {" "} + {insightRec.indexDetails?.lastUsed} + {" Learn more about "} + + unused indexes + + {"."} +
+ + ); case "HIGH_WAIT_TIME": return ( <> diff --git a/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.spec.tsx b/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.spec.tsx index 72d29f9f9c10..3bab1b4b9173 100644 --- a/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.spec.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.spec.tsx @@ -21,6 +21,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "", nodes: "", username: "", @@ -39,12 +40,13 @@ describe("Test filter functions", (): void => { sqlType: "DML", database: "movr", regions: "us-central", + schemaInsightType: "Drop Unused Index", sessionStatus: "idle", nodes: "n1,n2", username: "root", }; const resultFilters = getFiltersFromQueryString( - "app=%24+internal&timeNumber=1&timeUnit=milliseconds&fullScan=true&sqlType=DML&database=movr&sessionStatus=idle&username=root®ions=us-central&nodes=n1,n2", + "app=%24+internal&timeNumber=1&timeUnit=milliseconds&fullScan=true&sqlType=DML&database=movr&sessionStatus=idle&username=root®ions=us-central&nodes=n1,n2&schemaInsightType=Drop+Unused+Index", ); expect(resultFilters).toEqual(expectedFilters); }); @@ -58,6 +60,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "", nodes: "", username: "", @@ -75,6 +78,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "", nodes: "", username: "", @@ -92,6 +96,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "open", nodes: "", username: "", @@ -109,6 +114,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "idle", nodes: "", username: "", @@ -126,6 +132,7 @@ describe("Test filter functions", (): void => { sqlType: "", database: "", regions: "", + schemaInsightType: "", sessionStatus: "closed", nodes: "", username: "", @@ -133,4 +140,24 @@ describe("Test filter functions", (): void => { const resultFilters = getFiltersFromQueryString("sessionStatus=closed"); expect(resultFilters).toEqual(expectedFilters); }); + + it("testing schemaInsightType", (): void => { + const expectedFilters: Filters = { + app: "", + timeNumber: "0", + timeUnit: "seconds", + fullScan: false, + sqlType: "", + database: "", + regions: "", + schemaInsightType: "Drop Unused Index", + sessionStatus: "", + nodes: "", + username: "", + }; + const resultFilters = getFiltersFromQueryString( + "schemaInsightType=Drop+Unused+Index", + ); + expect(resultFilters).toEqual(expectedFilters); + }); }); diff --git a/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.tsx b/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.tsx index e7ab48bd1336..7f0930351a84 100644 --- a/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/queryFilter/filter.tsx @@ -33,17 +33,20 @@ import { syncHistory } from "../util"; interface QueryFilter { onSubmitFilters: (filters: Filters) => void; smth?: string; - appNames: string[]; + appNames?: string[]; activeFilters: number; filters: Filters; dbNames?: string[]; usernames?: string[]; sessionStatuses?: string[]; + schemaInsightTypes?: string[]; regions?: string[]; nodes?: string[]; + hideAppNames?: boolean; showDB?: boolean; showUsername?: boolean; showSessionStatus?: boolean; + showSchemaInsightTypes?: boolean; 
showSqlType?: boolean; showScan?: boolean; showRegions?: boolean; @@ -71,6 +74,7 @@ export interface Filters extends Record { nodes?: string; username?: string; sessionStatus?: string; + schemaInsightType?: string; } const timeUnit = [ @@ -90,6 +94,7 @@ export const defaultFilters: Required = { nodes: "", username: "", sessionStatus: "", + schemaInsightType: "", }; // getFullFiltersObject returns Filters with every field defined as @@ -383,9 +388,11 @@ export class Filter extends React.Component { dbNames, usernames, sessionStatuses, + schemaInsightTypes, regions, nodes, activeFilters, + hideAppNames, showDB, showSqlType, showScan, @@ -394,6 +401,7 @@ export class Filter extends React.Component { timeLabel, showUsername, showSessionStatus, + showSchemaInsightTypes, } = this.props; const dropdownArea = hide ? hidden : dropdown; const customStyles = { @@ -429,11 +437,13 @@ export class Filter extends React.Component { border: "none", }); - const appsOptions = appNames.map(app => ({ - label: app, - value: app, - isSelected: this.isOptionSelected(app, filters.app), - })); + const appsOptions = !hideAppNames + ? appNames.map(app => ({ + label: app, + value: app, + isSelected: this.isOptionSelected(app, filters.app), + })) + : []; const appValue = appsOptions.filter(option => { return filters.app.split(",").includes(option.label); }); @@ -522,6 +532,32 @@ export class Filter extends React.Component { ); + const schemaInsightTypeOptions = showSchemaInsightTypes + ? schemaInsightTypes.map(schemaInsight => ({ + label: schemaInsight, + value: schemaInsight, + isSelected: this.isOptionSelected( + schemaInsight, + filters.schemaInsightType, + ), + })) + : []; + const schemaInsightTypeValue = schemaInsightTypeOptions.filter(option => { + return filters.schemaInsightType.split(",").includes(option.label); + }); + const schemaInsightTypeFilter = ( +
+    <div>
+      <div className={filterLabel.margin}>Schema Insight Type</div>
+      <MultiSelectCheckbox
+        options={schemaInsightTypeOptions}
+        placeholder="All"
+        field="schemaInsightType"
+        parent={this}
+        value={schemaInsightTypeValue}
+      />
+    </div>
+ ); + const regionsOptions = showRegions ? regions.map(region => ({ label: region, @@ -633,10 +669,11 @@ export class Filter extends React.Component {
- {appFilter} + {!hideAppNames ? appFilter : ""} {showDB ? dbFilter : ""} {showUsername ? usernameFilter : ""} {showSessionStatus ? sessionStatusFilter : ""} + {showSchemaInsightTypes ? schemaInsightTypeFilter : ""} {showSqlType ? sqlTypeFilter : ""} {showRegions ? regionsFilter : ""} {showNodes ? nodesFilter : ""} diff --git a/pkg/ui/workspaces/cluster-ui/src/queryFilter/utils.ts b/pkg/ui/workspaces/cluster-ui/src/queryFilter/utils.ts index ff67ca7704c0..87f84f5a7e96 100644 --- a/pkg/ui/workspaces/cluster-ui/src/queryFilter/utils.ts +++ b/pkg/ui/workspaces/cluster-ui/src/queryFilter/utils.ts @@ -14,7 +14,7 @@ import { ActiveStatementFilters, ActiveTransactionFilters, } from "src/activeExecutions/types"; -import { InsightEventFilters } from "../insights"; +import { InsightEventFilters, SchemaInsightEventFilters } from "../insights"; // This function returns a Filters object populated with values from the URL, or null // if there were no filters set. @@ -83,3 +83,20 @@ export function getInsightEventFiltersFromURL( return appFilters; } + +export function getSchemaInsightEventFiltersFromURL( + location: Location, +): Partial | null { + const filters = getFiltersFromURL(location); + if (!filters) return null; + + const schemaFilters = { + database: filters.database, + schemaInsightType: filters.schemaInsightType, + }; + + // If every entry is null, there were no active filters. Return null. + if (Object.values(schemaFilters).every(val => !val)) return null; + + return schemaFilters; +} diff --git a/pkg/ui/workspaces/cluster-ui/src/statementDetails/planDetails/planDetails.tsx b/pkg/ui/workspaces/cluster-ui/src/statementDetails/planDetails/planDetails.tsx index 25f51dabe602..88f7561a2552 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementDetails/planDetails/planDetails.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/statementDetails/planDetails/planDetails.tsx @@ -22,14 +22,13 @@ import { SortSetting } from "../../sortedtable"; import { Row } from "antd"; import "antd/lib/row/style"; import { - InsightRecommendation, InsightsSortedTable, - InsightType, makeInsightsColumns, } from "../../insightsTable/insightsTable"; import classNames from "classnames/bind"; import styles from "../statementDetails.module.scss"; import { CockroachCloudContext } from "../../contexts"; +import { InsightRecommendation, InsightType } from "../../insights"; const cx = classNames.bind(styles); diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.module.scss b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.module.scss index a37df251caa2..ca80114c0330 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.module.scss +++ b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.module.scss @@ -76,4 +76,5 @@ .inline { display: inline-flex; + color: $colors--link !important; } diff --git a/pkg/ui/workspaces/cluster-ui/src/store/localStorage/localStorage.reducer.ts b/pkg/ui/workspaces/cluster-ui/src/store/localStorage/localStorage.reducer.ts index bdc3c9350adc..5e2ac26a815a 100644 --- a/pkg/ui/workspaces/cluster-ui/src/store/localStorage/localStorage.reducer.ts +++ b/pkg/ui/workspaces/cluster-ui/src/store/localStorage/localStorage.reducer.ts @@ -33,12 +33,14 @@ export type LocalStorageState = { "sortSetting/SessionsPage": SortSetting; "sortSetting/JobsPage": SortSetting; "sortSetting/InsightsPage": SortSetting; + "sortSetting/SchemaInsightsPage": SortSetting; "filters/ActiveStatementsPage": Filters; 
"filters/ActiveTransactionsPage": Filters; "filters/StatementsPage": Filters; "filters/TransactionsPage": Filters; "filters/SessionsPage": Filters; "filters/InsightsPage": Filters; + "filters/SchemaInsightsPage": Filters; "search/StatementsPage": string; "search/TransactionsPage": string; "typeSetting/JobsPage": number; @@ -66,6 +68,11 @@ const defaultSortSettingInsights: SortSetting = { columnTitle: "startTime", }; +const defaultSortSettingSchemaInsights: SortSetting = { + ascending: false, + columnTitle: "insights", +}; + const defaultFiltersActiveExecutions = { app: defaultFilters.app, }; @@ -74,6 +81,11 @@ const defaultFiltersInsights = { app: defaultFilters.app, }; +const defaultFiltersSchemaInsights = { + database: defaultFilters.database, + schemaInsightType: defaultFilters.schemaInsightType, +}; + const defaultSessionsSortSetting: SortSetting = { ascending: false, columnTitle: "statementAge", @@ -134,6 +146,9 @@ const initialState: LocalStorageState = { "sortSetting/InsightsPage": JSON.parse(localStorage.getItem("sortSetting/InsightsPage")) || defaultSortSettingInsights, + "sortSetting/SchemaInsightsPage": + JSON.parse(localStorage.getItem("sortSetting/SchemaInsightsPage")) || + defaultSortSettingSchemaInsights, "filters/ActiveStatementsPage": JSON.parse(localStorage.getItem("filters/ActiveStatementsPage")) || defaultFiltersActiveExecutions, @@ -151,6 +166,9 @@ const initialState: LocalStorageState = { "filters/InsightsPage": JSON.parse(localStorage.getItem("filters/InsightsPage")) || defaultFiltersInsights, + "filters/SchemaInsightsPage": + JSON.parse(localStorage.getItem("filters/SchemaInsightsPage")) || + defaultFiltersSchemaInsights, "search/StatementsPage": JSON.parse(localStorage.getItem("search/StatementsPage")) || null, "search/TransactionsPage": diff --git a/pkg/ui/workspaces/cluster-ui/src/store/reducers.ts b/pkg/ui/workspaces/cluster-ui/src/store/reducers.ts index dd16b2e56f3e..fd639758947e 100644 --- a/pkg/ui/workspaces/cluster-ui/src/store/reducers.ts +++ b/pkg/ui/workspaces/cluster-ui/src/store/reducers.ts @@ -44,6 +44,10 @@ import { InsightDetailsState, reducer as insightDetails, } from "./insightDetails"; +import { + SchemaInsightsState, + reducer as schemaInsights, +} from "./schemaInsights"; export type AdminUiState = { statementDiagnostics: StatementDiagnosticsState; @@ -61,6 +65,7 @@ export type AdminUiState = { clusterLocks: ClusterLocksReqState; insights: InsightsState; insightDetails: InsightDetailsState; + schemaInsights: SchemaInsightsState; }; export type AppState = { @@ -83,6 +88,7 @@ export const reducers = combineReducers({ jobs, job, clusterLocks, + schemaInsights, }); export const rootActions = { diff --git a/pkg/ui/workspaces/cluster-ui/src/store/sagas.ts b/pkg/ui/workspaces/cluster-ui/src/store/sagas.ts index 324a2519d3b3..bb9b199eb08d 100644 --- a/pkg/ui/workspaces/cluster-ui/src/store/sagas.ts +++ b/pkg/ui/workspaces/cluster-ui/src/store/sagas.ts @@ -26,6 +26,7 @@ import { indexStatsSaga } from "./indexStats/indexStats.sagas"; import { clusterLocksSaga } from "./clusterLocks/clusterLocks.saga"; import { insightsSaga } from "./insights/insights.sagas"; import { insightDetailsSaga } from "./insightDetails"; +import { schemaInsightsSaga } from "./schemaInsights"; export function* sagas(cacheInvalidationPeriod?: number): SagaIterator { yield all([ @@ -44,5 +45,6 @@ export function* sagas(cacheInvalidationPeriod?: number): SagaIterator { fork(sqlDetailsStatsSaga), fork(indexStatsSaga), fork(clusterLocksSaga), + fork(schemaInsightsSaga), ]); } diff 
--git a/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/index.ts b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/index.ts new file mode 100644 index 000000000000..a5861f51d54c --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/index.ts @@ -0,0 +1,13 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +export * from "./schemaInsights.reducer"; +export * from "./schemaInsights.sagas"; +export * from "./schemaInsights.selectors"; diff --git a/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.reducer.ts b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.reducer.ts new file mode 100644 index 000000000000..7691ae0dde45 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.reducer.ts @@ -0,0 +1,55 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { createSlice, PayloadAction } from "@reduxjs/toolkit"; +import { DOMAIN_NAME, noopReducer } from "../utils"; +import moment, { Moment } from "moment"; +import { InsightRecommendation } from "../../insights"; + +export type SchemaInsightsState = { + data: InsightRecommendation[]; + lastUpdated: Moment; + lastError: Error; + valid: boolean; +}; + +const initialState: SchemaInsightsState = { + data: null, + lastUpdated: null, + lastError: null, + valid: false, +}; + +const schemaInsightsSlice = createSlice({ + name: `${DOMAIN_NAME}/schemaInsightsSlice`, + initialState, + reducers: { + received: (state, action: PayloadAction) => { + state.data = action.payload; + state.valid = true; + state.lastError = null; + state.lastUpdated = moment.utc(); + }, + failed: (state, action: PayloadAction) => { + state.valid = false; + state.lastError = action.payload; + state.lastUpdated = moment.utc(); + }, + invalidated: state => { + state.valid = false; + state.lastUpdated = moment.utc(); + }, + // Define actions that don't change state. + refresh: noopReducer, + request: noopReducer, + }, +}); + +export const { reducer, actions } = schemaInsightsSlice; diff --git a/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.spec.ts b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.spec.ts new file mode 100644 index 000000000000..35f43ff02db3 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.spec.ts @@ -0,0 +1,100 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
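The `schemaInsightsSlice` above follows the cluster-ui caching convention: `received` and `failed` stamp `lastUpdated`, `invalidated` drops validity, and `refresh`/`request` are no-op reducers that exist only to mint action types for the sagas to listen on. An illustrative mini-slice of the same shape (names here are hypothetical, not from the patch):

```ts
import { createSlice, PayloadAction } from "@reduxjs/toolkit";

type CacheState<T> = {
  data: T | null;
  valid: boolean;
  lastError: Error | null;
};

const demoSlice = createSlice({
  name: "demo/cache",
  initialState: { data: null, valid: false, lastError: null } as CacheState<
    string[]
  >,
  reducers: {
    received: (state, action: PayloadAction<string[]>) => {
      state.data = action.payload;
      state.valid = true;
      state.lastError = null;
    },
    failed: (state, action: PayloadAction<Error>) => {
      state.valid = false;
      state.lastError = action.payload;
    },
    invalidated: state => {
      state.valid = false;
    },
    // No-op reducers: they only exist so sagas can listen for these actions.
    refresh: state => state,
    request: state => state,
  },
});

export const { actions: demoActions, reducer: demoReducer } = demoSlice;
```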
+ +import { expectSaga } from "redux-saga-test-plan"; +import { + EffectProviders, + StaticProvider, + throwError, +} from "redux-saga-test-plan/providers"; +import * as matchers from "redux-saga-test-plan/matchers"; +import moment from "moment"; +import { getSchemaInsights } from "../../api"; +import { + refreshSchemaInsightsSaga, + requestSchemaInsightsSaga, +} from "./schemaInsights.sagas"; +import { + actions, + reducer, + SchemaInsightsState, +} from "./schemaInsights.reducer"; +import { InsightRecommendation } from "../../insights"; + +const lastUpdated = moment(); + +describe("SchemaInsights sagas", () => { + let spy: jest.SpyInstance; + beforeAll(() => { + spy = jest.spyOn(moment, "utc").mockImplementation(() => lastUpdated); + }); + + afterAll(() => { + spy.mockRestore(); + }); + + const schemaInsightsResponse: InsightRecommendation[] = [ + { + type: "DROP_INDEX", + database: "test_database", + query: "DROP INDEX test_table@test_idx;", + indexDetails: { + table: "test_table", + indexName: "test_idx", + indexID: 1, + lastUsed: "2022-08-22T22:30:02Z", + }, + }, + ]; + + const schemaInsightsAPIProvider: (EffectProviders | StaticProvider)[] = [ + [matchers.call.fn(getSchemaInsights), schemaInsightsResponse], + ]; + + describe("refreshSchemaInsightsSaga", () => { + it("dispatches request Schema Insights action", () => { + return expectSaga(refreshSchemaInsightsSaga, actions.request()) + .provide(schemaInsightsAPIProvider) + .put(actions.request()) + .run(); + }); + }); + + describe("requestSchemaInsightsSaga", () => { + it("successfully requests schema insights", () => { + return expectSaga(requestSchemaInsightsSaga, actions.request()) + .provide(schemaInsightsAPIProvider) + .put(actions.received(schemaInsightsResponse)) + .withReducer(reducer) + .hasFinalState({ + data: schemaInsightsResponse, + lastError: null, + valid: true, + lastUpdated, + }) + .run(); + }); + + it("returns error on failed request", () => { + const error = new Error("Failed request"); + return expectSaga(requestSchemaInsightsSaga, actions.request()) + .provide([[matchers.call.fn(getSchemaInsights), throwError(error)]]) + .put(actions.failed(error)) + .withReducer(reducer) + .hasFinalState({ + data: null, + lastError: error, + valid: false, + lastUpdated, + }) + .run(); + }); + }); +}); diff --git a/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.ts b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.ts new file mode 100644 index 000000000000..3480d9b6c457 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.sagas.ts @@ -0,0 +1,43 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
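The spec above leans on redux-saga-test-plan: a static provider intercepts the `getSchemaInsights` call effect so no network I/O happens, and `.put(...)` plus `.withReducer(...).hasFinalState(...)` assert on the dispatched actions and resulting state. A condensed sketch of the pattern with a hypothetical `fetchThing` saga:

```ts
import { expectSaga } from "redux-saga-test-plan";
import * as matchers from "redux-saga-test-plan/matchers";
import { call, put } from "redux-saga/effects";

// Hypothetical API and saga, standing in for getSchemaInsights and
// requestSchemaInsightsSaga.
const fetchThing = async (): Promise<string[]> => ["a", "b"];
function* fetchThingSaga() {
  const result: string[] = yield call(fetchThing);
  yield put({ type: "things/received", payload: result });
}

// The provider intercepts the call(fetchThing) effect and returns a stub,
// so the assertion runs without any real I/O.
test("fetchThingSaga dispatches received", () => {
  return expectSaga(fetchThingSaga)
    .provide([[matchers.call.fn(fetchThing), ["stubbed"]]])
    .put({ type: "things/received", payload: ["stubbed"] })
    .run();
});
```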
+ +import { all, call, put, takeLatest } from "redux-saga/effects"; + +import { actions } from "./schemaInsights.reducer"; +import { CACHE_INVALIDATION_PERIOD, throttleWithReset } from "../utils"; +import { rootActions } from "../reducers"; +import { getSchemaInsights } from "../../api"; + +export function* refreshSchemaInsightsSaga() { + yield put(actions.request()); +} + +export function* requestSchemaInsightsSaga(): any { + try { + const result = yield call(getSchemaInsights); + yield put(actions.received(result)); + } catch (e) { + yield put(actions.failed(e)); + } +} + +export function* schemaInsightsSaga( + cacheInvalidationPeriod: number = CACHE_INVALIDATION_PERIOD, +) { + yield all([ + throttleWithReset( + cacheInvalidationPeriod, + actions.refresh, + [actions.invalidated, rootActions.resetState], + refreshSchemaInsightsSaga, + ), + takeLatest(actions.request, requestSchemaInsightsSaga), + ]); +} diff --git a/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.selectors.ts b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.selectors.ts new file mode 100644 index 000000000000..8266fc36a761 --- /dev/null +++ b/pkg/ui/workspaces/cluster-ui/src/store/schemaInsights/schemaInsights.selectors.ts @@ -0,0 +1,43 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { createSelector } from "reselect"; +import { adminUISelector } from "../utils/selectors"; +import { insightType } from "../../insights"; + +export const selectSchemaInsights = createSelector( + adminUISelector, + adminUiState => { + if (!adminUiState.schemaInsights) return []; + return adminUiState.schemaInsights.data; + }, +); + +export const selectSchemaInsightsDatabases = createSelector( + selectSchemaInsights, + schemaInsights => { + if (!schemaInsights) return []; + return Array.from( + new Set(schemaInsights.map(schemaInsight => schemaInsight.database)), + ).sort(); + }, +); + +export const selectSchemaInsightsTypes = createSelector( + selectSchemaInsights, + schemaInsights => { + if (!schemaInsights) return []; + return Array.from( + new Set( + schemaInsights.map(schemaInsight => insightType(schemaInsight.type)), + ), + ).sort(); + }, +); diff --git a/pkg/ui/workspaces/db-console/src/app.spec.tsx b/pkg/ui/workspaces/db-console/src/app.spec.tsx index 3e30893d6e33..5829fb0f4c62 100644 --- a/pkg/ui/workspaces/db-console/src/app.spec.tsx +++ b/pkg/ui/workspaces/db-console/src/app.spec.tsx @@ -39,6 +39,10 @@ stubComponentInModule( "src/views/insights/workloadInsightDetailsPageConnected", "default", ); +stubComponentInModule( + "src/views/insights/schemaInsightsPageConnected", + "default", +); import React from "react"; import { Action, Store } from "redux"; @@ -425,10 +429,14 @@ describe("Routing to", () => { /* insights */ } describe("'/insights' path", () => { - test("routes to component", () => { + test("routes to component - workload insights page", () => { navigateToPath("/insights"); screen.getByTestId("workloadInsightsPageConnected"); }); + test("routes to component - schema insights page", () => { + navigateToPath("/insights?tab=Schema+Insights"); + screen.getByTestId("schemaInsightsPageConnected"); + }); }); describe("'/insights/insightID' path", 
() => { test("routes to component", () => { diff --git a/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts b/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts index 3b6771978376..06831dfa8c4b 100644 --- a/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts +++ b/pkg/ui/workspaces/db-console/src/redux/apiReducers.ts @@ -422,6 +422,14 @@ const insightDetailsReducerObj = new KeyedCachedDataReducer( ); export const refreshInsightDetails = insightDetailsReducerObj.refresh; +const schemaInsightsReducerObj = new CachedDataReducer( + clusterUiApi.getSchemaInsights, + "schemaInsights", + null, + moment.duration(30, "s"), +); +export const refreshSchemaInsights = schemaInsightsReducerObj.refresh; + export interface APIReducersState { cluster: CachedDataReducerState; events: CachedDataReducerState; @@ -460,6 +468,7 @@ export interface APIReducersState { insights: CachedDataReducerState; insightDetails: KeyedCachedDataReducerState; statementInsights: CachedDataReducerState; + schemaInsights: CachedDataReducerState; } export const apiReducersReducer = combineReducers({ @@ -505,6 +514,7 @@ export const apiReducersReducer = combineReducers({ [insightDetailsReducerObj.actionNamespace]: insightDetailsReducerObj.reducer, [statementInsightsReducerObj.actionNamespace]: statementInsightsReducerObj.reducer, + [schemaInsightsReducerObj.actionNamespace]: schemaInsightsReducerObj.reducer, }); export { CachedDataReducerState, KeyedCachedDataReducerState }; diff --git a/pkg/ui/workspaces/db-console/src/views/insights/insightsOverview.tsx b/pkg/ui/workspaces/db-console/src/views/insights/insightsOverview.tsx index 3815d94313c4..fc401f659a3b 100644 --- a/pkg/ui/workspaces/db-console/src/views/insights/insightsOverview.tsx +++ b/pkg/ui/workspaces/db-console/src/views/insights/insightsOverview.tsx @@ -19,6 +19,7 @@ import { commonStyles, util } from "@cockroachlabs/cluster-ui"; import { RouteComponentProps } from "react-router-dom"; import { tabAttr, viewAttr } from "src/util/constants"; import WorkloadInsightsPageConnected from "src/views/insights/workloadInsightsPageConnected"; +import SchemaInsightsPageConnected from "src/views/insights/schemaInsightsPageConnected"; const { TabPane } = Tabs; @@ -64,6 +65,9 @@ const InsightsOverviewPage = (props: RouteComponentProps) => { + + +
diff --git a/pkg/ui/workspaces/db-console/src/views/insights/insightsSelectors.ts b/pkg/ui/workspaces/db-console/src/views/insights/insightsSelectors.ts
index 8cbe77abceb2..68c10709f222 100644
--- a/pkg/ui/workspaces/db-console/src/views/insights/insightsSelectors.ts
+++ b/pkg/ui/workspaces/db-console/src/views/insights/insightsSelectors.ts
@@ -15,7 +15,9 @@ import {
   defaultFilters,
   SortSetting,
   InsightEventFilters,
+  SchemaInsightEventFilters,
   api,
+  insightType,
 } from "@cockroachlabs/cluster-ui";
 import { RouteComponentProps } from "react-router-dom";
 import { CachedDataReducerState } from "src/redux/cachedDataReducer";
@@ -57,3 +59,53 @@ export const selectInsightDetails = createSelector(
     return insight[insightId];
   },
 );
+
+export const schemaInsightsFiltersLocalSetting = new LocalSetting<
+  AdminUIState,
+  SchemaInsightEventFilters
+>("filters/SchemaInsightsPage", (state: AdminUIState) => state.localSettings, {
+  database: defaultFilters.database,
+  schemaInsightType: defaultFilters.schemaInsightType,
+});
+
+export const schemaInsightsSortLocalSetting = new LocalSetting<
+  AdminUIState,
+  SortSetting
+>(
+  "sortSetting/SchemaInsightsPage",
+  (state: AdminUIState) => state.localSettings,
+  {
+    ascending: false,
+    columnTitle: "insights",
+  },
+);
+
+export const selectSchemaInsights = createSelector(
+  (state: AdminUIState) => state.cachedData,
+  adminUiState => {
+    if (!adminUiState.schemaInsights) return [];
+    return adminUiState.schemaInsights.data;
+  },
+);
+
+export const selectSchemaInsightsDatabases = createSelector(
+  selectSchemaInsights,
+  schemaInsights => {
+    if (!schemaInsights) return [];
+    return Array.from(
+      new Set(schemaInsights.map(schemaInsight => schemaInsight.database)),
+    ).sort();
+  },
+);
+
+export const selectSchemaInsightsTypes = createSelector(
+  selectSchemaInsights,
+  schemaInsights => {
+    if (!schemaInsights) return [];
+    return Array.from(
+      new Set(
+        schemaInsights.map(schemaInsight => insightType(schemaInsight.type)),
+      ),
+    ).sort();
+  },
+);
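Aside (not part of the patch): because these selectors are built with reselect's createSelector, repeated reads against the same store state return the memoized arrays instead of redoing the Set/sort work. A small sketch, where makeState is a hypothetical test helper that builds an AdminUIState with a populated schemaInsights slice:

import { selectSchemaInsightsDatabases } from "src/views/insights/insightsSelectors";

const state = makeState(); // hypothetical helper, not part of the patch
const first = selectSchemaInsightsDatabases(state);
const second = selectSchemaInsightsDatabases(state);
// Identical input state => memoized result: same array instance both times.
console.assert(first === second);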
diff --git a/pkg/ui/workspaces/db-console/src/views/insights/schemaInsightsPageConnected.tsx b/pkg/ui/workspaces/db-console/src/views/insights/schemaInsightsPageConnected.tsx
new file mode 100644
index 000000000000..8647ceddaa4e
--- /dev/null
+++ b/pkg/ui/workspaces/db-console/src/views/insights/schemaInsightsPageConnected.tsx
@@ -0,0 +1,60 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import { connect } from "react-redux";
+import { RouteComponentProps, withRouter } from "react-router-dom";
+import { refreshSchemaInsights } from "src/redux/apiReducers";
+import { AdminUIState } from "src/redux/state";
+import {
+  SchemaInsightEventFilters,
+  SortSetting,
+  SchemaInsightsViewStateProps,
+  SchemaInsightsViewDispatchProps,
+  SchemaInsightsView,
+} from "@cockroachlabs/cluster-ui";
+import {
+  schemaInsightsFiltersLocalSetting,
+  schemaInsightsSortLocalSetting,
+  selectSchemaInsights,
+  selectSchemaInsightsDatabases,
+  selectSchemaInsightsTypes,
+} from "src/views/insights/insightsSelectors";
+
+const mapStateToProps = (
+  state: AdminUIState,
+  _props: RouteComponentProps,
+): SchemaInsightsViewStateProps => ({
+  schemaInsights: selectSchemaInsights(state),
+  schemaInsightsDatabases: selectSchemaInsightsDatabases(state),
+  schemaInsightsTypes: selectSchemaInsightsTypes(state),
+  schemaInsightsError: state.cachedData?.schemaInsights.lastError,
+  filters: schemaInsightsFiltersLocalSetting.selector(state),
+  sortSetting: schemaInsightsSortLocalSetting.selector(state),
+});
+
+const mapDispatchToProps = {
+  onFiltersChange: (filters: SchemaInsightEventFilters) =>
+    schemaInsightsFiltersLocalSetting.set(filters),
+  onSortChange: (ss: SortSetting) => schemaInsightsSortLocalSetting.set(ss),
+  refreshSchemaInsights: refreshSchemaInsights,
+};
+
+const SchemaInsightsPageConnected = withRouter(
+  connect<
+    SchemaInsightsViewStateProps,
+    SchemaInsightsViewDispatchProps,
+    RouteComponentProps
+  >(
+    mapStateToProps,
+    mapDispatchToProps,
+  )(SchemaInsightsView),
+);
+
+export default SchemaInsightsPageConnected;
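Aside (not part of the patch): a hedged sketch of mounting the connected page in a test harness, mirroring the ?tab=Schema+Insights route exercised in app.spec.tsx; the store argument is assumed to come from whatever Redux store factory the test suite already provides.

// harness.spec.tsx -- illustrative only.
import React from "react";
import { Provider } from "react-redux";
import { MemoryRouter } from "react-router-dom";
import { render } from "@testing-library/react";
import SchemaInsightsPageConnected from "src/views/insights/schemaInsightsPageConnected";

// `store` is a configured Redux store (hypothetical factory, not in the patch).
export const renderSchemaInsightsPage = (store: any) =>
  render(
    <Provider store={store}>
      <MemoryRouter initialEntries={["/insights?tab=Schema+Insights"]}>
        <SchemaInsightsPageConnected />
      </MemoryRouter>
    </Provider>,
  );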