From 4d3d4484d4e53ed63b82650c4849ca91b39cc0b3 Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Tue, 15 Mar 2022 20:02:26 +1100
Subject: [PATCH 01/11] clisqlshell: add ndjson as --format option
Resolves #73022
Release justification: low-risk new update
Release note: None
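For context, a minimal standalone Go sketch of the per-row serialization the
new reporter performs (column names and values here are illustrative; the
output matches the example tests below):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Mirrors ndjsonReporter.iter: each row becomes one JSON object
        // keyed by column name, terminated by a newline.
        cols := []string{"d", "s"}
        row := []string{"printable ASCII", "foo"}
        obj := make(map[string]string, len(row))
        for i := range row {
            obj[cols[i]] = row[i]
        }
        out, err := json.Marshal(obj)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"d":"printable ASCII","s":"foo"}
    }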
---
pkg/cli/clisqlexec/format_table.go | 43 ++++++++++++++++++++++
pkg/cli/clisqlexec/format_table_test.go | 16 ++++++++
pkg/cli/clisqlexec/table_display_format.go | 7 ++++
pkg/cli/clisqlshell/sql.go | 2 +-
pkg/cli/interactive_tests/test_pretty.tcl | 7 ++++
5 files changed, 74 insertions(+), 1 deletion(-)
diff --git a/pkg/cli/clisqlexec/format_table.go b/pkg/cli/clisqlexec/format_table.go
index c7a9f8a2f474..f2e8d20bec96 100644
--- a/pkg/cli/clisqlexec/format_table.go
+++ b/pkg/cli/clisqlexec/format_table.go
@@ -12,6 +12,7 @@ package clisqlexec
import (
"bytes"
+ "encoding/json"
"fmt"
"html"
"io"
@@ -584,6 +585,45 @@ func (p *recordReporter) doneRows(w io.Writer, seenRows int) error {
func (p *recordReporter) beforeFirstRow(_ io.Writer, _ RowStrIter) error { return nil }
func (p *recordReporter) doneNoRows(_ io.Writer) error { return nil }
+type ndjsonReporter struct {
+ cols []string
+}
+
+func (n *ndjsonReporter) describe(w io.Writer, cols []string) error {
+ n.cols = cols
+ return nil
+}
+
+func (n *ndjsonReporter) beforeFirstRow(w io.Writer, allRows RowStrIter) error {
+ return nil
+}
+
+func (n *ndjsonReporter) iter(w, ew io.Writer, rowIdx int, row []string) error {
+ retMap := make(map[string]string, len(row))
+ for i := range row {
+ retMap[n.cols[i]] = row[i]
+ }
+ out, err := json.Marshal(retMap)
+ if err != nil {
+ return err
+ }
+ if _, err := ew.Write(out); err != nil {
+ return err
+ }
+ if _, err := ew.Write([]byte("\n")); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (n *ndjsonReporter) doneRows(w io.Writer, seenRows int) error {
+ return nil
+}
+
+func (n *ndjsonReporter) doneNoRows(w io.Writer) error {
+ return nil
+}
+
type sqlReporter struct {
noColumns bool
}
@@ -644,6 +684,9 @@ func (sqlExecCtx *Context) makeReporter(w io.Writer) (rowReporter, func(), error
reporter, cleanup := makeCSVReporter(w, sqlExecCtx.TableDisplayFormat)
return reporter, cleanup, nil
+ case TableDisplayNDJSON:
+ return &ndjsonReporter{}, nil, nil
+
case TableDisplayRaw:
return &rawReporter{}, nil, nil
diff --git a/pkg/cli/clisqlexec/format_table_test.go b/pkg/cli/clisqlexec/format_table_test.go
index 0dbb957251c1..92a27b0a6c7b 100644
--- a/pkg/cli/clisqlexec/format_table_test.go
+++ b/pkg/cli/clisqlexec/format_table_test.go
@@ -203,6 +203,7 @@ func Example_sql_empty_table() {
// -----
// (0 rows)
// sql --format=records -e select * from t.norows
+ // sql --format=ndjson -e select * from t.norows
// sql --format=sql -e select * from t.norows
// CREATE TABLE results (
// x STRING
@@ -237,6 +238,10 @@ func Example_sql_empty_table() {
// (3 rows)
// sql --format=records -e select * from t.nocols
// (3 rows)
+ // sql --format=ndjson -e select * from t.nocols
+ // {}
+ // {}
+ // {}
// sql --format=sql -e select * from t.nocols
// CREATE TABLE results (
// );
@@ -278,6 +283,7 @@ func Example_sql_empty_table() {
// (0 rows)
// sql --format=records -e select * from t.nocolsnorows
// (0 rows)
+ // sql --format=ndjson -e select * from t.nocolsnorows
// sql --format=sql -e select * from t.nocolsnorows
// CREATE TABLE results (
// );
@@ -593,6 +599,16 @@ func Example_sql_table() {
// s | a b c+
// | 12 123123213 12313
// d | tabs
+ // sql --format=ndjson -e select * from t.t
+ // {"d":"printable ASCII","s":"foo"}
+ // {"d":"printable ASCII with quotes","s":"\"foo"}
+ // {"d":"printable ASCII with backslash","s":"\\foo"}
+ // {"d":"non-printable ASCII","s":"foo\nbar"}
+ // {"d":"printable UTF8","s":"κόσμε"}
+ // {"d":"printable UTF8 using escapes","s":"ñ"}
+ // {"d":"non-printable UTF8 string","s":"\\x01"}
+ // {"d":"UTF8 string with RTL char","s":"܈85"}
+ // {"d":"tabs","s":"a\tb\tc\n12\t123123213\t12313"}
// sql --format=sql -e select * from t.t
// CREATE TABLE results (
// s STRING,
diff --git a/pkg/cli/clisqlexec/table_display_format.go b/pkg/cli/clisqlexec/table_display_format.go
index aac5ae5f88c1..c089cf523d85 100644
--- a/pkg/cli/clisqlexec/table_display_format.go
+++ b/pkg/cli/clisqlexec/table_display_format.go
@@ -35,6 +35,9 @@ const (
// TableDisplayRecords is a record-oriented format. It is somewhat
// compatible with 'psql' "expanded display" mode.
TableDisplayRecords
+	// TableDisplayNDJSON reports results in newline-delimited JSON (ndjson) format
+ // (https://github.com/ndjson/ndjson-spec).
+ TableDisplayNDJSON
// TableDisplaySQL reports results using SQL statements that mimic
// the creation of a SQL table containing the result values.
TableDisplaySQL
@@ -71,6 +74,8 @@ func (f *TableDisplayFormat) String() string {
return "records"
case TableDisplaySQL:
return "sql"
+ case TableDisplayNDJSON:
+ return "ndjson"
case TableDisplayHTML:
return "html"
case TableDisplayRawHTML:
@@ -94,6 +99,8 @@ func (f *TableDisplayFormat) Set(s string) error {
*f = TableDisplayRecords
case "sql":
*f = TableDisplaySQL
+ case "ndjson":
+ *f = TableDisplayNDJSON
case "html":
*f = TableDisplayHTML
case "rawhtml":
diff --git a/pkg/cli/clisqlshell/sql.go b/pkg/cli/clisqlshell/sql.go
index 19bd732c688a..f9c5f8ff6317 100644
--- a/pkg/cli/clisqlshell/sql.go
+++ b/pkg/cli/clisqlshell/sql.go
@@ -359,7 +359,7 @@ var options = map[string]struct {
display: func(c *cliState) string { return strconv.Itoa(c.sqlExecCtx.TableBorderMode) },
},
`display_format`: {
- description: "the output format for tabular data (table, csv, tsv, html, sql, records, raw)",
+ description: "the output format for tabular data (table, csv, tsv, html, ndjson, sql, records, raw)",
isBoolean: false,
validDuringMultilineEntry: true,
set: func(c *cliState, val string) error {
diff --git a/pkg/cli/interactive_tests/test_pretty.tcl b/pkg/cli/interactive_tests/test_pretty.tcl
index 68b4fe02f9d8..5b1106798ec6 100644
--- a/pkg/cli/interactive_tests/test_pretty.tcl
+++ b/pkg/cli/interactive_tests/test_pretty.tcl
@@ -19,6 +19,13 @@ eexpect "1 row"
eexpect ":/# "
end_test
+
+start_test "Check that tables are pretty-printed when output is not a terminal but --format=ndjson is specified."
+send "echo 'select 1 as WOO;' | $argv sql --format=ndjson | cat\r"
+eexpect "{\"woo\":\"1\"}"
+eexpect ":/# "
+end_test
+
start_test "Check that tables are pretty-printed when input is not a terminal and --format=table is not specified, but output is a terminal."
send "echo begin; echo 'select 1 as WOO;' | $argv sql\r"
eexpect "woo"
From a9253e6110c60a490d5c04af3e52648a7261e157 Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Fri, 11 Mar 2022 14:58:10 +1100
Subject: [PATCH 02/11] builtins: add crdb_internal.validate_ttl_scheduled_jobs
Release justification: high benefit addition to new functionality
Release note (sql change): Added a
`crdb_internal.validate_ttl_scheduled_jobs` builtin, which verifies that each
TTL table points to a valid scheduled job that will carry out the deletion of
expired rows.
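A usage sketch, assuming a locally running cluster and the lib/pq driver (the
connection string is an assumption for a local demo node):

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/lib/pq"
    )

    func main() {
        db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        // The builtin returns void on success and an error if any TTL table
        // no longer points at a valid scheduled job.
        if _, err := db.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`); err != nil {
            log.Fatalf("TTL schedule validation failed: %v", err)
        }
        log.Println("all TTL tables point to valid scheduled jobs")
    }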
---
docs/generated/sql/functions.md | 2 +
.../testdata/backup-restore/row_level_ttl | 12 +++
pkg/sql/check.go | 81 ++++++++++++++++
pkg/sql/check_test.go | 92 +++++++++++++++++++
pkg/sql/faketreeeval/evalctx.go | 5 +
.../testdata/logic_test/row_level_ttl | 6 ++
pkg/sql/sem/builtins/builtins.go | 15 +++
pkg/sql/sem/tree/eval.go | 4 +
pkg/sql/ttl/ttlschedule/ttlschedule.go | 1 -
9 files changed, 217 insertions(+), 1 deletion(-)
create mode 100644 pkg/sql/check_test.go
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index d1b095f6730e..3b9f6ab4e135 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -3030,6 +3030,8 @@ table. Returns an error if validation fails.
crdb_internal.validate_session_revival_token(token: bytes) → bool | Validate a token that was created by create_session_revival_token. Intended for testing.
|
+crdb_internal.validate_ttl_scheduled_jobs() → void | Validate all TTL tables have a valid scheduled job attached.
+ |
crdb_internal.void_func() → void | This function is used only by CockroachDB’s developers for testing purposes.
|
current_database() → string | Returns the current database.
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/row_level_ttl b/pkg/ccl/backupccl/testdata/backup-restore/row_level_ttl
index f7da8e077d4b..7c3e27ba9b63 100644
--- a/pkg/ccl/backupccl/testdata/backup-restore/row_level_ttl
+++ b/pkg/ccl/backupccl/testdata/backup-restore/row_level_ttl
@@ -28,6 +28,10 @@ exec-sql
RESTORE FROM 'nodelocal://0/full_cluster_backup/'
----
+exec-sql
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+----
+
query-sql
SELECT create_statement FROM [SHOW CREATE TABLE d.public.t]
----
@@ -57,6 +61,10 @@ exec-sql
RESTORE DATABASE d FROM 'nodelocal://0/database_backup/'
----
+exec-sql
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+----
+
query-sql
SELECT create_statement FROM [SHOW CREATE TABLE d.public.t]
----
@@ -90,6 +98,10 @@ exec-sql
RESTORE TABLE d.public.t FROM 'nodelocal://0/database_backup/'
----
+exec-sql
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+----
+
query-sql
SELECT create_statement FROM [SHOW CREATE TABLE d.public.t]
----
diff --git a/pkg/sql/check.go b/pkg/sql/check.go
index 322f66a0d281..c0dfe79e9648 100644
--- a/pkg/sql/check.go
+++ b/pkg/sql/check.go
@@ -16,8 +16,10 @@ import (
"fmt"
"strings"
+ "github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
@@ -29,6 +31,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
+ pbtypes "github.com/gogo/protobuf/types"
)
// validateCheckExpr verifies that the given CHECK expression returns true
@@ -578,6 +581,84 @@ func validateUniqueConstraint(
return nil
}
+// ValidateTTLScheduledJobsInCurrentDB is part of the EvalPlanner interface.
+func (p *planner) ValidateTTLScheduledJobsInCurrentDB(ctx context.Context) error {
+ dbName := p.CurrentDatabase()
+ log.Infof(ctx, "validating scheduled jobs in database %s", dbName)
+ db, err := p.Descriptors().GetImmutableDatabaseByName(
+ ctx, p.Txn(), dbName, tree.DatabaseLookupFlags{Required: true},
+ )
+ if err != nil {
+ return err
+ }
+ tableDescs, err := p.Descriptors().GetAllTableDescriptorsInDatabase(ctx, p.Txn(), db.GetID())
+ if err != nil {
+ return err
+ }
+
+ for _, tableDesc := range tableDescs {
+ if err = p.validateTTLScheduledJobInTable(ctx, tableDesc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// validateTTLScheduledJobInTable checks that the given table's row-level TTL, if any, points to a valid scheduled job.
+func (p *planner) validateTTLScheduledJobInTable(
+ ctx context.Context, tableDesc catalog.TableDescriptor,
+) error {
+ if !tableDesc.HasRowLevelTTL() {
+ return nil
+ }
+ ttl := tableDesc.GetRowLevelTTL()
+
+ execCfg := p.ExecCfg()
+ env := JobSchedulerEnv(execCfg)
+
+ sj, err := jobs.LoadScheduledJob(
+ ctx,
+ env,
+ ttl.ScheduleID,
+ execCfg.InternalExecutor,
+ p.txn,
+ )
+ if err != nil {
+ if jobs.HasScheduledJobNotFoundError(err) {
+ return pgerror.Newf(
+ pgcode.Internal,
+ "table id %d maps to a non-existent schedule id %d",
+ tableDesc.GetID(),
+ ttl.ScheduleID,
+ )
+ }
+ return errors.Wrapf(err, "error fetching schedule id %d for table id %d", ttl.ScheduleID, tableDesc.GetID())
+ }
+
+ var args catpb.ScheduledRowLevelTTLArgs
+ if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, &args); err != nil {
+ return pgerror.Wrapf(
+ err,
+ pgcode.Internal,
+ "error unmarshalling scheduled jobs args for table id %d, schedule id %d",
+ tableDesc.GetID(),
+ ttl.ScheduleID,
+ )
+ }
+
+ if args.TableID != tableDesc.GetID() {
+ return pgerror.Newf(
+ pgcode.Internal,
+ "schedule id %d points to table id %d instead of table id %d",
+ ttl.ScheduleID,
+ args.TableID,
+ tableDesc.GetID(),
+ )
+ }
+
+ return nil
+}
+
func formatValues(colNames []string, values tree.Datums) string {
var pairs bytes.Buffer
for i := range values {
diff --git a/pkg/sql/check_test.go b/pkg/sql/check_test.go
new file mode 100644
index 000000000000..b12bc071cbdb
--- /dev/null
+++ b/pkg/sql/check_test.go
@@ -0,0 +1,92 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package sql_test
+
+import (
+ "context"
+ gosql "database/sql"
+ "fmt"
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/base"
+ "github.com/cockroachdb/cockroach/pkg/keys"
+ "github.com/cockroachdb/cockroach/pkg/kv"
+ "github.com/cockroachdb/cockroach/pkg/sql"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
+ "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
+ "github.com/cockroachdb/cockroach/pkg/util/leaktest"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateTTLScheduledJobs(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+ ctx := context.Background()
+
+ testCases := []struct {
+ desc string
+ setup func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64)
+ expectedErrRe func(tableID descpb.ID, scheduleID int64) string
+ cleanup func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, tableID descpb.ID, scheduleID int64)
+ }{
+ {
+ desc: "not pointing at a valid scheduled job",
+ setup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64) {
+ tableDesc.RowLevelTTL.ScheduleID = 0
+ require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error {
+ return col.WriteDesc(ctx, false /* kvBatch */, tableDesc, txn)
+ }))
+ },
+ expectedErrRe: func(tableID descpb.ID, scheduleID int64) string {
+ return fmt.Sprintf(`table id %d does not have a maps to a non-existent scheduled job id %d`, tableID, scheduleID)
+ },
+ cleanup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, tableID descpb.ID, scheduleID int64) {
+ _, err := sqlDB.Exec(`DROP SCHEDULE $1`, scheduleID)
+ require.NoError(t, err)
+ _, err = sqlDB.Exec(`SELECT crdb_internal.repair_ttl_table_scheduled_job($1)`, tableID)
+ require.NoError(t, err)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
+ defer s.Stopper().Stop(ctx)
+
+ _, err := sqlDB.Exec(`CREATE TABLE t () WITH (ttl_expire_after = '10 mins')`)
+ require.NoError(t, err)
+
+ tableDesc := desctestutils.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", "t")
+ require.NotNil(t, tableDesc.GetRowLevelTTL())
+ scheduleID := tableDesc.GetRowLevelTTL().ScheduleID
+
+ tc.setup(t, sqlDB, kvDB, s, tableDesc, scheduleID)
+
+ _, err = sqlDB.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`)
+ require.Error(t, err)
+ require.Regexp(t, tc.expectedErrRe(tableDesc.GetID(), scheduleID), err)
+ require.Regexp(
+ t,
+ fmt.Sprintf(`use crdb_internal.repair_ttl_table_scheduled_job(%d) to repair the missing job`, tableDesc.GetID()),
+ err,
+ )
+
+ tc.cleanup(t, sqlDB, kvDB, tableDesc.GetID(), scheduleID)
+ _, err = sqlDB.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`)
+ require.NoError(t, err)
+ })
+ }
+}
diff --git a/pkg/sql/faketreeeval/evalctx.go b/pkg/sql/faketreeeval/evalctx.go
index d995c8c558fc..67f6975dcd5d 100644
--- a/pkg/sql/faketreeeval/evalctx.go
+++ b/pkg/sql/faketreeeval/evalctx.go
@@ -278,6 +278,11 @@ func (*DummyEvalPlanner) RevalidateUniqueConstraint(
return errors.WithStack(errEvalPlanner)
}
+// ValidateTTLScheduledJobsInCurrentDB is part of the EvalPlanner interface.
+func (*DummyEvalPlanner) ValidateTTLScheduledJobsInCurrentDB(ctx context.Context) error {
+ return errors.WithStack(errEvalPlanner)
+}
+
// ExecutorConfig is part of the EvalPlanner interface.
func (*DummyEvalPlanner) ExecutorConfig() interface{} {
return nil
diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
index ac0eff0bcda5..31c787177eb9 100644
--- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl
+++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
@@ -44,6 +44,9 @@ CREATE TABLE public.tbl (
FAMILY fam_0_id_text_crdb_internal_expiration (id, text, crdb_internal_expiration)
) WITH (ttl = 'on', ttl_automatic_column = 'on', ttl_expire_after = '00:10:00':::INTERVAL)
+statement ok
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+
statement error resetting "ttl_expire_after" is not permitted\nHINT: use `RESET \(ttl\)` to remove TTL from the table
ALTER TABLE tbl RESET (ttl_expire_after)
@@ -114,6 +117,9 @@ CREATE TABLE public.tbl (
FAMILY fam_0_id_text_crdb_internal_expiration (id, text)
)
+statement ok
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+
query I
SELECT count(1) FROM [SHOW SCHEDULES]
WHERE label LIKE 'row-level-ttl-%'
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index 1cdc61495989..eac78f26958d 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -6422,6 +6422,21 @@ table's zone configuration this will return NULL.`,
},
),
+ "crdb_internal.validate_ttl_scheduled_jobs": makeBuiltin(
+ tree.FunctionProperties{
+ Category: categorySystemInfo,
+ },
+ tree.Overload{
+ Types: tree.ArgTypes{},
+ ReturnType: tree.FixedReturnType(types.Void),
+ Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ return tree.DVoidDatum, evalCtx.Planner.ValidateTTLScheduledJobsInCurrentDB(evalCtx.Context)
+ },
+ Info: `Validate all TTL tables have a valid scheduled job attached.`,
+ Volatility: tree.VolatilityVolatile,
+ },
+ ),
+
"crdb_internal.check_password_hash_format": makeBuiltin(
tree.FunctionProperties{
Category: categorySystemInfo,
diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go
index 02cfcd476cef..0956e331a291 100644
--- a/pkg/sql/sem/tree/eval.go
+++ b/pkg/sql/sem/tree/eval.go
@@ -3304,6 +3304,10 @@ type EvalPlanner interface {
// constraint on the table.
RevalidateUniqueConstraint(ctx context.Context, tableID int, constraintName string) error
+	// ValidateTTLScheduledJobsInCurrentDB checks that each TTL table in the
+	// current database points to a valid scheduled job.
+ ValidateTTLScheduledJobsInCurrentDB(ctx context.Context) error
+
// QueryRowEx executes the supplied SQL statement and returns a single row, or
// nil if no row is found, or an error if more than one row is returned.
//
diff --git a/pkg/sql/ttl/ttlschedule/ttlschedule.go b/pkg/sql/ttl/ttlschedule/ttlschedule.go
index 476df185d952..74bfd871a301 100644
--- a/pkg/sql/ttl/ttlschedule/ttlschedule.go
+++ b/pkg/sql/ttl/ttlschedule/ttlschedule.go
@@ -38,7 +38,6 @@ type rowLevelTTLExecutor struct {
var _ jobs.ScheduledJobController = (*rowLevelTTLExecutor)(nil)
type rowLevelTTLMetrics struct {
- // TODO(#75189): add more useful metrics here
*jobs.ExecutorMetrics
}
From a499e06c2b4a155194a8904ef77de54d80aeaab5 Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Fri, 11 Mar 2022 15:28:54 +1100
Subject: [PATCH 03/11] descs: introduce GetTableNameByID / GetTableNameByDesc
This commit introduces the GetTableNameByDesc and GetTableNameByID
functions, which fetch a fully qualified *tree.TableName for the given table
descriptor or ID.
Release justification: low-risk refactor
Release note: None
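A sketch of the intended call pattern (the helper name fullyQualifiedName is
illustrative; the signatures come from the new file, and the transaction and
collection are assumed to be supplied by the caller):

    package example

    import (
        "context"

        "github.com/cockroachdb/cockroach/pkg/kv"
        "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
        "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
    )

    // fullyQualifiedName resolves a table's fully qualified name in one call
    // instead of separate schema and database lookups.
    func fullyQualifiedName(
        ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, tableID descpb.ID,
    ) (string, error) {
        tn, err := descs.GetTableNameByID(ctx, txn, descsCol, tableID)
        if err != nil {
            return "", err
        }
        return tn.FQString(), nil
    }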
---
pkg/sql/catalog/descs/BUILD.bazel | 1 +
pkg/sql/catalog/descs/table_name.go | 50 +++++++++++++++++++++++++++++
pkg/sql/ttl/ttljob/ttljob.go | 31 +++---------------
3 files changed, 55 insertions(+), 27 deletions(-)
create mode 100644 pkg/sql/catalog/descs/table_name.go
diff --git a/pkg/sql/catalog/descs/BUILD.bazel b/pkg/sql/catalog/descs/BUILD.bazel
index c9c7ea73a1ae..d68b6be84499 100644
--- a/pkg/sql/catalog/descs/BUILD.bazel
+++ b/pkg/sql/catalog/descs/BUILD.bazel
@@ -18,6 +18,7 @@ go_library(
"system_database_namespace_cache.go",
"system_table.go",
"table.go",
+ "table_name.go",
"temporary_descriptors.go",
"txn.go",
"type.go",
diff --git a/pkg/sql/catalog/descs/table_name.go b/pkg/sql/catalog/descs/table_name.go
new file mode 100644
index 000000000000..84d45e1ce34c
--- /dev/null
+++ b/pkg/sql/catalog/descs/table_name.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package descs
+
+import (
+ "context"
+
+ "github.com/cockroachdb/cockroach/pkg/kv"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/errors"
+)
+
+// GetTableNameByID fetches the full tree table name by the given ID.
+func GetTableNameByID(
+ ctx context.Context, txn *kv.Txn, tc *Collection, tableID descpb.ID,
+) (*tree.TableName, error) {
+ tbl, err := tc.GetImmutableTableByID(ctx, txn, tableID, tree.ObjectLookupFlagsWithRequired())
+ if err != nil {
+ return nil, err
+ }
+ return GetTableNameByDesc(ctx, txn, tc, tbl)
+}
+
+// GetTableNameByDesc fetches the full tree table name by the given table descriptor.
+func GetTableNameByDesc(
+ ctx context.Context, txn *kv.Txn, tc *Collection, tbl catalog.TableDescriptor,
+) (*tree.TableName, error) {
+ sc, err := tc.GetImmutableSchemaByID(ctx, txn, tbl.GetParentSchemaID(), tree.SchemaLookupFlags{Required: true})
+ if err != nil {
+ return nil, err
+ }
+ found, db, err := tc.GetImmutableDatabaseByID(ctx, txn, tbl.GetParentID(), tree.DatabaseLookupFlags{Required: true})
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, errors.AssertionFailedf("expected database %d to exist", tbl.GetParentID())
+ }
+ return tree.NewTableNameWithSchema(tree.Name(db.GetName()), tree.Name(sc.GetName()), tree.Name(tbl.GetName())), nil
+}
diff --git a/pkg/sql/ttl/ttljob/ttljob.go b/pkg/sql/ttl/ttljob/ttljob.go
index b0b0ef7aded0..8eca6b35ec9e 100644
--- a/pkg/sql/ttl/ttljob/ttljob.go
+++ b/pkg/sql/ttl/ttljob/ttljob.go
@@ -246,7 +246,7 @@ var _ jobs.Resumer = (*rowLevelTTLResumer)(nil)
func (t rowLevelTTLResumer) Resume(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
db := p.ExecCfg().DB
- descs := p.ExtendedEvalContext().Descs
+ descsCol := p.ExtendedEvalContext().Descs
if enabled := jobEnabled.Get(p.ExecCfg().SV()); !enabled {
return errors.Newf(
@@ -283,7 +283,7 @@ func (t rowLevelTTLResumer) Resume(ctx context.Context, execCtx interface{}) err
var name string
var rangeSpan roachpb.Span
if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
- desc, err := descs.GetImmutableTableByID(
+ desc, err := descsCol.GetImmutableTableByID(
ctx,
txn,
details.TableID,
@@ -320,34 +320,11 @@ func (t rowLevelTTLResumer) Resume(ctx context.Context, execCtx interface{}) err
return errors.Newf("ttl jobs on table %s are currently paused", tree.Name(desc.GetName()))
}
- _, dbDesc, err := descs.GetImmutableDatabaseByID(
- ctx,
- txn,
- desc.GetParentID(),
- tree.CommonLookupFlags{
- Required: true,
- },
- )
+ tn, err := descs.GetTableNameByDesc(ctx, txn, descsCol, desc)
if err != nil {
- return err
- }
- schemaDesc, err := descs.GetImmutableSchemaByID(
- ctx,
- txn,
- desc.GetParentSchemaID(),
- tree.CommonLookupFlags{
- Required: true,
- },
- )
- if err != nil {
- return err
+ return errors.Wrapf(err, "error fetching table name for TTL")
}
- tn := tree.MakeTableNameWithSchema(
- tree.Name(dbDesc.GetName()),
- tree.Name(schemaDesc.GetName()),
- tree.Name(desc.GetName()),
- )
name = tn.FQString()
rangeSpan = desc.TableSpan(p.ExecCfg().Codec)
ttlSettings = *ttl
From 61ca4c4eb0910e5a5adb419fcec17d0072e6739e Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Tue, 15 Mar 2022 08:00:56 +1100
Subject: [PATCH 04/11] rfcs: fix typo in RFC
Release justification: non-prod code change
Release note: None
---
docs/RFCS/20220120_row_level_ttl.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/RFCS/20220120_row_level_ttl.md b/docs/RFCS/20220120_row_level_ttl.md
index 7118ff618176..8dede2c71cd3 100644
--- a/docs/RFCS/20220120_row_level_ttl.md
+++ b/docs/RFCS/20220120_row_level_ttl.md
@@ -13,7 +13,7 @@ This has been a [feature commonly asked for](#20239).
This RFC proposes a CockroachDB level mechanism to support row-level TTL, where
rows will be deleted after a certain period of time. As a further extension in a
-later release, rows rows will be automatically hidden after they've expired
+later release, rows will be automatically hidden after they've expired
their TTL and before they've been physically deleted.
The following declarative syntaxes will initialize a table with row-level TTL:
@@ -379,8 +379,8 @@ a few problems:
process. This adds further complexity to CDC.
As row-level TTL is a "SQL level" feature, it makes sense that something in the
-SQL layer would be most appropriate to handle it. See [comparison
-doc](comparison doc) for other observations.
+SQL layer would be most appropriate to handle it. See [comparison doc](comparison doc)
+for other observations.
### Alternative TTL columns
Another proposal for TTL columns was to have two columns:
@@ -466,7 +466,7 @@ has two benefits, both of which are very significant:
This is predicated on filtering out expired rows working as otherwise users
could miss entries when querying the secondary index as opposed to the primary.
-### Improve the deletion loopl
+### Improve the deletion loop
We can speed up the deletion by using indexes if one was created on the TTL
column for the table.
From 5a1b83cce318e2f246e84091ff3e186937feda69 Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Fri, 11 Mar 2022 16:50:54 +1100
Subject: [PATCH 05/11] jobs: add descsCol to OnDrop method
In preparation for the next commit.
Release justification: low-risk changes for new functionality
Release note: None
---
pkg/ccl/backupccl/schedule_exec.go | 2 ++
pkg/jobs/BUILD.bazel | 1 +
pkg/jobs/scheduled_job_executor.go | 11 +++++++++--
pkg/sql/compact_sql_stats.go | 2 ++
pkg/sql/control_schedules.go | 11 ++++++++---
pkg/sql/ttl/ttlschedule/BUILD.bazel | 1 +
pkg/sql/ttl/ttlschedule/ttlschedule.go | 2 ++
7 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/pkg/ccl/backupccl/schedule_exec.go b/pkg/ccl/backupccl/schedule_exec.go
index 66e4733a41bd..e7c492880a8e 100644
--- a/pkg/ccl/backupccl/schedule_exec.go
+++ b/pkg/ccl/backupccl/schedule_exec.go
@@ -17,6 +17,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/sql"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
@@ -448,6 +449,7 @@ func (e *scheduledBackupExecutor) OnDrop(
env scheduledjobs.JobSchedulerEnv,
sj *jobs.ScheduledJob,
txn *kv.Txn,
+ descsCol *descs.Collection,
) error {
args := &ScheduledBackupExecutionArgs{}
if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, args); err != nil {
diff --git a/pkg/jobs/BUILD.bazel b/pkg/jobs/BUILD.bazel
index 11bd5b90e704..6d4b29342ab9 100644
--- a/pkg/jobs/BUILD.bazel
+++ b/pkg/jobs/BUILD.bazel
@@ -40,6 +40,7 @@ go_library(
"//pkg/sql/catalog",
"//pkg/sql/catalog/colinfo",
"//pkg/sql/catalog/descpb",
+ "//pkg/sql/catalog/descs",
"//pkg/sql/protoreflect",
"//pkg/sql/sem/builtins",
"//pkg/sql/sem/tree",
diff --git a/pkg/jobs/scheduled_job_executor.go b/pkg/jobs/scheduled_job_executor.go
index fd54b56b5007..fdff4a33a0d2 100644
--- a/pkg/jobs/scheduled_job_executor.go
+++ b/pkg/jobs/scheduled_job_executor.go
@@ -16,6 +16,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
@@ -63,8 +64,14 @@ type ScheduledJobExecutor interface {
type ScheduledJobController interface {
// OnDrop runs before the passed in `schedule` is dropped as part of a `DROP
// SCHEDULE` query.
- OnDrop(ctx context.Context, scheduleControllerEnv scheduledjobs.ScheduleControllerEnv,
- env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, txn *kv.Txn) error
+ OnDrop(
+ ctx context.Context,
+ scheduleControllerEnv scheduledjobs.ScheduleControllerEnv,
+ env scheduledjobs.JobSchedulerEnv,
+ schedule *ScheduledJob,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+ ) error
}
// ScheduledJobExecutorFactory is a callback to create a ScheduledJobExecutor.
diff --git a/pkg/sql/compact_sql_stats.go b/pkg/sql/compact_sql_stats.go
index 497f456d60a9..d485293c4e2f 100644
--- a/pkg/sql/compact_sql_stats.go
+++ b/pkg/sql/compact_sql_stats.go
@@ -20,6 +20,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats"
@@ -172,6 +173,7 @@ func (e *scheduledSQLStatsCompactionExecutor) OnDrop(
env scheduledjobs.JobSchedulerEnv,
schedule *jobs.ScheduledJob,
txn *kv.Txn,
+ descsCol *descs.Collection,
) error {
return persistedsqlstats.ErrScheduleUndroppable
}
diff --git a/pkg/sql/control_schedules.go b/pkg/sql/control_schedules.go
index 3d9ce3265455..c64896c2a74b 100644
--- a/pkg/sql/control_schedules.go
+++ b/pkg/sql/control_schedules.go
@@ -158,9 +158,14 @@ func (n *controlSchedulesNode) startExec(params runParams) error {
if controller, ok := ex.(jobs.ScheduledJobController); ok {
scheduleControllerEnv := scheduledjobs.MakeProdScheduleControllerEnv(
params.ExecCfg().ProtectedTimestampProvider, params.ExecCfg().InternalExecutor)
- if err := controller.OnDrop(params.ctx, scheduleControllerEnv,
- scheduledjobs.ProdJobSchedulerEnv, schedule,
- params.extendedEvalCtx.Txn); err != nil {
+ if err := controller.OnDrop(
+ params.ctx,
+ scheduleControllerEnv,
+ scheduledjobs.ProdJobSchedulerEnv,
+ schedule,
+ params.extendedEvalCtx.Txn,
+ params.p.Descriptors(),
+ ); err != nil {
return errors.Wrap(err, "failed to run OnDrop")
}
}
diff --git a/pkg/sql/ttl/ttlschedule/BUILD.bazel b/pkg/sql/ttl/ttlschedule/BUILD.bazel
index 7c130ad7e8e4..981d823e45da 100644
--- a/pkg/sql/ttl/ttlschedule/BUILD.bazel
+++ b/pkg/sql/ttl/ttlschedule/BUILD.bazel
@@ -13,6 +13,7 @@ go_library(
"//pkg/security",
"//pkg/sql",
"//pkg/sql/catalog/catpb",
+ "//pkg/sql/catalog/descs",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/sql/sem/tree",
diff --git a/pkg/sql/ttl/ttlschedule/ttlschedule.go b/pkg/sql/ttl/ttlschedule/ttlschedule.go
index 74bfd871a301..f9a173995e98 100644
--- a/pkg/sql/ttl/ttlschedule/ttlschedule.go
+++ b/pkg/sql/ttl/ttlschedule/ttlschedule.go
@@ -21,6 +21,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
@@ -53,6 +54,7 @@ func (s rowLevelTTLExecutor) OnDrop(
env scheduledjobs.JobSchedulerEnv,
schedule *jobs.ScheduledJob,
txn *kv.Txn,
+ descsCol *descs.Collection,
) error {
return errors.WithHint(
pgerror.Newf(
From d4e07124877a27e72f4e9e2006936fa5e313381e Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Fri, 11 Mar 2022 17:11:00 +1100
Subject: [PATCH 06/11] ttlschedule: allow dropping invalid schedules
Previously, `DROP SCHEDULE` for a row-level TTL schedule would always fail.
Now, it succeeds if the schedule is no longer valid.
Release justification: low risk high benefit change to new functionality
Release note: None
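A client-side sketch of the resulting behavior, assuming a local cluster, the
lib/pq driver, and a TTL table created with ttl_expire_after (connection
details are illustrative):

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/lib/pq"
    )

    func main() {
        db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        var scheduleID int64
        // Look up the schedule backing a row-level TTL table.
        if err := db.QueryRow(
            `SELECT id FROM [SHOW SCHEDULES] WHERE label LIKE 'row-level-ttl-%' LIMIT 1`,
        ).Scan(&scheduleID); err != nil {
            log.Fatal(err)
        }

        // Dropping a schedule that still backs a valid TTL table keeps
        // failing with a hint to use ALTER TABLE ... RESET (ttl); dropping
        // one whose table is gone or no longer references it now succeeds.
        if _, err := db.Exec(`DROP SCHEDULE $1`, scheduleID); err != nil {
            fmt.Println("schedule is still valid, cannot drop:", err)
        }
    }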
---
.../testdata/logic_test/row_level_ttl | 6 ++
pkg/sql/ttl/ttlschedule/BUILD.bazel | 1 +
pkg/sql/ttl/ttlschedule/ttlschedule.go | 68 +++++++++++++++++--
3 files changed, 68 insertions(+), 7 deletions(-)
diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
index 31c787177eb9..eba93bc61f06 100644
--- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl
+++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
@@ -70,6 +70,12 @@ WHERE label LIKE 'row-level-ttl-%'
----
1
+let $schedule_id
+SELECT id FROM [SHOW SCHEDULES] WHERE label LIKE 'row-level-ttl-%'
+
+statement error cannot drop a row level TTL schedule\nHINT: use ALTER TABLE test\.public\.tbl RESET \(ttl\) instead
+DROP SCHEDULE $schedule_id
+
statement ok
ALTER TABLE tbl SET (ttl_expire_after = '10 days')
diff --git a/pkg/sql/ttl/ttlschedule/BUILD.bazel b/pkg/sql/ttl/ttlschedule/BUILD.bazel
index 981d823e45da..953b2c21317b 100644
--- a/pkg/sql/ttl/ttlschedule/BUILD.bazel
+++ b/pkg/sql/ttl/ttlschedule/BUILD.bazel
@@ -17,6 +17,7 @@ go_library(
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/sql/sem/tree",
+ "//pkg/sql/sqlerrors",
"//pkg/sql/sqlutil",
"//pkg/util/metric",
"//pkg/util/timeutil",
diff --git a/pkg/sql/ttl/ttlschedule/ttlschedule.go b/pkg/sql/ttl/ttlschedule/ttlschedule.go
index f9a173995e98..d801e90db1b1 100644
--- a/pkg/sql/ttl/ttlschedule/ttlschedule.go
+++ b/pkg/sql/ttl/ttlschedule/ttlschedule.go
@@ -25,6 +25,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
@@ -56,13 +57,66 @@ func (s rowLevelTTLExecutor) OnDrop(
txn *kv.Txn,
descsCol *descs.Collection,
) error {
- return errors.WithHint(
- pgerror.Newf(
- pgcode.InvalidTableDefinition,
- "cannot drop row level TTL schedule",
- ),
- `use ALTER TABLE ... RESET (expire_after) instead`,
- )
+
+ var args catpb.ScheduledRowLevelTTLArgs
+ if err := pbtypes.UnmarshalAny(schedule.ExecutionArgs().Args, &args); err != nil {
+ return err
+ }
+
+ canDrop, err := canDropTTLSchedule(ctx, txn, descsCol, schedule, args)
+ if err != nil {
+ return err
+ }
+
+ if !canDrop {
+ tn, err := descs.GetTableNameByID(ctx, txn, descsCol, args.TableID)
+ if err != nil {
+ return err
+ }
+ f := tree.NewFmtCtx(tree.FmtSimple)
+ tn.Format(f)
+ return errors.WithHintf(
+ pgerror.Newf(
+ pgcode.InvalidTableDefinition,
+ "cannot drop a row level TTL schedule",
+ ),
+ `use ALTER TABLE %s RESET (ttl) instead`,
+ f.CloseAndGetString(),
+ )
+ }
+ return nil
+}
+
+// canDropTTLSchedule determines whether we can drop a given row-level TTL
+// schedule. This is intended to only be permitted for schedules which are not
+// valid.
+func canDropTTLSchedule(
+ ctx context.Context,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+ schedule *jobs.ScheduledJob,
+ args catpb.ScheduledRowLevelTTLArgs,
+) (bool, error) {
+ desc, err := descsCol.GetImmutableTableByID(ctx, txn, args.TableID, tree.ObjectLookupFlags{})
+ if err != nil {
+ // If the descriptor does not exist we can drop this schedule.
+ if sqlerrors.IsUndefinedRelationError(err) {
+ return true, nil
+ }
+ return false, err
+ }
+ if desc == nil {
+ return true, nil
+ }
+ // If there is no row-level TTL on the table we can drop this schedule.
+ if !desc.HasRowLevelTTL() {
+ return true, nil
+ }
+ // If there is a schedule id mismatch we can drop this schedule.
+ if desc.GetRowLevelTTL().ScheduleID != schedule.ScheduleID() {
+ return true, nil
+ }
+ return false, nil
}
// ExecuteJob implements the jobs.ScheduledJobController interface.
From 6b50953299b32480bd536279cd79243fcc85020e Mon Sep 17 00:00:00 2001
From: Oliver Tan
Date: Wed, 23 Mar 2022 11:06:07 +1100
Subject: [PATCH 07/11] sql: implement
crdb_internal.repair_ttl_table_scheduled_job
This commit is intended for "repairing" jobs that may have been broken
by a broken schema change interaction.
Release justification: high priority fix for new functionality
Release note (sql change): Adds a
`crdb_internal.repair_ttl_table_scheduled_job` builtin, which repairs
the given TTL table's scheduled job by supplanting it with a valid
schedule.
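A sketch of the intended repair flow, assuming an open *sql.DB connection to
the cluster; the table name `tbl` mirrors the logic test and is illustrative:

    package example

    import "database/sql"

    // repairTTLSchedule recreates the scheduled job for a TTL table whose
    // schedule is missing or no longer valid, then re-runs validation.
    func repairTTLSchedule(db *sql.DB) error {
        if _, err := db.Exec(
            `SELECT crdb_internal.repair_ttl_table_scheduled_job('tbl'::regclass::oid)`,
        ); err != nil {
            return err
        }
        // After the repair, validation should pass again.
        _, err := db.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`)
        return err
    }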
---
docs/generated/sql/functions.md | 2 +
pkg/sql/BUILD.bazel | 4 +
pkg/sql/check.go | 80 +++++++++++++++----
pkg/sql/check_test.go | 65 ++++++++++++---
pkg/sql/faketreeeval/evalctx.go | 5 ++
.../testdata/logic_test/row_level_ttl | 6 ++
pkg/sql/sem/builtins/builtins.go | 19 +++++
pkg/sql/sem/tree/eval.go | 3 +
8 files changed, 157 insertions(+), 27 deletions(-)
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index 3b9f6ab4e135..bda9847c878c 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -3001,6 +3001,8 @@ SELECT * FROM crdb_internal.check_consistency(true, ‘\x02’, ‘\x04’)
|
crdb_internal.range_stats(key: bytes) → jsonb | This function is used to retrieve range statistics information as a JSON object.
|
+crdb_internal.repair_ttl_table_scheduled_job(oid: oid) → void | Repairs the scheduled job for a TTL table if it is missing.
+ |
crdb_internal.reset_index_usage_stats() → bool | This function is used to clear the collected index usage statistics.
|
crdb_internal.reset_sql_stats() → bool | This function is used to clear the collected SQL statistics.
diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel
index 4fa37517eec0..f86771b91b45 100644
--- a/pkg/sql/BUILD.bazel
+++ b/pkg/sql/BUILD.bazel
@@ -469,6 +469,7 @@ go_test(
"backfill_test.go",
"builtin_mem_usage_test.go",
"builtin_test.go",
+ "check_test.go",
"comment_on_column_test.go",
"comment_on_constraint_test.go",
"comment_on_database_test.go",
@@ -585,6 +586,7 @@ go_test(
"//pkg/gossip",
"//pkg/jobs",
"//pkg/jobs/jobspb",
+ "//pkg/jobs/jobstest",
"//pkg/keys",
"//pkg/kv",
"//pkg/kv/kvclient",
@@ -653,6 +655,7 @@ go_test(
"//pkg/sql/sqlliveness",
"//pkg/sql/sqlstats",
"//pkg/sql/sqltestutils",
+ "//pkg/sql/sqlutil",
"//pkg/sql/stats",
"//pkg/sql/stmtdiagnostics",
"//pkg/sql/tests",
@@ -709,6 +712,7 @@ go_test(
"@com_github_cockroachdb_logtags//:logtags",
"@com_github_cockroachdb_redact//:redact",
"@com_github_gogo_protobuf//proto",
+ "@com_github_gogo_protobuf//types",
"@com_github_jackc_pgconn//:pgconn",
"@com_github_jackc_pgtype//:pgtype",
"@com_github_jackc_pgx_v4//:pgx",
diff --git a/pkg/sql/check.go b/pkg/sql/check.go
index c0dfe79e9648..61b6305c1077 100644
--- a/pkg/sql/check.go
+++ b/pkg/sql/check.go
@@ -604,6 +604,8 @@ func (p *planner) ValidateTTLScheduledJobsInCurrentDB(ctx context.Context) error
return nil
}
+var invalidTableTTLScheduledJobError = errors.Newf("invalid scheduled job for table")
+
// validateTTLScheduledJobInTable checks that the given table's row-level TTL, if any, points to a valid scheduled job.
func (p *planner) validateTTLScheduledJobInTable(
ctx context.Context, tableDesc catalog.TableDescriptor,
@@ -616,6 +618,14 @@ func (p *planner) validateTTLScheduledJobInTable(
execCfg := p.ExecCfg()
env := JobSchedulerEnv(execCfg)
+ wrapError := func(origErr error) error {
+ return errors.WithHintf(
+ errors.Mark(origErr, invalidTableTTLScheduledJobError),
+ `use crdb_internal.repair_ttl_table_scheduled_job(%d) to repair the missing job`,
+ tableDesc.GetID(),
+ )
+ }
+
sj, err := jobs.LoadScheduledJob(
ctx,
env,
@@ -625,11 +635,13 @@ func (p *planner) validateTTLScheduledJobInTable(
)
if err != nil {
if jobs.HasScheduledJobNotFoundError(err) {
- return pgerror.Newf(
- pgcode.Internal,
- "table id %d maps to a non-existent schedule id %d",
- tableDesc.GetID(),
- ttl.ScheduleID,
+ return wrapError(
+ pgerror.Newf(
+ pgcode.Internal,
+ "table id %d maps to a non-existent schedule id %d",
+ tableDesc.GetID(),
+ ttl.ScheduleID,
+ ),
)
}
return errors.Wrapf(err, "error fetching schedule id %d for table id %d", ttl.ScheduleID, tableDesc.GetID())
@@ -637,28 +649,62 @@ func (p *planner) validateTTLScheduledJobInTable(
var args catpb.ScheduledRowLevelTTLArgs
if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, &args); err != nil {
- return pgerror.Wrapf(
- err,
- pgcode.Internal,
- "error unmarshalling scheduled jobs args for table id %d, schedule id %d",
- tableDesc.GetID(),
- ttl.ScheduleID,
+ return wrapError(
+ pgerror.Wrapf(
+ err,
+ pgcode.Internal,
+ "error unmarshalling scheduled jobs args for table id %d, schedule id %d",
+ tableDesc.GetID(),
+ ttl.ScheduleID,
+ ),
)
}
if args.TableID != tableDesc.GetID() {
- return pgerror.Newf(
- pgcode.Internal,
- "schedule id %d points to table id %d instead of table id %d",
- ttl.ScheduleID,
- args.TableID,
- tableDesc.GetID(),
+ return wrapError(
+ pgerror.Newf(
+ pgcode.Internal,
+ "schedule id %d points to table id %d instead of table id %d",
+ ttl.ScheduleID,
+ args.TableID,
+ tableDesc.GetID(),
+ ),
)
}
return nil
}
+// RepairTTLScheduledJobForTable is part of the EvalPlanner interface.
+func (p *planner) RepairTTLScheduledJobForTable(ctx context.Context, tableID int64) error {
+ tableDesc, err := p.Descriptors().GetMutableTableByID(ctx, p.txn, descpb.ID(tableID), tree.ObjectLookupFlagsWithRequired())
+ if err != nil {
+ return err
+ }
+ validateErr := p.validateTTLScheduledJobInTable(ctx, tableDesc)
+ if validateErr == nil {
+ return nil
+ }
+ if !errors.HasType(validateErr, invalidTableTTLScheduledJobError) {
+ return errors.Wrap(validateErr, "error validating TTL on table")
+ }
+ sj, err := CreateRowLevelTTLScheduledJob(
+ ctx,
+ p.ExecCfg(),
+ p.txn,
+ p.User(),
+ tableDesc.GetID(),
+ tableDesc.GetRowLevelTTL(),
+ )
+ if err != nil {
+ return err
+ }
+ tableDesc.RowLevelTTL.ScheduleID = sj.ScheduleID()
+ return p.Descriptors().WriteDesc(
+ ctx, false /* kvTrace */, tableDesc, p.txn,
+ )
+}
+
func formatValues(colNames []string, values tree.Datums) string {
var pairs bytes.Buffer
for i := range values {
diff --git a/pkg/sql/check_test.go b/pkg/sql/check_test.go
index b12bc071cbdb..abdf0d95add6 100644
--- a/pkg/sql/check_test.go
+++ b/pkg/sql/check_test.go
@@ -17,16 +17,26 @@ import (
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
+ "github.com/cockroachdb/cockroach/pkg/jobs"
+ "github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
+ "github.com/cockroachdb/cockroach/pkg/jobs/jobstest"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/cockroachdb/cockroach/pkg/util/timeutil"
+ "github.com/cockroachdb/errors"
+ pbtypes "github.com/gogo/protobuf/types"
+ "github.com/lib/pq"
"github.com/stretchr/testify/require"
)
@@ -39,7 +49,6 @@ func TestValidateTTLScheduledJobs(t *testing.T) {
desc string
setup func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64)
expectedErrRe func(tableID descpb.ID, scheduleID int64) string
- cleanup func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, tableID descpb.ID, scheduleID int64)
}{
{
desc: "not pointing at a valid scheduled job",
@@ -50,13 +59,43 @@ func TestValidateTTLScheduledJobs(t *testing.T) {
}))
},
expectedErrRe: func(tableID descpb.ID, scheduleID int64) string {
- return fmt.Sprintf(`table id %d does not have a maps to a non-existent scheduled job id %d`, tableID, scheduleID)
+ return fmt.Sprintf(`table id %d maps to a non-existent schedule id 0`, tableID)
},
- cleanup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, tableID descpb.ID, scheduleID int64) {
- _, err := sqlDB.Exec(`DROP SCHEDULE $1`, scheduleID)
- require.NoError(t, err)
- _, err = sqlDB.Exec(`SELECT crdb_internal.repair_ttl_table_scheduled_job($1)`, tableID)
- require.NoError(t, err)
+ },
+ {
+ desc: "scheduled job points at an different table",
+ setup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64) {
+ ie := s.InternalExecutor().(sqlutil.InternalExecutor)
+ require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ sj, err := jobs.LoadScheduledJob(
+ ctx,
+ jobstest.NewJobSchedulerTestEnv(
+ jobstest.UseSystemTables,
+ timeutil.Now(),
+ tree.ScheduledBackupExecutor,
+ ),
+ scheduleID,
+ ie,
+ txn,
+ )
+ if err != nil {
+ return err
+ }
+ var args catpb.ScheduledRowLevelTTLArgs
+ if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, &args); err != nil {
+ return err
+ }
+ args.TableID = 0
+ any, err := pbtypes.MarshalAny(&args)
+ if err != nil {
+ return err
+ }
+ sj.SetExecutionDetails(sj.ExecutorType(), jobspb.ExecutionArguments{Args: any})
+ return sj.Update(ctx, ie, txn)
+ }))
+ },
+ expectedErrRe: func(tableID descpb.ID, scheduleID int64) string {
+ return fmt.Sprintf(`schedule id %d points to table id 0 instead of table id %d`, scheduleID, tableID)
},
},
}
@@ -78,13 +117,19 @@ func TestValidateTTLScheduledJobs(t *testing.T) {
_, err = sqlDB.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`)
require.Error(t, err)
require.Regexp(t, tc.expectedErrRe(tableDesc.GetID(), scheduleID), err)
+ var pgxErr *pq.Error
+ require.True(t, errors.As(err, &pgxErr))
require.Regexp(
t,
- fmt.Sprintf(`use crdb_internal.repair_ttl_table_scheduled_job(%d) to repair the missing job`, tableDesc.GetID()),
- err,
+ fmt.Sprintf(`use crdb_internal.repair_ttl_table_scheduled_job\(%d\) to repair the missing job`, tableDesc.GetID()),
+ pgxErr.Hint,
)
- tc.cleanup(t, sqlDB, kvDB, tableDesc.GetID(), scheduleID)
+ // Repair and check jobs are valid.
+ _, err = sqlDB.Exec(`DROP SCHEDULE $1`, scheduleID)
+ require.NoError(t, err)
+ _, err = sqlDB.Exec(`SELECT crdb_internal.repair_ttl_table_scheduled_job($1)`, tableDesc.GetID())
+ require.NoError(t, err)
_, err = sqlDB.Exec(`SELECT crdb_internal.validate_ttl_scheduled_jobs()`)
require.NoError(t, err)
})
diff --git a/pkg/sql/faketreeeval/evalctx.go b/pkg/sql/faketreeeval/evalctx.go
index 67f6975dcd5d..a9ebd29cb2ea 100644
--- a/pkg/sql/faketreeeval/evalctx.go
+++ b/pkg/sql/faketreeeval/evalctx.go
@@ -283,6 +283,11 @@ func (*DummyEvalPlanner) ValidateTTLScheduledJobsInCurrentDB(ctx context.Context
return errors.WithStack(errEvalPlanner)
}
+// RepairTTLScheduledJobForTable is part of the EvalPlanner interface.
+func (*DummyEvalPlanner) RepairTTLScheduledJobForTable(ctx context.Context, tableID int64) error {
+ return errors.WithStack(errEvalPlanner)
+}
+
// ExecutorConfig is part of the EvalPlanner interface.
func (*DummyEvalPlanner) ExecutorConfig() interface{} {
return nil
diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
index eba93bc61f06..31a7bee13a73 100644
--- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl
+++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl
@@ -47,6 +47,12 @@ CREATE TABLE public.tbl (
statement ok
SELECT crdb_internal.validate_ttl_scheduled_jobs()
+statement ok
+SELECT crdb_internal.repair_ttl_table_scheduled_job('tbl'::regclass::oid)
+
+statement ok
+SELECT crdb_internal.validate_ttl_scheduled_jobs()
+
statement error resetting "ttl_expire_after" is not permitted\nHINT: use `RESET \(ttl\)` to remove TTL from the table
ALTER TABLE tbl RESET (ttl_expire_after)
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index eac78f26958d..031c6c581a27 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -6437,6 +6437,25 @@ table's zone configuration this will return NULL.`,
},
),
+ "crdb_internal.repair_ttl_table_scheduled_job": makeBuiltin(
+ tree.FunctionProperties{
+ Category: categorySystemInfo,
+ },
+ tree.Overload{
+ Types: tree.ArgTypes{{"oid", types.Oid}},
+ ReturnType: tree.FixedReturnType(types.Void),
+ Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ oid := tree.MustBeDOid(args[0])
+ if err := evalCtx.Planner.RepairTTLScheduledJobForTable(evalCtx.Ctx(), int64(oid.DInt)); err != nil {
+ return nil, err
+ }
+ return tree.DVoidDatum, nil
+ },
+ Info: `Repairs the scheduled job for a TTL table if it is missing.`,
+ Volatility: tree.VolatilityVolatile,
+ },
+ ),
+
"crdb_internal.check_password_hash_format": makeBuiltin(
tree.FunctionProperties{
Category: categorySystemInfo,
diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go
index 0956e331a291..694348ff3f7b 100644
--- a/pkg/sql/sem/tree/eval.go
+++ b/pkg/sql/sem/tree/eval.go
@@ -3307,6 +3307,9 @@ type EvalPlanner interface {
// ValidateTTLScheduledJobsInCurrentDB checks that each TTL table in the
// current database points to a valid scheduled job.
ValidateTTLScheduledJobsInCurrentDB(ctx context.Context) error
+	// RepairTTLScheduledJobForTable repairs the scheduled job for the given table if
+ // it is invalid.
+ RepairTTLScheduledJobForTable(ctx context.Context, tableID int64) error
// QueryRowEx executes the supplied SQL statement and returns a single row, or
// nil if no row is found, or an error if more than one row is returned.
From 101aef73308df6312968338d13adde6f5ee03d64 Mon Sep 17 00:00:00 2001
From: arulajmani
Date: Tue, 22 Mar 2022 19:54:03 -0400
Subject: [PATCH 08/11] kvserver: don't GC if protected timestamp information
isn't available
We only want to run GC on a replica that has some PTS information (even if
it's stale). However, we don't want to run GC on a replica if no PTS
information is available at all. This can happen if a Replica is being considered
for GC before the initial scan of the KVSubscriber has completed.
This wasn't an issue before this patch for implicit reasons -- this
patch just makes the check explicit and adds a test. Previously, we
wouldn't run GC if no PTS information was available because our lease
was guaranteed to be newer than the empty timestamp.
Release note: None
---
pkg/kv/kvserver/replica_protected_timestamp.go | 11 +++++++++++
pkg/kv/kvserver/replica_protected_timestamp_test.go | 10 ++++++++++
2 files changed, 21 insertions(+)
diff --git a/pkg/kv/kvserver/replica_protected_timestamp.go b/pkg/kv/kvserver/replica_protected_timestamp.go
index fb86e671987f..827627a15a67 100644
--- a/pkg/kv/kvserver/replica_protected_timestamp.go
+++ b/pkg/kv/kvserver/replica_protected_timestamp.go
@@ -132,6 +132,17 @@ func (r *Replica) checkProtectedTimestampsForGC(
if err != nil {
return false, hlc.Timestamp{}, hlc.Timestamp{}, hlc.Timestamp{}, hlc.Timestamp{}, err
}
+
+ if read.readAt.IsEmpty() {
+ // We don't want to allow GC to proceed if no protected timestamp
+ // information is available. This can happen if the initial scan of the
+ // rangefeed established by the spanconfig.KVSubscriber hasn't completed
+ // yet.
+ log.VEventf(ctx, 1,
+ "not gc'ing replica %v because protected timestamp information is unavailable", r)
+ return false, hlc.Timestamp{}, hlc.Timestamp{}, hlc.Timestamp{}, hlc.Timestamp{}, nil
+ }
+
gcTimestamp = read.readAt
if !read.earliestProtectionTimestamp.IsEmpty() {
// NB: we want to allow GC up to the timestamp preceding the earliest valid
diff --git a/pkg/kv/kvserver/replica_protected_timestamp_test.go b/pkg/kv/kvserver/replica_protected_timestamp_test.go
index f02ca7af3ead..fb54c671ab32 100644
--- a/pkg/kv/kvserver/replica_protected_timestamp_test.go
+++ b/pkg/kv/kvserver/replica_protected_timestamp_test.go
@@ -53,6 +53,16 @@ func TestCheckProtectedTimestampsForGC(t *testing.T) {
require.Zero(t, gcTimestamp)
},
},
+ {
+ name: "no PTS information is available",
+ test: func(t *testing.T, r *Replica, mp *manualPTSReader) {
+ mp.asOf = hlc.Timestamp{}
+ canGC, _, gcTimestamp, _, _, err := r.checkProtectedTimestampsForGC(ctx, makeTTLDuration(10))
+ require.NoError(t, err)
+ require.False(t, canGC)
+ require.Zero(t, gcTimestamp)
+ },
+ },
{
name: "have overlapping but new enough that it's okay",
test: func(t *testing.T, r *Replica, mp *manualPTSReader) {
From 87610fef029bc62dfc97c5f6d9b935acd4c56c8f Mon Sep 17 00:00:00 2001
From: Oleg Afanasyev
Date: Sun, 20 Mar 2022 19:01:17 +0000
Subject: [PATCH 09/11] cli: debug decode-key to support base64
Previously, decode-key only supported hex-encoded keys. When using
JSON serialization of descriptors and recovery plans, you need
to decode keys that are base64 encoded.
This patch adds a flag to select the key encoding format.
The default mode stays the same, so any existing tooling will
continue to work as expected.
Release note: None
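For illustration, a small Go sketch that converts the hex key from the
existing help-text example into the base64 form accepted by the new
--encoding=base64 flag:

    package main

    import (
        "encoding/base64"
        "encoding/hex"
        "fmt"
    )

    func main() {
        // Hex-encoded key from the existing decode-key help example.
        raw, err := hex.DecodeString("BB89F902ADB43000151C2D1ED07DE6C009")
        if err != nil {
            panic(err)
        }
        // The same key as it appears in JSON-serialized descriptors or
        // recovery plans; pass it to `cockroach debug decode-key --encoding=base64`.
        fmt.Println(base64.StdEncoding.EncodeToString(raw))
    }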
---
pkg/cli/debug.go | 58 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 55 insertions(+), 3 deletions(-)
diff --git a/pkg/cli/debug.go b/pkg/cli/debug.go
index 2797bd981c18..7eec0fc0d107 100644
--- a/pkg/cli/debug.go
+++ b/pkg/cli/debug.go
@@ -129,6 +129,40 @@ func parsePositiveDuration(arg string) (time.Duration, error) {
return duration, nil
}
+type keyFormat int
+
+const (
+ hexKey = iota
+ base64Key
+)
+
+func (f *keyFormat) Set(value string) error {
+ switch value {
+ case "hex":
+ *f = hexKey
+ case "base64":
+ *f = base64Key
+ default:
+ return errors.Errorf("unsupported format %s", value)
+ }
+ return nil
+}
+
+func (f *keyFormat) String() string {
+ switch *f {
+ case hexKey:
+ return "hex"
+ case base64Key:
+ return "base64"
+ default:
+ panic(errors.AssertionFailedf("invalid format value %d", *f))
+ }
+}
+
+func (f *keyFormat) Type() string {
+ return "hex|base64"
+}
+
// OpenEngineOptions tunes the behavior of OpenEngine.
type OpenEngineOptions struct {
ReadOnly bool
@@ -528,19 +562,34 @@ func runDebugRangeDescriptors(cmd *cobra.Command, args []string) error {
})
}
+var decodeKeyOptions struct {
+ encoding keyFormat
+}
+
var debugDecodeKeyCmd = &cobra.Command{
Use: "decode-key",
Short: "decode ",
Long: `
-Decode a hexadecimal-encoded key and pretty-print it. For example:
+Decode encoded keys provided as command arguments and pretty-print them.
+The key encoding type can be selected using the --encoding flag.
+For example:
- $ decode-key BB89F902ADB43000151C2D1ED07DE6C009
+ $ cockroach debug decode-key BB89F902ADB43000151C2D1ED07DE6C009
/Table/51/1/44938288/1521140384.514565824,0
`,
Args: cobra.ArbitraryArgs,
RunE: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
- b, err := gohex.DecodeString(arg)
+ var b []byte
+ var err error
+ switch decodeKeyOptions.encoding {
+ case hexKey:
+ b, err = gohex.DecodeString(arg)
+ case base64Key:
+ b, err = base64.StdEncoding.DecodeString(arg)
+ default:
+ return errors.Errorf("unsupported key format %d", decodeKeyOptions.encoding)
+ }
if err != nil {
return err
}
@@ -1697,6 +1746,9 @@ func init() {
f.Var(&debugMergeLogsOpts.useColor, "color",
"force use of TTY escape codes to colorize the output")
+ f = debugDecodeKeyCmd.Flags()
+ f.Var(&decodeKeyOptions.encoding, "encoding", "key argument encoding")
+
f = debugDecodeProtoCmd.Flags()
f.StringVar(&debugDecodeProtoName, "schema", "cockroach.sql.sqlbase.Descriptor",
"fully qualified name of the proto to decode")
From d510c9dcb1df0a2ac02cf714311d0184beafbcbc Mon Sep 17 00:00:00 2001
From: Gerardo Torres
Date: Fri, 18 Mar 2022 14:15:09 -0400
Subject: [PATCH 10/11] ui: sort items in the sql activity dropdown menu
Fixes #78081.
Previously, app names in the dropdown menus for the statements, transactions,
and sessions pages were unsorted. This change sorts the app and database names.
Release note (ui change): app names and database names in the dropdown
menu are sorted.
---
.../workspaces/cluster-ui/src/sessions/sessionsPage.tsx | 2 +-
.../src/statementsPage/statementsPage.selectors.ts | 8 ++++++--
.../workspaces/cluster-ui/src/transactionsPage/utils.ts | 2 +-
.../db-console/src/views/statements/statements.spec.tsx | 2 +-
.../db-console/src/views/statements/statementsPage.tsx | 9 +++++++--
5 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/pkg/ui/workspaces/cluster-ui/src/sessions/sessionsPage.tsx b/pkg/ui/workspaces/cluster-ui/src/sessions/sessionsPage.tsx
index bd3adc7d2dc0..2c5dd1b56730 100644
--- a/pkg/ui/workspaces/cluster-ui/src/sessions/sessionsPage.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/sessions/sessionsPage.tsx
@@ -109,7 +109,7 @@ function getSessionAppFilterOptions(sessions: SessionInfo[]): string[] {
),
);
- return Array.from(uniqueAppNames);
+ return Array.from(uniqueAppNames).sort();
}
export class SessionsPage extends React.Component<
diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts
index ef420ab3e130..4a8eea6d07e6 100644
--- a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts
@@ -81,7 +81,9 @@ export const selectApps = createSelector(sqlStatsSelector, sqlStatsState => {
}
},
);
- return [].concat(sawBlank ? ["(unset)"] : []).concat(Object.keys(apps));
+ return []
+ .concat(sawBlank ? ["(unset)"] : [])
+ .concat(Object.keys(apps).sort());
});
// selectDatabases returns the array of all databases with statement statistics present
@@ -99,7 +101,9 @@ export const selectDatabases = createSelector(
s.key.key_data.database ? s.key.key_data.database : "(unset)",
),
),
- ).filter((dbName: string) => dbName !== null && dbName.length > 0);
+ )
+ .filter((dbName: string) => dbName !== null && dbName.length > 0)
+ .sort();
},
);
diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts
index ab5ee29a4bc5..4c8d296cccf8 100644
--- a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts
@@ -45,7 +45,7 @@ export const getTrxAppFilterOptions = (
.map(t => (t.stats_data.app ? t.stats_data.app : "(unset)")),
);
- return Array.from(uniqueAppNames);
+ return Array.from(uniqueAppNames).sort();
};
export const collectStatementsText = (statements: Statement[]): string =>
diff --git a/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx b/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx
index 3b4e28ee0082..a0641bbc00b2 100644
--- a/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx
@@ -214,7 +214,7 @@ describe("selectApps", () => {
const result = selectApps(state);
- assert.deepEqual(result, ["(unset)", "foobar", "cockroach sql"]);
+ assert.deepEqual(result, ["(unset)", "cockroach sql", "foobar"]);
});
});
diff --git a/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx b/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx
index 3312d5836034..412f2aab9bed 100644
--- a/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx
@@ -188,7 +188,10 @@ export const selectApps = createSelector(
}
},
);
- return [].concat(sawBlank ? ["(unset)"] : []).concat(Object.keys(apps));
+ return []
+ .concat(sawBlank ? ["(unset)"] : [])
+ .concat(Object.keys(apps))
+ .sort();
},
);
@@ -206,7 +209,9 @@ export const selectDatabases = createSelector(
s.key.key_data.database ? s.key.key_data.database : "(unset)",
),
),
- ).filter((dbName: string) => dbName !== null && dbName.length > 0);
+ )
+ .filter((dbName: string) => dbName !== null && dbName.length > 0)
+ .sort();
},
);
From bbe57568ac38366a6390f7c5eeda627f1ff00a2e Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Tue, 22 Mar 2022 00:09:26 -0400
Subject: [PATCH 11/11] descmetadata: fix condition for bumping table version
The condition was incorrectly inverted: when the cache is enabled, it is
important to bump the table version so that the cache gets invalidated.
This change also adds tests for the other actions that can invalidate the
cache, to prevent further regressions. (A sketch of the corrected guard
follows this message.)
Release justification: high priority bugfix to new functionality.
Release note: None
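As a minimal sketch of the guard being fixed (the metadataUpdater and
descriptor plumbing are elided; the function and parameter names here are
hypothetical stand-ins, not the real code path):

// guard.go: illustrative-only version of the corrected condition in
// DeleteDatabaseRoleSettings. The old check (cacheEnabled || rowsDeleted == 0)
// skipped the version bump exactly when the cache needed invalidation.
package main

func maybeBumpTableVersion(cacheEnabled bool, rowsDeleted int, bump func() error) error {
	// Skip the bump only when the cache is off or nothing actually changed.
	if !cacheEnabled || rowsDeleted == 0 {
		return nil
	}
	// Bumping the table version is what lets other nodes notice the change
	// and refresh their session-initialization caches.
	return bump()
}

func main() {
	_ = maybeBumpTableVersion(true, 2, func() error { return nil })
}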
---
pkg/BUILD.bazel | 1 +
pkg/sql/descmetadata/metadata_updater.go | 6 +-
pkg/sql/sessioninit/BUILD.bazel | 29 +++-
pkg/sql/sessioninit/cache_test.go | 196 +++++++++++++++++++++++
pkg/sql/sessioninit/main_test.go | 33 ++++
5 files changed, 261 insertions(+), 4 deletions(-)
create mode 100644 pkg/sql/sessioninit/cache_test.go
create mode 100644 pkg/sql/sessioninit/main_test.go
diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel
index 202ba8f1fbea..6df843bb9f7a 100644
--- a/pkg/BUILD.bazel
+++ b/pkg/BUILD.bazel
@@ -343,6 +343,7 @@ ALL_TESTS = [
"//pkg/sql/sem/tree/eval_test:eval_test_test",
"//pkg/sql/sem/tree:tree_test",
"//pkg/sql/sessiondata:sessiondata_test",
+ "//pkg/sql/sessioninit:sessioninit_test",
"//pkg/sql/span:span_test",
"//pkg/sql/sqlinstance/instanceprovider:instanceprovider_test",
"//pkg/sql/sqlinstance/instancestorage:instancestorage_test",
diff --git a/pkg/sql/descmetadata/metadata_updater.go b/pkg/sql/descmetadata/metadata_updater.go
index cca4aafc617b..54e67ea779e7 100644
--- a/pkg/sql/descmetadata/metadata_updater.go
+++ b/pkg/sql/descmetadata/metadata_updater.go
@@ -126,9 +126,9 @@ func (mu metadataUpdater) DeleteDatabaseRoleSettings(ctx context.Context, dbID d
if err != nil {
return err
}
- // If system table updates should be minimized, avoid bumping up the version
- // number of the table below.
- if mu.cacheEnabled || rowsDeleted == 0 {
+ // If the cache is off or if no rows changed, there's no need to bump the
+ // table version.
+ if !mu.cacheEnabled || rowsDeleted == 0 {
return nil
}
// Bump the table version for the role settings table when we modify it.
diff --git a/pkg/sql/sessioninit/BUILD.bazel b/pkg/sql/sessioninit/BUILD.bazel
index ae52b325478b..b123cf775d43 100644
--- a/pkg/sql/sessioninit/BUILD.bazel
+++ b/pkg/sql/sessioninit/BUILD.bazel
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "sessioninit",
@@ -25,3 +25,30 @@ go_library(
"@com_github_cockroachdb_logtags//:logtags",
],
)
+
+go_test(
+ name = "sessioninit_test",
+ srcs = [
+ "cache_test.go",
+ "main_test.go",
+ ],
+ deps = [
+ ":sessioninit",
+ "//pkg/base",
+ "//pkg/kv",
+ "//pkg/security",
+ "//pkg/security/securitytest",
+ "//pkg/server",
+ "//pkg/sql",
+ "//pkg/sql/catalog/descpb",
+ "//pkg/sql/catalog/descs",
+ "//pkg/sql/sqlutil",
+ "//pkg/testutils/serverutils",
+ "//pkg/testutils/sqlutils",
+ "//pkg/testutils/testcluster",
+ "//pkg/util/leaktest",
+ "//pkg/util/log",
+ "//pkg/util/randutil",
+ "@com_github_stretchr_testify//require",
+ ],
+)
diff --git a/pkg/sql/sessioninit/cache_test.go b/pkg/sql/sessioninit/cache_test.go
new file mode 100644
index 000000000000..e1b33dc8d4fe
--- /dev/null
+++ b/pkg/sql/sessioninit/cache_test.go
@@ -0,0 +1,196 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package sessioninit_test
+
+import (
+ "context"
+ gosql "database/sql"
+ "net/url"
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/base"
+ "github.com/cockroachdb/cockroach/pkg/kv"
+ "github.com/cockroachdb/cockroach/pkg/security"
+ "github.com/cockroachdb/cockroach/pkg/sql"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/sessioninit"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
+ "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
+ "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
+ "github.com/cockroachdb/cockroach/pkg/util/leaktest"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCacheInvalidation(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ ctx := context.Background()
+ s, db, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: false})
+ defer s.Stopper().Stop(ctx)
+ defer db.Close()
+
+ pgURL, cleanupFunc := sqlutils.PGUrl(
+ t, s.ServingSQLAddr(), "TestCacheInvalidation" /* prefix */, url.UserPassword("testuser", "abc"),
+ )
+ defer cleanupFunc()
+
+ // Extract login as a function so that we can call it to populate the cache
+ // with real information.
+ login := func() {
+ thisDB, err := gosql.Open("postgres", pgURL.String())
+ require.NoError(t, err)
+ var i int
+ err = thisDB.QueryRow("SELECT 1").Scan(&i)
+ require.NoError(t, err)
+ _ = thisDB.Close()
+ }
+
+ execCfg := s.ExecutorConfig().(sql.ExecutorConfig)
+ getSettingsFromCache := func() ([]sessioninit.SettingsCacheEntry, bool, error) {
+ didReadFromSystemTable := false
+ settings, err := execCfg.SessionInitCache.GetDefaultSettings(
+ ctx,
+ s.ClusterSettings(),
+ s.InternalExecutor().(sqlutil.InternalExecutor),
+ s.DB(),
+ s.CollectionFactory().(*descs.CollectionFactory),
+ security.TestUserName(),
+ "defaultdb",
+ func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, username security.SQLUsername, databaseID descpb.ID) ([]sessioninit.SettingsCacheEntry, error) {
+ didReadFromSystemTable = true
+ return nil, nil
+ })
+ return settings, didReadFromSystemTable, err
+ }
+ getAuthInfoFromCache := func() (sessioninit.AuthInfo, bool, error) {
+ didReadFromSystemTable := false
+ aInfo, err := execCfg.SessionInitCache.GetAuthInfo(
+ ctx,
+ s.ClusterSettings(),
+ s.InternalExecutor().(sqlutil.InternalExecutor),
+ s.DB(),
+ s.CollectionFactory().(*descs.CollectionFactory),
+ security.TestUserName(),
+ func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, username security.SQLUsername) (sessioninit.AuthInfo, error) {
+ didReadFromSystemTable = true
+ return sessioninit.AuthInfo{}, nil
+ })
+ return aInfo, didReadFromSystemTable, err
+ }
+
+ // Create user and warm the cache.
+ _, err := db.ExecContext(ctx, "CREATE USER testuser WITH PASSWORD 'abc'")
+ require.NoError(t, err)
+ login()
+
+ t.Run("default settings cache", func(t *testing.T) {
+ for _, stmt := range []string{
+ `ALTER ROLE ALL IN DATABASE postgres SET search_path = 'a'`,
+ `ALTER ROLE testuser SET search_path = 'b'`,
+ } {
+ _, err := db.ExecContext(ctx, stmt)
+ require.NoError(t, err)
+ }
+
+ // Check that the cache initially contains the default settings for testuser.
+ login()
+ settings, didReadFromSystemTable, err := getSettingsFromCache()
+ require.NoError(t, err)
+ require.False(t, didReadFromSystemTable)
+ require.Contains(t, settings, sessioninit.SettingsCacheEntry{
+ SettingsCacheKey: sessioninit.SettingsCacheKey{
+ DatabaseID: 0,
+ Username: security.TestUserName(),
+ },
+ Settings: []string{"search_path=b"},
+ })
+
+ // Verify that dropping a database referenced in the default settings table
+ // causes the cache to be invalidated.
+ _, err = db.ExecContext(ctx, "DROP DATABASE postgres")
+ require.NoError(t, err)
+ settings, didReadFromSystemTable, err = getSettingsFromCache()
+ require.NoError(t, err)
+ require.True(t, didReadFromSystemTable)
+ require.Empty(t, settings)
+
+ // Verify that adding a new default setting causes the cache to be
+ // invalidated. We need to use login() to load "real" data.
+ _, err = db.ExecContext(ctx, `ALTER ROLE ALL SET search_path = 'c'`)
+ require.NoError(t, err)
+ login()
+ settings, didReadFromSystemTable, err = getSettingsFromCache()
+ require.NoError(t, err)
+ require.False(t, didReadFromSystemTable)
+ require.Contains(t, settings, sessioninit.SettingsCacheEntry{
+ SettingsCacheKey: sessioninit.SettingsCacheKey{
+ DatabaseID: 0,
+ Username: security.MakeSQLUsernameFromPreNormalizedString(""),
+ },
+ Settings: []string{"search_path=c"},
+ })
+
+ // Verify that dropping a user referenced in the default settings table
+ // causes the cache to be invalidated.
+ _, err = db.ExecContext(ctx, "DROP USER testuser")
+ require.NoError(t, err)
+ settings, didReadFromSystemTable, err = getSettingsFromCache()
+ require.NoError(t, err)
+ require.True(t, didReadFromSystemTable)
+ require.Empty(t, settings)
+
+ // Re-create the user and warm the cache for the next test.
+ _, err = db.ExecContext(ctx, "CREATE USER testuser WITH PASSWORD 'abc'")
+ require.NoError(t, err)
+ login()
+ })
+
+ t.Run("auth info cache", func(t *testing.T) {
+ // Check that the cache initially contains info for testuser.
+ login()
+ aInfo, didReadFromSystemTable, err := getAuthInfoFromCache()
+ require.NoError(t, err)
+ require.False(t, didReadFromSystemTable)
+ require.True(t, aInfo.UserExists)
+ require.True(t, aInfo.CanLoginSQL)
+
+ // Verify that creating a different user invalidates the cache.
+ _, err = db.ExecContext(ctx, "CREATE USER testuser2")
+ require.NoError(t, err)
+ aInfo, didReadFromSystemTable, err = getAuthInfoFromCache()
+ require.NoError(t, err)
+ require.True(t, didReadFromSystemTable)
+
+ // Verify that dropping a user invalidates the cache.
+ _, err = db.ExecContext(ctx, "DROP USER testuser2")
+ require.NoError(t, err)
+ aInfo, didReadFromSystemTable, err = getAuthInfoFromCache()
+ require.NoError(t, err)
+ require.True(t, didReadFromSystemTable)
+
+ // Verify that altering VALID UNTIL invalidates the cache.
+ _, err = db.ExecContext(ctx, "ALTER USER testuser VALID UNTIL '2099-01-01'")
+ require.NoError(t, err)
+ aInfo, didReadFromSystemTable, err = getAuthInfoFromCache()
+ require.NoError(t, err)
+ require.True(t, didReadFromSystemTable)
+
+ // Sanity check to make sure the cache is used.
+ _, err = db.ExecContext(ctx, "SELECT 1")
+ require.NoError(t, err)
+ aInfo, didReadFromSystemTable, err = getAuthInfoFromCache()
+ require.NoError(t, err)
+ require.False(t, didReadFromSystemTable)
+ })
+}
diff --git a/pkg/sql/sessioninit/main_test.go b/pkg/sql/sessioninit/main_test.go
new file mode 100644
index 000000000000..597b8fd13429
--- /dev/null
+++ b/pkg/sql/sessioninit/main_test.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package sessioninit_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/security"
+ "github.com/cockroachdb/cockroach/pkg/security/securitytest"
+ "github.com/cockroachdb/cockroach/pkg/server"
+ "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
+ "github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
+ "github.com/cockroachdb/cockroach/pkg/util/randutil"
+)
+
+//go:generate ../../util/leaktest/add-leaktest.sh *_test.go
+
+func TestMain(m *testing.M) {
+ security.SetAssetLoader(securitytest.EmbeddedAssets)
+ randutil.SeedForTests()
+ serverutils.InitTestServerFactory(server.TestServerFactory)
+ serverutils.InitTestClusterFactory(testcluster.TestClusterFactory)
+ os.Exit(m.Run())
+}