diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8c0d3b34f982..34c22483c917 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -69,7 +69,7 @@ # last-rule-wins so bulk i/o takes userfile.go even though cli-prs takes pkg/cli /pkg/cli/userfile.go @cockroachdb/bulk-prs /pkg/cli/demo*.go @cockroachdb/cli-prs @cockroachdb/sql-experience @cockroachdb/server-prs -/pkg/cli/debug*.go @cockroachdb/cli-prs @cockroachdb/kv +/pkg/cli/debug*.go @cockroachdb/cli-prs @cockroachdb/kv-prs /pkg/cli/debug_job_trace*.go @cockroachdb/bulk-prs /pkg/cli/doctor*.go @cockroachdb/cli-prs @cockroachdb/sql-schema /pkg/cli/import_test.go @cockroachdb/cli-prs @cockroachdb/bulk-prs @@ -98,7 +98,7 @@ /pkg/geo/ @cockroachdb/geospatial -/pkg/kv/ @cockroachdb/kv +/pkg/kv/ @cockroachdb/kv-prs /pkg/storage/ @cockroachdb/storage @@ -138,8 +138,8 @@ /pkg/ccl/utilccl/ @cockroachdb/server-prs /pkg/ccl/workloadccl/ @cockroachdb/sql-experience /pkg/ccl/benchccl/rttanalysisccl/ @cockroachdb/sql-experience -/pkg/clusterversion/ @cockroachdb/kv -/pkg/cmd/allocsim/ @cockroachdb/kv +/pkg/clusterversion/ @cockroachdb/kv-prs +/pkg/cmd/allocsim/ @cockroachdb/kv-prs /pkg/cmd/bazci/ @cockroachdb/dev-inf /pkg/cmd/cmdutil/ @cockroachdb/dev-inf /pkg/cmd/cmp-protocol/ @cockroachdb/sql-experience @@ -160,7 +160,7 @@ /pkg/cmd/geoviz/ @cockroachdb/geospatial /pkg/cmd/github-post/ @cockroachdb/test-eng /pkg/cmd/github-pull-request-make/ @cockroachdb/dev-inf -/pkg/cmd/gossipsim/ @cockroachdb/kv +/pkg/cmd/gossipsim/ @cockroachdb/kv-prs /pkg/cmd/import-tools/ @cockroachdb/dev-inf /pkg/cmd/internal/issues/ @cockroachdb/test-eng /pkg/cmd/prereqs/ @cockroachdb/dev-inf @@ -192,18 +192,18 @@ /pkg/docs/ @cockroachdb/docs /pkg/featureflag/ @cockroachdb/cli-prs-noreview /pkg/gossip/ @cockroachdb/kv-noreview -/pkg/internal/client/requestbatcher/ @cockroachdb/kv +/pkg/internal/client/requestbatcher/ @cockroachdb/kv-prs /pkg/internal/codeowners/ @cockroachdb/test-eng /pkg/internal/reporoot @cockroachdb/dev-inf /pkg/internal/rsg/ @cockroachdb/sql-queries /pkg/internal/sqlsmith/ @cockroachdb/sql-queries /pkg/internal/team/ @cockroachdb/test-eng /pkg/jobs/ @cockroachdb/sql-schema -/pkg/keys/ @cockroachdb/kv +/pkg/keys/ @cockroachdb/kv-prs /pkg/migration/ @cockroachdb/kv @cockroachdb/sql-schema /pkg/multitenant @cockroachdb/unowned /pkg/release/ @cockroachdb/dev-inf -/pkg/roachpb/ @cockroachdb/kv +/pkg/roachpb/ @cockroachdb/kv-prs /pkg/rpc/ @cockroachdb/server-prs /pkg/scheduledjobs/ @cockroachdb/bulk-prs /pkg/security/ @cockroachdb/server-prs @@ -211,7 +211,7 @@ /pkg/startupmigrations/ @cockroachdb/server-prs @cockroachdb/sql-schema /pkg/streaming/ @cockroachdb/bulk-prs /pkg/testutils/ @cockroachdb/test-eng -/pkg/ts/ @cockroachdb/kv +/pkg/ts/ @cockroachdb/kv-prs /pkg/ts/catalog/ @cockroachdb/obs-inf-prs /pkg/util/ @cockroachdb/unowned /pkg/util/log @cockroachdb/server-prs diff --git a/TEAMS.yaml b/TEAMS.yaml index e4be32d0b9ba..d8dcddc312fd 100644 --- a/TEAMS.yaml +++ b/TEAMS.yaml @@ -37,6 +37,7 @@ cockroachdb/sql-observability: cockroachdb/kv: aliases: cockroachdb/kv-triage: roachtest + cockroachdb/kv-prs: other triage_column_id: 14242655 cockroachdb/geospatial: triage_column_id: 9487269 diff --git a/pkg/ccl/backupccl/BUILD.bazel b/pkg/ccl/backupccl/BUILD.bazel index 25a4ac43a05c..a74f7981f065 100644 --- a/pkg/ccl/backupccl/BUILD.bazel +++ b/pkg/ccl/backupccl/BUILD.bazel @@ -177,6 +177,7 @@ go_test( "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/descs", "//pkg/sql/catalog/lease", 
"//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go index 40c8e7c71138..f4bb4782abc7 100644 --- a/pkg/ccl/backupccl/restore_old_versions_test.go +++ b/pkg/ccl/backupccl/restore_old_versions_test.go @@ -19,7 +19,14 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -503,3 +510,57 @@ func TestRestoreOldBackupMissingOfflineIndexes(t *testing.T) { } } } + +func TestRestoreWithDroppedSchemaCorruption(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + const ( + dbName = "foo" + backupDir = "testdata/restore_with_dropped_schema/exports/v20.2.7" + fromDir = "nodelocal://0/" + ) + + args := base.TestServerArgs{ExternalIODir: backupDir} + s, sqlDB, _ := serverutils.StartServer(t, args) + tdb := sqlutils.MakeSQLRunner(sqlDB) + defer s.Stopper().Stop(ctx) + + tdb.Exec(t, fmt.Sprintf("RESTORE DATABASE %s FROM '%s'", dbName, fromDir)) + query := fmt.Sprintf("SELECT database_name FROM [SHOW DATABASES] WHERE database_name = '%s'", dbName) + tdb.CheckQueryResults(t, query, [][]string{{dbName}}) + + // Read descriptor without validation. + execCfg := s.ExecutorConfig().(sql.ExecutorConfig) + hasSameNameSchema := func(dbName string) bool { + exists := false + var desc catalog.DatabaseDescriptor + require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( + ctx context.Context, txn *kv.Txn, collection *descs.Collection, + ) error { + // Using this method to avoid validation. + allDescs, err := catalogkv.GetAllDescriptors(ctx, txn, execCfg.Codec, false) + if err != nil { + return err + } + for _, d := range allDescs { + if d.GetName() == dbName { + desc, err = catalog.AsDatabaseDescriptor(d) + require.NoError(t, err, "unable to cast to database descriptor") + return nil + } + } + return nil + })) + require.NoError(t, desc.ForEachSchemaInfo( + func(id descpb.ID, name string, isDropped bool) error { + if name == dbName { + exists = true + } + return nil + })) + return exists + } + require.Falsef(t, hasSameNameSchema(dbName), "corrupted descriptor exists") +} diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/create.sql b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/create.sql new file mode 100644 index 000000000000..9c0749552da5 --- /dev/null +++ b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/create.sql @@ -0,0 +1,14 @@ +-- The below SQL is used to create a backup of a database that +-- contains a corrupted database descriptor. Data is produced +-- using version 20.2.7. This backup is used in +-- TestRestoreWithDroppedSchemaCorruption test. 
+ +CREATE DATABASE foo; + +SET DATABASE = foo; + +CREATE SCHEMA bar; + +DROP SCHEMA bar; + +BACKUP DATABASE foo to 'nodelocal://0/foo_backup'; diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-683775825115512833-CHECKSUM b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-683775825115512833-CHECKSUM new file mode 100644 index 000000000000..b6f4e0b22332 --- /dev/null +++ b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-683775825115512833-CHECKSUM @@ -0,0 +1 @@ +T \ No newline at end of file diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-CHECKSUM b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-CHECKSUM new file mode 100644 index 000000000000..b6f4e0b22332 --- /dev/null +++ b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-CHECKPOINT-CHECKSUM @@ -0,0 +1 @@ +T \ No newline at end of file diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-STATISTICS b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP-STATISTICS new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST new file mode 100644 index 000000000000..48a71b53c0a2 Binary files /dev/null and b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST differ diff --git a/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST-CHECKSUM b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST-CHECKSUM new file mode 100644 index 000000000000..4ef192e60070 --- /dev/null +++ b/pkg/ccl/backupccl/testdata/restore_with_dropped_schema/exports/v20.2.7/BACKUP_MANIFEST-CHECKSUM @@ -0,0 +1 @@ +=f \ No newline at end of file diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 18f623bc202e..f3c83616bebd 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -1466,7 +1466,7 @@ func TestChangefeedAvroNotice(t *testing.T) { sqlDB.Exec(t, "CREATE table foo (i int)") sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) - sql := fmt.Sprintf("CREATE CHANGEFEED FOR d.foo INTO 'dummysink' WITH format=experimental_avro, confluent_schema_registry='%s'", schemaReg.URL()) + sql := fmt.Sprintf("CREATE CHANGEFEED FOR d.foo INTO 'null://' WITH format=experimental_avro, confluent_schema_registry='%s'", schemaReg.URL()) expectNotice(t, s, sql, `avro is no longer experimental, use format=avro`) } @@ -2614,6 +2614,12 @@ func TestChangefeedErrors(t *testing.T) { return fmt.Sprintf(`unknown %s sink query parameters: [%s]`, sink, strings.Join(params, ", ")) } + // Check that sink URLs have valid scheme + sqlDB.ExpectErr( + t, `no scheme found for sink URL`, + `CREATE CHANGEFEED FOR foo INTO 'kafka%3A%2F%2Fnope%0A'`, + ) + // Check that confluent_schema_registry is only accepted if format is avro. 
// TODO: This should be testing it as a WITH option and check avro_schema_prefix too sqlDB.ExpectErr( diff --git a/pkg/ccl/changefeedccl/changefeedbase/options.go b/pkg/ccl/changefeedccl/changefeedbase/options.go index cc8227000524..90f81054608e 100644 --- a/pkg/ccl/changefeedccl/changefeedbase/options.go +++ b/pkg/ccl/changefeedccl/changefeedbase/options.go @@ -110,7 +110,6 @@ const ( SinkParamSkipTLSVerify = `insecure_tls_skip_verify` SinkParamTopicPrefix = `topic_prefix` SinkParamTopicName = `topic_name` - SinkSchemeBuffer = `` SinkSchemeCloudStorageAzure = `experimental-azure` SinkSchemeCloudStorageGCS = `experimental-gs` SinkSchemeCloudStorageHTTP = `experimental-http` diff --git a/pkg/ccl/changefeedccl/sink.go b/pkg/ccl/changefeedccl/sink.go index ba379e5b8c4c..b383fe6f142b 100644 --- a/pkg/ccl/changefeedccl/sink.go +++ b/pkg/ccl/changefeedccl/sink.go @@ -79,9 +79,11 @@ func getSink( } newSink := func() (Sink, error) { - switch { - case u.Scheme == changefeedbase.SinkSchemeBuffer: + if feedCfg.SinkURI == "" { return &bufferSink{}, nil + } + + switch { case u.Scheme == changefeedbase.SinkSchemeNull: return makeNullSink(sinkURL{URL: u}) case u.Scheme == changefeedbase.SinkSchemeKafka: @@ -98,6 +100,8 @@ func getSink( case u.Scheme == changefeedbase.SinkSchemeHTTP || u.Scheme == changefeedbase.SinkSchemeHTTPS: return nil, errors.Errorf(`unsupported sink: %s. HTTP endpoints can be used with %s and %s`, u.Scheme, changefeedbase.SinkSchemeWebhookHTTPS, changefeedbase.SinkSchemeCloudStorageHTTPS) + case u.Scheme == "": + return nil, errors.Errorf(`no scheme found for sink URL %q`, feedCfg.SinkURI) default: return nil, errors.Errorf(`unsupported sink: %s`, u.Scheme) } diff --git a/pkg/migration/migrations/BUILD.bazel b/pkg/migration/migrations/BUILD.bazel index 65c82d44e539..39dabe4b6823 100644 --- a/pkg/migration/migrations/BUILD.bazel +++ b/pkg/migration/migrations/BUILD.bazel @@ -82,9 +82,11 @@ go_test( "//pkg/security/securitytest", "//pkg/server", "//pkg/settings/cluster", + "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", + "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/systemschema", diff --git a/pkg/migration/migrations/fix_descriptor_migration_external_test.go b/pkg/migration/migrations/fix_descriptor_migration_external_test.go index 465a48a47343..7c8f20043764 100644 --- a/pkg/migration/migrations/fix_descriptor_migration_external_test.go +++ b/pkg/migration/migrations/fix_descriptor_migration_external_test.go @@ -19,14 +19,21 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" 
"github.com/stretchr/testify/require" ) @@ -191,3 +198,103 @@ func TestFixPrivilegesMigration(t *testing.T) { tc.Stopper().Stop(ctx) } } + +func TestFixDBDescriptorDroppedSchemaName(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + clusterArgs := base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + DisableAutomaticVersionUpgrade: 1, + BinaryVersionOverride: clusterversion.ByKey( + clusterversion.FixDescriptors - 1), + }, + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + }, + }, + } + + ctx := context.Background() + tc := testcluster.StartTestCluster(t, 1, clusterArgs) + defer tc.Stopper().Stop(ctx) + s := tc.Server(0) + sqlDB := tc.ServerConn(0) + tdb := sqlutils.MakeSQLRunner(sqlDB) + + // - Create database. + // - Read its descriptor, add the bad entry, write the descriptor + // without validation. + // - Read the descriptor without validation and ensure that the bad + // entry exists, while ensures that the bad entry is not removed + // while writing the descriptor. + // - Run migration. + // - Read the descriptor without validation and ensure that the bad + // entry does not exist. + + const dbName = "t" + tdb.Exec(t, "CREATE DATABASE "+dbName) + + // Write a corrupted descriptor. + var descID descpb.ID + execCfg := s.ExecutorConfig().(sql.ExecutorConfig) + require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( + ctx context.Context, txn *kv.Txn, collection *descs.Collection, + ) error { + flags := tree.DatabaseLookupFlags{Required: true} + desc, err := collection.GetMutableDatabaseByName(ctx, txn, dbName, flags) + if err != nil { + return err + } + descID = desc.GetID() + desc.Schemas = map[string]descpb.DatabaseDescriptor_SchemaInfo{dbName: {ID: descID, Dropped: true}} + builder := dbdesc.NewBuilder(desc.DatabaseDesc()) + badDesc := builder.BuildCreatedMutable() + badDesc.MaybeIncrementVersion() + collection.SkipValidationOnWrite() + return collection.WriteDesc(ctx, false, badDesc, txn) + })) + + // Checks whether the erroneous entry exists or not. + hasSameNameSchema := func(dbName string) bool { + exists := false + var desc catalog.DatabaseDescriptor + require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( + ctx context.Context, txn *kv.Txn, collection *descs.Collection, + ) error { + // Using this method to avoid calling RunPostDeserializationChanges(). + allDescs, err := catalogkv.GetAllDescriptors(ctx, txn, execCfg.Codec, false) + if err != nil { + return err + } + for _, d := range allDescs { + if d.GetID() == descID { + desc, err = catalog.AsDatabaseDescriptor(d) + require.NoError(t, err, "unable to cast to database descriptor") + return nil + } + } + return nil + })) + require.NoError(t, desc.ForEachSchemaInfo( + func(id descpb.ID, name string, isDropped bool) error { + if name == dbName { + exists = true + } + return nil + })) + return exists + } + + // Validate that the bad entry does exist after writing the descriptor. + require.True(t, hasSameNameSchema(dbName), "bad entry does not exist") + + // Migrate to the new version. + _, err := sqlDB.Exec(`SET CLUSTER SETTING version = $1`, + clusterversion.ByKey(clusterversion.FixDescriptors).String()) + require.NoError(t, err) + + // Validate that the bad entry is removed. 
+ require.False(t, hasSameNameSchema(dbName), "bad entry exists") +} diff --git a/pkg/roachpb/app_stats.pb.go b/pkg/roachpb/app_stats.pb.go index d42df95673bc..798ea19eef07 100644 --- a/pkg/roachpb/app_stats.pb.go +++ b/pkg/roachpb/app_stats.pb.go @@ -273,6 +273,7 @@ type StatementStatisticsKey struct { Vec bool `protobuf:"varint,7,opt,name=vec" json:"vec"` FullScan bool `protobuf:"varint,8,opt,name=full_scan,json=fullScan" json:"full_scan"` Database string `protobuf:"bytes,9,opt,name=database" json:"database"` + PlanHash uint64 `protobuf:"varint,10,opt,name=plan_hash,json=planHash" json:"plan_hash"` } func (m *StatementStatisticsKey) Reset() { *m = StatementStatisticsKey{} } @@ -310,9 +311,10 @@ type CollectedStatementStatistics struct { // ID is a hash of the statement key (query fingerprint, failure status, // implicit txn or not) which can be used to identify the statement // for instance in transaction statistics. - ID StmtFingerprintID `protobuf:"varint,3,opt,name=id,casttype=StmtFingerprintID" json:"id"` - Key StatementStatisticsKey `protobuf:"bytes,1,opt,name=key" json:"key"` - Stats StatementStatistics `protobuf:"bytes,2,opt,name=stats" json:"stats"` + ID StmtFingerprintID `protobuf:"varint,3,opt,name=id,casttype=StmtFingerprintID" json:"id"` + Key StatementStatisticsKey `protobuf:"bytes,1,opt,name=key" json:"key"` + Stats StatementStatistics `protobuf:"bytes,2,opt,name=stats" json:"stats"` + AggregatedTs time.Time `protobuf:"bytes,4,opt,name=aggregated_ts,json=aggregatedTs,stdtime" json:"aggregated_ts"` } func (m *CollectedStatementStatistics) Reset() { *m = CollectedStatementStatistics{} } @@ -351,8 +353,9 @@ type CollectedTransactionStatistics struct { // transaction comprises, in order. StatementFingerprintIDs []StmtFingerprintID `protobuf:"varint,1,rep,name=statement_fingerprint_ids,json=statementFingerprintIds,casttype=StmtFingerprintID" json:"statement_fingerprint_ids,omitempty"` // App is the name of the app which executed the transaction. 
- App string `protobuf:"bytes,2,opt,name=app" json:"app"` - Stats TransactionStatistics `protobuf:"bytes,3,opt,name=stats" json:"stats"` + App string `protobuf:"bytes,2,opt,name=app" json:"app"` + Stats TransactionStatistics `protobuf:"bytes,3,opt,name=stats" json:"stats"` + AggregatedTs time.Time `protobuf:"bytes,4,opt,name=aggregated_ts,json=aggregatedTs,stdtime" json:"aggregated_ts"` } func (m *CollectedTransactionStatistics) Reset() { *m = CollectedTransactionStatistics{} } @@ -566,94 +569,97 @@ func init() { func init() { proto.RegisterFile("roachpb/app_stats.proto", fileDescriptor_81c296505f9d1940) } var fileDescriptor_81c296505f9d1940 = []byte{ - // 1392 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x53, 0x1b, 0x47, - 0x16, 0x67, 0xd0, 0x60, 0x8d, 0x9e, 0x10, 0x88, 0x06, 0xcc, 0x98, 0xa2, 0x24, 0xac, 0x5a, 0x97, - 0xf1, 0xfe, 0x81, 0x2d, 0x6a, 0x2f, 0xbb, 0x5b, 0xfe, 0x87, 0xb1, 0xab, 0xc0, 0xd8, 0xb5, 0x16, - 0xec, 0x25, 0x97, 0xa9, 0x66, 0xe6, 0x09, 0x4f, 0x31, 0xd3, 0x33, 0x74, 0xb7, 0xb0, 0x74, 0xcf, - 0x07, 0xf0, 0x21, 0x97, 0xdc, 0x72, 0xcc, 0x97, 0xc8, 0xdd, 0x97, 0xa4, 0x7c, 0xf4, 0x89, 0x24, - 0xf8, 0xe2, 0x4f, 0x90, 0x43, 0x4e, 0xa9, 0xee, 0x9e, 0x11, 0x12, 0x96, 0x93, 0x21, 0xbe, 0x8d, - 0xde, 0x7b, 0xbf, 0x5f, 0xf7, 0x7b, 0xfd, 0x7b, 0xaf, 0x5b, 0xb0, 0xc4, 0x13, 0xea, 0xbf, 0x4c, - 0x0f, 0x37, 0x68, 0x9a, 0x7a, 0x42, 0x52, 0x29, 0xd6, 0x53, 0x9e, 0xc8, 0x84, 0xd4, 0xfc, 0xc4, - 0x3f, 0xd6, 0xce, 0x75, 0x71, 0x12, 0x2d, 0x2f, 0x1c, 0x25, 0x47, 0x89, 0xf6, 0x6c, 0xa8, 0x2f, - 0x13, 0xb4, 0xdc, 0x3c, 0x4a, 0x92, 0xa3, 0x08, 0x37, 0xf4, 0xaf, 0xc3, 0x6e, 0x67, 0x43, 0x86, - 0x31, 0x0a, 0x49, 0xe3, 0xd4, 0x04, 0xb4, 0xbe, 0x73, 0x60, 0x7e, 0x5f, 0x52, 0x89, 0x31, 0x32, - 0xa9, 0x3e, 0x42, 0x21, 0x43, 0x5f, 0x90, 0x65, 0x98, 0xf2, 0x93, 0x2e, 0x93, 0xae, 0xb5, 0x6a, - 0xad, 0x95, 0xb6, 0xec, 0x37, 0x67, 0xcd, 0x89, 0xb6, 0x31, 0x91, 0x7f, 0xc1, 0x7c, 0x27, 0xe4, - 0x42, 0x7a, 0x54, 0x4a, 0x8c, 0x53, 0xe9, 0x99, 0xc8, 0xc9, 0xa1, 0xc8, 0x39, 0x1d, 0xf0, 0xd0, - 0xf8, 0x1f, 0x69, 0xd4, 0x2d, 0xa8, 0xc6, 0xb4, 0xe7, 0x71, 0x94, 0x3c, 0x44, 0xe1, 0x96, 0x86, - 0xa2, 0x21, 0xa6, 0xbd, 0xb6, 0xb1, 0x93, 0xbf, 0xc3, 0x6c, 0x84, 0x47, 0xd4, 0xef, 0x7b, 0x11, - 0x15, 0xd2, 0x43, 0xce, 0x5d, 0x7b, 0xd5, 0x5a, 0xab, 0x64, 0xa1, 0x35, 0xe3, 0xdc, 0xa3, 0x42, - 0x3e, 0xe6, 0x9c, 0xdc, 0x05, 0xf7, 0x52, 0xb4, 0xc7, 0x31, 0xa0, 0xbe, 0xc4, 0xc0, 0xad, 0x0e, - 0xc1, 0x16, 0x47, 0x60, 0xed, 0x2c, 0x84, 0xfc, 0x17, 0x1c, 0xd6, 0x8d, 0x3d, 0x9e, 0xbc, 0x12, - 0xee, 0xd4, 0xaa, 0xb5, 0x56, 0xdd, 0x5c, 0x5e, 0x1f, 0x29, 0xeb, 0xfa, 0xf3, 0x6e, 0x8c, 0x3c, - 0xf4, 0x55, 0x65, 0x32, 0xaa, 0x32, 0xeb, 0xc6, 0xed, 0xe4, 0x95, 0x20, 0x77, 0xa1, 0x92, 0x52, - 0x2e, 0xd0, 0x8b, 0xa8, 0x74, 0xaf, 0x15, 0x44, 0x3b, 0x1a, 0xb2, 0x47, 0xa5, 0x5a, 0x3b, 0x8d, - 0x28, 0xd3, 0xe8, 0x72, 0xd1, 0xb5, 0x15, 0x42, 0x81, 0xff, 0x0d, 0x65, 0xde, 0x35, 0x58, 0xa7, - 0x20, 0xf6, 0x1a, 0xef, 0x6a, 0xe8, 0x43, 0xa8, 0x0a, 0xe4, 0xa7, 0xa1, 0x6f, 0x36, 0x5e, 0x29, - 0x08, 0x87, 0x0c, 0xa4, 0x28, 0x1e, 0xc1, 0x74, 0x72, 0x8a, 0xfc, 0x25, 0xd2, 0x40, 0x73, 0x40, - 0x41, 0x8e, 0x6a, 0x8e, 0x52, 0x24, 0x3b, 0x30, 0x23, 0x90, 0x89, 0x50, 0x86, 0xa7, 0xe8, 0x85, - 0xac, 0x93, 0xb8, 0xd3, 0x9a, 0x66, 0xe5, 0x12, 0xcd, 0x7e, 0x1e, 0xb4, 0xc3, 0x3a, 0x49, 0xae, - 0x02, 0x31, 0x6c, 0x24, 0xf7, 0x01, 0x0e, 0xfb, 0x12, 0x85, 0xc7, 0x91, 0x06, 0xee, 0x6c, 0xc1, - 0xdd, 0x54, 0x34, 0xa6, 0x8d, 0x34, 0x50, 0x47, 0xa9, 0x34, 0x60, 0xf0, 0xf5, 0xa2, 0x47, 0xa9, - 
0x20, 0x19, 0x1c, 0xb0, 0x87, 0xbe, 0x69, 0x4f, 0x77, 0x51, 0xe3, 0xdd, 0x4b, 0xf8, 0xc7, 0x3d, - 0xd4, 0x60, 0x91, 0xaf, 0x8e, 0xb9, 0x81, 0xfc, 0x15, 0x1c, 0x71, 0x12, 0x79, 0xb2, 0x9f, 0xa2, - 0x7b, 0x5d, 0x8b, 0x76, 0x56, 0x85, 0x9c, 0x9f, 0x35, 0xcb, 0xfb, 0x2f, 0xf6, 0x0e, 0xfa, 0x29, - 0xb6, 0xcb, 0xe2, 0x24, 0x52, 0x1f, 0xe4, 0x00, 0xe6, 0x8d, 0xd2, 0xd5, 0x7a, 0x83, 0x66, 0x76, - 0x97, 0xb2, 0x3d, 0x9b, 0x76, 0x5f, 0xcf, 0xdb, 0x7d, 0xfd, 0x20, 0x8f, 0xd8, 0x72, 0x14, 0xe5, - 0xeb, 0x1f, 0x9b, 0x56, 0x7b, 0x4e, 0x11, 0xa8, 0xed, 0x0c, 0x9c, 0x64, 0x01, 0xa6, 0x58, 0x12, - 0xa0, 0x70, 0xdd, 0xd5, 0xd2, 0x5a, 0xa9, 0x6d, 0x7e, 0xec, 0xda, 0x4e, 0xad, 0x3e, 0xb3, 0x6b, - 0x3b, 0x33, 0xf5, 0xd9, 0x5d, 0xdb, 0x99, 0xab, 0x93, 0x5d, 0xdb, 0x21, 0xf5, 0xf9, 0x5d, 0xdb, - 0x99, 0xaf, 0x2f, 0xec, 0xda, 0xce, 0x42, 0x7d, 0xb1, 0xf5, 0xb5, 0x0d, 0x8b, 0x07, 0x9c, 0x32, - 0x41, 0x7d, 0x19, 0x26, 0xac, 0xe0, 0x04, 0xb9, 0x34, 0x0b, 0x26, 0x3f, 0x31, 0x0b, 0x86, 0xdb, - 0xb3, 0x74, 0xd5, 0xf6, 0xbc, 0xa4, 0x73, 0xfb, 0x4f, 0xe8, 0x5c, 0xc9, 0x02, 0x25, 0xef, 0x6b, - 0x82, 0xa9, 0xc2, 0xb2, 0x50, 0x10, 0x05, 0xbf, 0x0f, 0xe0, 0x27, 0x71, 0x1c, 0xca, 0x2b, 0x4d, - 0x88, 0x8a, 0xc1, 0x64, 0x04, 0x43, 0xba, 0x2e, 0x7f, 0xa6, 0xae, 0x9d, 0xcf, 0xd4, 0x75, 0xe5, - 0x8a, 0xba, 0x6e, 0x7d, 0x39, 0x09, 0xb5, 0x91, 0xee, 0x25, 0x4d, 0x70, 0x06, 0x53, 0xdd, 0x1a, - 0x1a, 0xcf, 0xe5, 0x28, 0x9b, 0xe7, 0x21, 0xac, 0xc4, 0x89, 0x90, 0x1e, 0x47, 0x1f, 0x99, 0xf4, - 0xf4, 0x80, 0x0c, 0x50, 0xf8, 0x3c, 0x4c, 0x95, 0xbc, 0xb4, 0x52, 0xaa, 0x9b, 0xad, 0x8f, 0xf6, - 0x90, 0x46, 0x34, 0x64, 0x07, 0x1c, 0xf1, 0x7f, 0x11, 0x65, 0xcf, 0x93, 0x00, 0x33, 0xe2, 0x1b, - 0x8a, 0xad, 0xad, 0xc9, 0x94, 0x67, 0xfb, 0x82, 0x8a, 0x50, 0x58, 0xfe, 0x68, 0xa9, 0x8b, 0x86, - 0x2a, 0x5d, 0xa1, 0xa1, 0x96, 0x46, 0x17, 0x19, 0x84, 0xfc, 0xc7, 0xfe, 0xf0, 0x4d, 0xd3, 0x6a, - 0xb5, 0xa1, 0x3a, 0x54, 0x64, 0xe2, 0x82, 0x1d, 0x23, 0x65, 0x3a, 0x7f, 0x2b, 0xdb, 0xa6, 0xb6, - 0x90, 0x3b, 0x50, 0x13, 0x27, 0x5d, 0xca, 0x31, 0xf0, 0x82, 0xb0, 0xd3, 0x31, 0x7d, 0x91, 0x87, - 0x4c, 0x67, 0xae, 0x6d, 0xe5, 0x69, 0x7d, 0x3b, 0x09, 0xd7, 0xc7, 0x5c, 0xdb, 0x4f, 0xb1, 0xaf, - 0xfa, 0xee, 0xa4, 0x8b, 0xbc, 0x3f, 0x52, 0x60, 0x63, 0x22, 0xd7, 0xa1, 0x44, 0xd3, 0x54, 0xf3, - 0xe6, 0x1e, 0x65, 0x20, 0x0d, 0x28, 0x07, 0xa1, 0x90, 0xfb, 0x2f, 0xf6, 0x74, 0xe2, 0x4e, 0x7e, - 0x2c, 0x99, 0x91, 0xac, 0xc0, 0xb5, 0x0e, 0x0d, 0x23, 0x0c, 0x74, 0x1b, 0xe5, 0xee, 0xcc, 0xa6, - 0x58, 0x93, 0xd4, 0x34, 0x48, 0xee, 0x52, 0x06, 0x72, 0x1b, 0xa6, 0xc3, 0x38, 0x8d, 0x42, 0x3f, - 0x94, 0x9e, 0xec, 0x31, 0xdd, 0x01, 0x79, 0x40, 0x35, 0xf7, 0x1c, 0xf4, 0x98, 0x22, 0x38, 0x45, - 0x5f, 0x0b, 0x7c, 0x40, 0x70, 0x8a, 0x3e, 0xb9, 0x09, 0x95, 0x4e, 0x37, 0x8a, 0x3c, 0xe1, 0x53, - 0xa6, 0xe5, 0x9b, 0x7b, 0x1d, 0x65, 0xde, 0xf7, 0x29, 0x23, 0xab, 0xe0, 0x04, 0x54, 0xd2, 0x43, - 0x2a, 0x50, 0x0b, 0x34, 0x4f, 0x6b, 0x60, 0x6d, 0xfd, 0x60, 0xc1, 0xca, 0xa3, 0x24, 0x8a, 0x50, - 0xdd, 0xf8, 0xe3, 0x9e, 0x3a, 0x1b, 0x30, 0x19, 0x06, 0x3a, 0x6f, 0x7b, 0xab, 0x99, 0x0d, 0xde, - 0xc9, 0x9d, 0xed, 0x5f, 0xcf, 0x9a, 0x73, 0xfb, 0x32, 0x96, 0x4f, 0x42, 0x76, 0x84, 0x3c, 0xe5, - 0x21, 0x93, 0x3b, 0xdb, 0xed, 0xc9, 0x50, 0xb5, 0x45, 0xe9, 0x18, 0x4d, 0x7d, 0xab, 0x9b, 0xb7, - 0x2e, 0x5f, 0x57, 0x63, 0x4f, 0x25, 0xcf, 0xea, 0x18, 0xfb, 0xe4, 0x1e, 0x4c, 0x99, 0x86, 0x1a, - 0x2f, 0xe6, 0x31, 0x04, 0xf9, 0x21, 0x6a, 0x58, 0xeb, 0x83, 0x05, 0x8d, 0x41, 0x42, 0xe3, 0x67, - 0x6f, 0x04, 0x37, 0x44, 0x4e, 0xe3, 0x75, 0x2e, 0x12, 0xf0, 0xc2, 0x40, 0xb8, 0xd6, 0x6a, 0x69, - 0xcd, 0xde, 0xfa, 0xe7, 
0xf9, 0x59, 0x73, 0x69, 0xb0, 0xd6, 0x48, 0x92, 0x62, 0x7c, 0xea, 0x4b, - 0x62, 0x5c, 0x74, 0x20, 0x3e, 0xa9, 0xaa, 0x07, 0x79, 0xa2, 0xa6, 0x99, 0xfe, 0x72, 0x29, 0xd1, - 0xb1, 0x5b, 0x1f, 0x4d, 0xf5, 0x17, 0x0b, 0xe6, 0xc7, 0x34, 0xb7, 0xea, 0x21, 0x46, 0x63, 0x1c, - 0x91, 0xb8, 0xb6, 0x90, 0xbb, 0x30, 0x45, 0xa5, 0xe4, 0xaa, 0xb8, 0xa5, 0xb5, 0xea, 0xe6, 0xed, - 0x3f, 0x9e, 0x14, 0xeb, 0x0f, 0xa5, 0xe4, 0x6d, 0x83, 0x22, 0xf7, 0xc0, 0xf1, 0x5f, 0x86, 0x51, - 0xc0, 0x91, 0xb9, 0x25, 0xcd, 0x50, 0x60, 0xd6, 0xb4, 0x07, 0x98, 0xe5, 0x07, 0x60, 0x2b, 0x3a, - 0x55, 0x92, 0x5c, 0x22, 0x95, 0xe1, 0xb3, 0x5f, 0x86, 0xa9, 0x53, 0x1a, 0x75, 0x71, 0xa4, 0x58, - 0xc6, 0x64, 0xa6, 0x45, 0x36, 0x33, 0xbe, 0xb7, 0xc0, 0x39, 0xe8, 0x31, 0xf3, 0x3e, 0xb8, 0x09, - 0x15, 0xd9, 0x63, 0xde, 0xc7, 0xb7, 0xa9, 0x23, 0x7b, 0xcc, 0x3c, 0xae, 0xb7, 0x60, 0x5a, 0x85, - 0xa8, 0xf9, 0xe5, 0x09, 0xf4, 0x33, 0x69, 0x15, 0xb8, 0xed, 0x64, 0x4f, 0xcf, 0xab, 0x7d, 0xf4, - 0xc9, 0x3f, 0x60, 0xd6, 0x5c, 0x3d, 0x12, 0x83, 0x6c, 0xb1, 0xe1, 0x47, 0xfa, 0xcc, 0xc0, 0x69, - 0x96, 0xfc, 0x1b, 0xcc, 0x0c, 0xba, 0xdb, 0x44, 0xdb, 0x43, 0xd1, 0xb5, 0xdc, 0xa7, 0x83, 0x5b, - 0x5f, 0x95, 0xa0, 0x32, 0xb8, 0x29, 0x7e, 0xf7, 0x69, 0xf0, 0x18, 0x6a, 0x0c, 0xe5, 0xab, 0x84, - 0x1f, 0x7b, 0xfa, 0x1e, 0x2b, 0x9c, 0xca, 0x74, 0x06, 0xdb, 0x52, 0x28, 0xb2, 0x0d, 0x35, 0xf5, - 0xc2, 0x88, 0x31, 0xf6, 0xba, 0x82, 0x1e, 0x61, 0xe1, 0xf7, 0x83, 0x7a, 0x98, 0x3c, 0xc3, 0xf8, - 0xff, 0x0a, 0x44, 0x76, 0x54, 0x49, 0x98, 0x44, 0xa6, 0x44, 0xaa, 0xab, 0x5b, 0xf8, 0x1d, 0x31, - 0x73, 0x01, 0x54, 0x05, 0x26, 0x4f, 0xa1, 0x9e, 0xe7, 0x15, 0xa3, 0x50, 0xec, 0xc5, 0xff, 0x72, - 0xcc, 0x66, 0xc8, 0x67, 0x19, 0x90, 0x3c, 0x81, 0x19, 0x95, 0x5d, 0x10, 0x8a, 0xe3, 0x2c, 0xbd, - 0xa2, 0xaf, 0x8b, 0xe9, 0x98, 0xf6, 0xb6, 0x43, 0x71, 0xac, 0xf3, 0xdb, 0xba, 0xf3, 0xe6, 0xe7, - 0xc6, 0xc4, 0x9b, 0xf3, 0x86, 0xf5, 0xf6, 0xbc, 0x61, 0xbd, 0x3b, 0x6f, 0x58, 0x3f, 0x9d, 0x37, - 0xac, 0xd7, 0xef, 0x1b, 0x13, 0x6f, 0xdf, 0x37, 0x26, 0xde, 0xbd, 0x6f, 0x4c, 0x7c, 0x51, 0xce, - 0xfe, 0x7e, 0xfe, 0x16, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x9a, 0xba, 0xe4, 0x88, 0x0e, 0x00, 0x00, + // 1436 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x3d, 0x6f, 0x1b, 0x47, + 0x13, 0x16, 0xc9, 0x93, 0x79, 0x1c, 0x92, 0x12, 0xb5, 0x92, 0xac, 0xb3, 0x20, 0x90, 0x32, 0xf1, + 0x1a, 0x96, 0xdf, 0xf7, 0x8d, 0x14, 0x08, 0x69, 0x92, 0xc0, 0x5f, 0xb2, 0x6c, 0x44, 0xb2, 0x6c, + 0xc4, 0x14, 0xd3, 0xa4, 0x39, 0xac, 0xee, 0x86, 0xd4, 0x41, 0xf7, 0xa5, 0xdd, 0xa5, 0x4c, 0xf6, + 0xf9, 0x01, 0x2e, 0xd2, 0xb8, 0xcb, 0x7f, 0x48, 0x9d, 0xde, 0x40, 0x10, 0xc0, 0xa5, 0x2b, 0x25, + 0x91, 0x9b, 0xfc, 0x82, 0x14, 0xa9, 0x82, 0xdd, 0xbd, 0xa3, 0x48, 0x99, 0x4e, 0x4e, 0x31, 0xd2, + 0x91, 0x33, 0xf3, 0x3c, 0xbb, 0x3b, 0xfb, 0xcc, 0xec, 0x1c, 0x2c, 0xb1, 0x88, 0x3a, 0x87, 0xf1, + 0xc1, 0x06, 0x8d, 0x63, 0x9b, 0x0b, 0x2a, 0xf8, 0x7a, 0xcc, 0x22, 0x11, 0x91, 0xaa, 0x13, 0x39, + 0x47, 0xca, 0xb9, 0xce, 0x8f, 0xfd, 0xe5, 0x85, 0x6e, 0xd4, 0x8d, 0x94, 0x67, 0x43, 0xfe, 0xd2, + 0x41, 0xcb, 0x8d, 0x6e, 0x14, 0x75, 0x7d, 0xdc, 0x50, 0xff, 0x0e, 0x7a, 0x9d, 0x0d, 0xe1, 0x05, + 0xc8, 0x05, 0x0d, 0x62, 0x1d, 0xd0, 0xfc, 0xc1, 0x84, 0xf9, 0x7d, 0x41, 0x05, 0x06, 0x18, 0x0a, + 0xf9, 0xc3, 0xe3, 0xc2, 0x73, 0x38, 0x59, 0x86, 0x69, 0x27, 0xea, 0x85, 0xc2, 0xca, 0xad, 0xe6, + 0xd6, 0x0a, 0x5b, 0xc6, 0xab, 0xd3, 0xc6, 0x54, 0x4b, 0x9b, 0xc8, 0x27, 0x30, 0xdf, 0xf1, 0x18, + 0x17, 0x36, 0x15, 0x02, 0x83, 0x58, 0xd8, 0x3a, 0x32, 0x3f, 0x12, 0x39, 0xa7, 0x02, 0xee, 0x6b, + 
0xff, 0x03, 0x85, 0xba, 0x01, 0xe5, 0x80, 0xf6, 0x6d, 0x86, 0x82, 0x79, 0xc8, 0xad, 0xc2, 0x48, + 0x34, 0x04, 0xb4, 0xdf, 0xd2, 0x76, 0xf2, 0x7f, 0x98, 0xf5, 0xb1, 0x4b, 0x9d, 0x81, 0xed, 0x53, + 0x2e, 0x6c, 0x64, 0xcc, 0x32, 0x56, 0x73, 0x6b, 0xa5, 0x24, 0xb4, 0xaa, 0x9d, 0x7b, 0x94, 0x8b, + 0x87, 0x8c, 0x91, 0xdb, 0x60, 0x5d, 0x88, 0xb6, 0x19, 0xba, 0xd4, 0x11, 0xe8, 0x5a, 0xe5, 0x11, + 0xd8, 0xe2, 0x18, 0xac, 0x95, 0x84, 0x90, 0xcf, 0xc1, 0x0c, 0x7b, 0x81, 0xcd, 0xa2, 0xe7, 0xdc, + 0x9a, 0x5e, 0xcd, 0xad, 0x95, 0x37, 0x97, 0xd7, 0xc7, 0xd2, 0xba, 0xfe, 0xb4, 0x17, 0x20, 0xf3, + 0x1c, 0x99, 0x99, 0x84, 0xaa, 0x18, 0xf6, 0x82, 0x56, 0xf4, 0x9c, 0x93, 0xdb, 0x50, 0x8a, 0x29, + 0xe3, 0x68, 0xfb, 0x54, 0x58, 0x57, 0x32, 0xa2, 0x4d, 0x05, 0xd9, 0xa3, 0x42, 0xae, 0x1d, 0xfb, + 0x34, 0x54, 0xe8, 0x62, 0xd6, 0xb5, 0x25, 0x42, 0x82, 0x3f, 0x85, 0x22, 0xeb, 0x69, 0xac, 0x99, + 0x11, 0x7b, 0x85, 0xf5, 0x14, 0xf4, 0x3e, 0x94, 0x39, 0xb2, 0x13, 0xcf, 0xd1, 0x1b, 0x2f, 0x65, + 0x84, 0x43, 0x02, 0x92, 0x14, 0x0f, 0xa0, 0x12, 0x9d, 0x20, 0x3b, 0x44, 0xea, 0x2a, 0x0e, 0xc8, + 0xc8, 0x51, 0x4e, 0x51, 0x92, 0x64, 0x07, 0x66, 0x38, 0x86, 0xdc, 0x13, 0xde, 0x09, 0xda, 0x5e, + 0xd8, 0x89, 0xac, 0x8a, 0xa2, 0x59, 0xb9, 0x40, 0xb3, 0x9f, 0x06, 0xed, 0x84, 0x9d, 0x28, 0x55, + 0x01, 0x1f, 0x35, 0x92, 0xbb, 0x00, 0x07, 0x03, 0x81, 0xdc, 0x66, 0x48, 0x5d, 0x6b, 0x36, 0xe3, + 0x6e, 0x4a, 0x0a, 0xd3, 0x42, 0xea, 0xca, 0xab, 0x94, 0x1a, 0xd0, 0xf8, 0x5a, 0xd6, 0xab, 0x94, + 0x90, 0x04, 0x0e, 0xd8, 0x47, 0x47, 0x97, 0xa7, 0xb5, 0xa8, 0xf0, 0xd6, 0x05, 0xfc, 0xc3, 0x3e, + 0x2a, 0x30, 0x4f, 0x57, 0xc7, 0xd4, 0x40, 0xfe, 0x0b, 0x26, 0x3f, 0xf6, 0x6d, 0x31, 0x88, 0xd1, + 0xba, 0xaa, 0x44, 0x3b, 0x2b, 0x43, 0xce, 0x4e, 0x1b, 0xc5, 0xfd, 0x67, 0x7b, 0xed, 0x41, 0x8c, + 0xad, 0x22, 0x3f, 0xf6, 0xe5, 0x0f, 0xd2, 0x86, 0x79, 0xad, 0x74, 0xb9, 0xde, 0xb0, 0x98, 0xad, + 0xa5, 0x64, 0xcf, 0xba, 0xdc, 0xd7, 0xd3, 0x72, 0x5f, 0x6f, 0xa7, 0x11, 0x5b, 0xa6, 0xa4, 0x7c, + 0xf1, 0x73, 0x23, 0xd7, 0x9a, 0x93, 0x04, 0x72, 0x3b, 0x43, 0x27, 0x59, 0x80, 0xe9, 0x30, 0x72, + 0x91, 0x5b, 0xd6, 0x6a, 0x61, 0xad, 0xd0, 0xd2, 0x7f, 0x76, 0x0d, 0xb3, 0x5a, 0x9b, 0xd9, 0x35, + 0xcc, 0x99, 0xda, 0xec, 0xae, 0x61, 0xce, 0xd5, 0xc8, 0xae, 0x61, 0x92, 0xda, 0xfc, 0xae, 0x61, + 0xce, 0xd7, 0x16, 0x76, 0x0d, 0x73, 0xa1, 0xb6, 0xd8, 0x7c, 0x69, 0xc0, 0x62, 0x9b, 0xd1, 0x90, + 0x53, 0x47, 0x78, 0x51, 0x98, 0xb1, 0x83, 0x5c, 0xe8, 0x05, 0xf9, 0xf7, 0xf4, 0x82, 0xd1, 0xf2, + 0x2c, 0x5c, 0xb6, 0x3c, 0x2f, 0xe8, 0xdc, 0xf8, 0x07, 0x3a, 0x97, 0xb2, 0x40, 0xc1, 0x06, 0x8a, + 0x60, 0x3a, 0xb3, 0x2c, 0x24, 0x44, 0xc2, 0xef, 0x02, 0x38, 0x51, 0x10, 0x78, 0xe2, 0x52, 0x1d, + 0xa2, 0xa4, 0x31, 0x09, 0xc1, 0x88, 0xae, 0x8b, 0x1f, 0xa8, 0x6b, 0xf3, 0x03, 0x75, 0x5d, 0xba, + 0xa4, 0xae, 0x9b, 0xdf, 0xe4, 0xa1, 0x3a, 0x56, 0xbd, 0xa4, 0x01, 0xe6, 0xb0, 0xab, 0xe7, 0x46, + 0xda, 0x73, 0xd1, 0x4f, 0xfa, 0xb9, 0x07, 0x2b, 0x41, 0xc4, 0x85, 0xcd, 0xd0, 0xc1, 0x50, 0xd8, + 0xaa, 0x41, 0xba, 0xc8, 0x1d, 0xe6, 0xc5, 0x52, 0x5e, 0x4a, 0x29, 0xe5, 0xcd, 0xe6, 0x3b, 0x7b, + 0x88, 0x7d, 0xea, 0x85, 0x6d, 0x86, 0xf8, 0xa5, 0x4f, 0xc3, 0xa7, 0x91, 0x8b, 0x09, 0xf1, 0x35, + 0xc9, 0xd6, 0x52, 0x64, 0xd2, 0xb3, 0x7d, 0x4e, 0x45, 0x28, 0x2c, 0xbf, 0xb3, 0xd4, 0x79, 0x41, + 0x15, 0x2e, 0x51, 0x50, 0x4b, 0xe3, 0x8b, 0x0c, 0x43, 0x3e, 0x33, 0x7e, 0xfb, 0xae, 0x91, 0x6b, + 0xb6, 0xa0, 0x3c, 0x92, 0x64, 0x62, 0x81, 0x11, 0x20, 0x0d, 0xd5, 0xf9, 0x73, 0xc9, 0x36, 0x95, + 0x85, 0xdc, 0x82, 0x2a, 0x3f, 0xee, 0x51, 0x86, 0xae, 0xed, 0x7a, 0x9d, 0x8e, 0xae, 0x8b, 0x34, + 0xa4, 0x92, 0xb8, 0xb6, 
0xa5, 0xa7, 0xf9, 0x63, 0x1e, 0xae, 0x4e, 0x78, 0xb6, 0x1f, 0xe3, 0x40, + 0xd6, 0xdd, 0x71, 0x0f, 0xd9, 0x60, 0x2c, 0xc1, 0xda, 0x44, 0xae, 0x42, 0x81, 0xc6, 0xb1, 0xe2, + 0x4d, 0x3d, 0xd2, 0x40, 0xea, 0x50, 0x74, 0x3d, 0x2e, 0xf6, 0x9f, 0xed, 0xa9, 0x83, 0x9b, 0xe9, + 0xb5, 0x24, 0x46, 0xb2, 0x02, 0x57, 0x3a, 0xd4, 0xf3, 0xd1, 0x55, 0x65, 0x94, 0xba, 0x13, 0x9b, + 0x64, 0x8d, 0x62, 0x5d, 0x20, 0xa9, 0x4b, 0x1a, 0xc8, 0x4d, 0xa8, 0x78, 0x41, 0xec, 0x7b, 0x8e, + 0x27, 0x6c, 0xd1, 0x0f, 0x55, 0x05, 0xa4, 0x01, 0xe5, 0xd4, 0xd3, 0xee, 0x87, 0x92, 0xe0, 0x04, + 0x1d, 0x25, 0xf0, 0x21, 0xc1, 0x09, 0x3a, 0xe4, 0x3a, 0x94, 0x3a, 0x3d, 0xdf, 0xb7, 0xb9, 0x43, + 0x43, 0x25, 0xdf, 0xd4, 0x6b, 0x4a, 0xf3, 0xbe, 0x43, 0x43, 0xb2, 0x0a, 0xa6, 0x4b, 0x05, 0x3d, + 0xa0, 0x1c, 0x95, 0x40, 0xd3, 0x63, 0x0d, 0xad, 0x92, 0x44, 0xdd, 0xed, 0x21, 0xe5, 0x87, 0xea, + 0xa5, 0x32, 0x86, 0x4f, 0xb1, 0x4f, 0xc3, 0x2f, 0x28, 0x3f, 0x6c, 0xbe, 0xcc, 0xc3, 0xca, 0x83, + 0xc8, 0xf7, 0x51, 0x0e, 0x05, 0x93, 0xa6, 0xa1, 0x0d, 0xc8, 0x7b, 0xae, 0x4a, 0x8d, 0xb1, 0xd5, + 0x48, 0x7a, 0x73, 0x7e, 0x67, 0xfb, 0x8f, 0xd3, 0xc6, 0xdc, 0xbe, 0x08, 0xc4, 0x23, 0x2f, 0xec, + 0x22, 0x8b, 0x99, 0x17, 0x8a, 0x9d, 0xed, 0x56, 0xde, 0x93, 0x95, 0x53, 0x38, 0x42, 0x7d, 0x05, + 0xe5, 0xcd, 0x1b, 0x17, 0x5f, 0xb4, 0x89, 0x17, 0x97, 0x1e, 0xfc, 0x08, 0x07, 0xe4, 0x0e, 0x4c, + 0xeb, 0x9a, 0x9b, 0xac, 0xf7, 0x09, 0x04, 0xe9, 0x3d, 0x2b, 0x18, 0xd9, 0x81, 0x2a, 0xed, 0x76, + 0x19, 0x76, 0xa9, 0x40, 0xd7, 0x16, 0x7c, 0xd8, 0xfd, 0xb2, 0xc8, 0xb9, 0x72, 0x0e, 0x6d, 0xf3, + 0xe6, 0xf7, 0x79, 0xa8, 0x0f, 0x73, 0x33, 0xb9, 0xd3, 0xfb, 0x70, 0x8d, 0xa7, 0x3b, 0xb2, 0x3b, + 0xe7, 0xb9, 0xb0, 0x3d, 0x97, 0x5b, 0xb9, 0xd5, 0xc2, 0x9a, 0xb1, 0xf5, 0xf1, 0xd9, 0x69, 0x63, + 0x69, 0xb8, 0xed, 0xb1, 0x7c, 0xf1, 0xc9, 0x59, 0x5c, 0xe2, 0x93, 0xa2, 0x5d, 0xfe, 0x5e, 0x0d, + 0xdf, 0x4b, 0x73, 0xa6, 0x4b, 0xf7, 0x3f, 0x17, 0x72, 0x36, 0x71, 0xeb, 0xff, 0x5a, 0xd6, 0x7e, + 0xcf, 0xc1, 0xfc, 0x84, 0xae, 0x24, 0x8b, 0x3f, 0xa4, 0x01, 0x8e, 0xd5, 0xa6, 0xb2, 0x90, 0xdb, + 0x30, 0x4d, 0x85, 0x60, 0xf2, 0xca, 0x0b, 0x6b, 0xe5, 0xcd, 0x9b, 0x7f, 0xdf, 0xe2, 0xd6, 0xef, + 0x0b, 0xc1, 0x5a, 0x1a, 0x45, 0xee, 0x80, 0xe9, 0x1c, 0x7a, 0xbe, 0xcb, 0x30, 0xb4, 0x0a, 0x8a, + 0x21, 0x43, 0x93, 0x6c, 0x0d, 0x31, 0xcb, 0xf7, 0xc0, 0x90, 0x74, 0x32, 0xbb, 0xa9, 0x70, 0x4b, + 0xa3, 0x8a, 0x5c, 0x86, 0xe9, 0x13, 0xea, 0xf7, 0x70, 0x2c, 0xef, 0xda, 0xa4, 0xdb, 0x5c, 0xd2, + 0xec, 0x7e, 0xca, 0x81, 0xd9, 0xee, 0x87, 0x7a, 0xb0, 0xb9, 0x0e, 0x25, 0xd1, 0x0f, 0xed, 0x77, + 0xc7, 0x00, 0x53, 0xf4, 0x43, 0xfd, 0x55, 0xb0, 0x05, 0x15, 0x19, 0x22, 0x1b, 0xaf, 0xcd, 0xd1, + 0x49, 0x04, 0x9f, 0xe1, 0x99, 0x16, 0x7d, 0xd5, 0x68, 0xf7, 0xd1, 0x21, 0x1f, 0xc1, 0xac, 0x7e, + 0x33, 0xe5, 0xb5, 0xe9, 0xc5, 0x46, 0xbf, 0x2e, 0x66, 0x86, 0x4e, 0xbd, 0xe4, 0xff, 0x60, 0x66, + 0xd8, 0x96, 0x74, 0xb4, 0x31, 0x12, 0x5d, 0x4d, 0x7d, 0x2a, 0xb8, 0xf9, 0x6d, 0x01, 0x4a, 0xc3, + 0x27, 0xee, 0x2f, 0x67, 0x9a, 0x87, 0x50, 0x0d, 0x51, 0x3c, 0x8f, 0xd8, 0x91, 0xad, 0x1e, 0xe0, + 0xcc, 0x47, 0xa9, 0x24, 0xb0, 0x2d, 0x89, 0x22, 0xdb, 0x50, 0x95, 0xa3, 0x51, 0x80, 0x81, 0xdd, + 0xe3, 0xb4, 0x8b, 0x99, 0x07, 0x1f, 0x39, 0x51, 0x3d, 0xc1, 0xe0, 0x2b, 0x09, 0x22, 0x3b, 0x32, + 0x25, 0xa1, 0xc0, 0x50, 0xea, 0x5d, 0x65, 0x37, 0xf3, 0x00, 0x34, 0x73, 0x0e, 0x94, 0x09, 0x26, + 0x8f, 0xa1, 0x96, 0x9e, 0x2b, 0x40, 0x2e, 0xd9, 0xb3, 0x7f, 0x2b, 0xcd, 0x26, 0xc8, 0x27, 0x09, + 0x90, 0x3c, 0x82, 0x19, 0x79, 0x3a, 0xd7, 0xe3, 0x47, 0xc9, 0xf1, 0xb2, 0x8e, 0x45, 0x95, 0x80, + 0xf6, 0xb7, 0x3d, 0x7e, 0xa4, 0xce, 0xb7, 0x75, 
0xeb, 0xd5, 0xaf, 0xf5, 0xa9, 0x57, 0x67, 0xf5, + 0xdc, 0xeb, 0xb3, 0x7a, 0xee, 0xcd, 0x59, 0x3d, 0xf7, 0xcb, 0x59, 0x3d, 0xf7, 0xe2, 0x6d, 0x7d, + 0xea, 0xf5, 0xdb, 0xfa, 0xd4, 0x9b, 0xb7, 0xf5, 0xa9, 0xaf, 0x8b, 0xc9, 0x77, 0xf3, 0x9f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xf6, 0x90, 0x8c, 0x85, 0x41, 0x0f, 0x00, 0x00, } func (this *SensitiveInfo) Equal(that interface{}) bool { @@ -1121,6 +1127,9 @@ func (m *StatementStatisticsKey) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + i = encodeVarintAppStats(dAtA, i, uint64(m.PlanHash)) + i-- + dAtA[i] = 0x50 i -= len(m.Database) copy(dAtA[i:], m.Database) i = encodeVarintAppStats(dAtA, i, uint64(len(m.Database))) @@ -1207,6 +1216,14 @@ func (m *CollectedStatementStatistics) MarshalToSizedBuffer(dAtA []byte) (int, e _ = i var l int _ = l + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.AggregatedTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.AggregatedTs):]) + if err21 != nil { + return 0, err21 + } + i -= n21 + i = encodeVarintAppStats(dAtA, i, uint64(n21)) + i-- + dAtA[i] = 0x22 i = encodeVarintAppStats(dAtA, i, uint64(m.ID)) i-- dAtA[i] = 0x18 @@ -1253,6 +1270,14 @@ func (m *CollectedTransactionStatistics) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + n24, err24 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.AggregatedTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.AggregatedTs):]) + if err24 != nil { + return 0, err24 + } + i -= n24 + i = encodeVarintAppStats(dAtA, i, uint64(n24)) + i-- + dAtA[i] = 0x22 { size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1610,6 +1635,7 @@ func (m *StatementStatisticsKey) Size() (n int) { n += 2 l = len(m.Database) n += 1 + l + sovAppStats(uint64(l)) + n += 1 + sovAppStats(uint64(m.PlanHash)) return n } @@ -1624,6 +1650,8 @@ func (m *CollectedStatementStatistics) Size() (n int) { l = m.Stats.Size() n += 1 + l + sovAppStats(uint64(l)) n += 1 + sovAppStats(uint64(m.ID)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.AggregatedTs) + n += 1 + l + sovAppStats(uint64(l)) return n } @@ -1642,6 +1670,8 @@ func (m *CollectedTransactionStatistics) Size() (n int) { n += 1 + l + sovAppStats(uint64(l)) l = m.Stats.Size() n += 1 + l + sovAppStats(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.AggregatedTs) + n += 1 + l + sovAppStats(uint64(l)) return n } @@ -3147,6 +3177,25 @@ func (m *StatementStatisticsKey) Unmarshal(dAtA []byte) error { } m.Database = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PlanHash", wireType) + } + m.PlanHash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAppStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PlanHash |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipAppStats(dAtA[iNdEx:]) @@ -3282,6 +3331,39 @@ func (m *CollectedStatementStatistics) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedTs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAppStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAppStats + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthAppStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.AggregatedTs, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAppStats(dAtA[iNdEx:]) @@ -3473,6 +3555,39 @@ func (m *CollectedTransactionStatistics) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregatedTs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAppStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAppStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAppStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.AggregatedTs, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAppStats(dAtA[iNdEx:]) diff --git a/pkg/roachpb/app_stats.proto b/pkg/roachpb/app_stats.proto index 261092410f9b..7aefad841ae5 100644 --- a/pkg/roachpb/app_stats.proto +++ b/pkg/roachpb/app_stats.proto @@ -179,6 +179,7 @@ message StatementStatisticsKey { optional bool vec = 7 [(gogoproto.nullable) = false]; optional bool full_scan = 8 [(gogoproto.nullable) = false]; optional string database = 9 [(gogoproto.nullable) = false]; + optional uint64 plan_hash = 10 [(gogoproto.nullable) = false]; } // CollectedStatementStatistics wraps collected timings and metadata for some @@ -190,6 +191,7 @@ message CollectedStatementStatistics { optional uint64 id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID", (gogoproto.casttype) = "StmtFingerprintID"]; optional StatementStatisticsKey key = 1 [(gogoproto.nullable) = false]; optional StatementStatistics stats = 2 [(gogoproto.nullable) = false]; + optional google.protobuf.Timestamp aggregated_ts = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } @@ -202,6 +204,7 @@ message CollectedTransactionStatistics { // App is the name of the app which executed the transaction. 
optional string app = 2 [(gogoproto.nullable) = false]; optional TransactionStatistics stats = 3 [(gogoproto.nullable) = false]; + optional google.protobuf.Timestamp aggregated_ts = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } diff --git a/pkg/sql/catalog/dbdesc/BUILD.bazel b/pkg/sql/catalog/dbdesc/BUILD.bazel index 0cfee769f6b7..548e17dacb22 100644 --- a/pkg/sql/catalog/dbdesc/BUILD.bazel +++ b/pkg/sql/catalog/dbdesc/BUILD.bazel @@ -38,6 +38,7 @@ go_test( "//pkg/sql/parser", "//pkg/sql/sem/tree", "//pkg/util/leaktest", + "//pkg/util/log", "@com_github_cockroachdb_redact//:redact", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v2//:yaml_v2", diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go index b9132c3e0202..c36f3b32f3f9 100644 --- a/pkg/sql/catalog/dbdesc/database_desc.go +++ b/pkg/sql/catalog/dbdesc/database_desc.go @@ -443,3 +443,18 @@ func (desc *Mutable) SetDefaultPrivilegeDescriptor( ) { desc.DefaultPrivileges = defaultPrivilegeDescriptor } + +// maybeRemoveDroppedSelfEntryFromSchemas removes an entry in the Schemas map corresponding to the +// database itself which was added due to a bug in prior versions when dropping any user-defined schema. +// The bug inserted an entry for the database rather than the schema being dropped. This function fixes the +// problem by deleting the erroneous entry. +func maybeRemoveDroppedSelfEntryFromSchemas(dbDesc *descpb.DatabaseDescriptor) bool { + if dbDesc == nil { + return false + } + if sc, ok := dbDesc.Schemas[dbDesc.Name]; ok && sc.ID == dbDesc.ID { + delete(dbDesc.Schemas, dbDesc.Name) + return true + } + return false +} diff --git a/pkg/sql/catalog/dbdesc/database_desc_builder.go b/pkg/sql/catalog/dbdesc/database_desc_builder.go index 85c8009b3513..1944fd099b51 100644 --- a/pkg/sql/catalog/dbdesc/database_desc_builder.go +++ b/pkg/sql/catalog/dbdesc/database_desc_builder.go @@ -58,8 +58,10 @@ func (ddb *databaseDescriptorBuilder) RunPostDeserializationChanges( _ context.Context, _ catalog.DescGetter, ) error { ddb.maybeModified = protoutil.Clone(ddb.original).(*descpb.DatabaseDescriptor) - ddb.changed = descpb.MaybeFixPrivileges(ddb.maybeModified.ID, ddb.maybeModified.ID, + privsChanged := descpb.MaybeFixPrivileges(ddb.maybeModified.ID, ddb.maybeModified.ID, &ddb.maybeModified.Privileges, privilege.Database) + removedSelfEntryInSchemas := maybeRemoveDroppedSelfEntryFromSchemas(ddb.maybeModified) + ddb.changed = privsChanged || removedSelfEntryInSchemas return nil } diff --git a/pkg/sql/catalog/dbdesc/database_test.go b/pkg/sql/catalog/dbdesc/database_test.go index bf6ad1b2e46d..fb030982c550 100644 --- a/pkg/sql/catalog/dbdesc/database_test.go +++ b/pkg/sql/catalog/dbdesc/database_test.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/redact" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -297,3 +298,30 @@ func TestValidateCrossDatabaseReferences(t *testing.T) { } } } + +// TestFixDroppedSchemaName tests fixing a corrupted descriptor as part of +// RunPostDeserializationChanges. It tests for a particular corruption that +// happened when a schema was dropped that had the same name as its parent +// database name. 
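+// The corrupted shape, per maybeRemoveDroppedSelfEntryFromSchemas, is a
+// Schemas map entry keyed by the database's own name, carrying the
+// database's own ID and marked Dropped; RunPostDeserializationChanges is
+// expected to delete that entry.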
+func TestFixDroppedSchemaName(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + const ( + dbName = "foo" + dbID = 1 + ) + dbDesc := descpb.DatabaseDescriptor{ + Name: dbName, + ID: dbID, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + dbName: {ID: dbID, Dropped: true}, + }, + } + b := NewBuilder(&dbDesc) + require.NoError(t, b.RunPostDeserializationChanges(ctx, nil)) + desc := b.BuildCreatedMutableDatabase() + require.Truef(t, desc.HasPostDeserializationChanges(), "expected changes in descriptor, found none") + _, ok := desc.Schemas[dbName] + require.Falsef(t, ok, "erroneous entry exists") +} diff --git a/pkg/sql/colexec/BUILD.bazel b/pkg/sql/colexec/BUILD.bazel index 660d6098a276..bd6c08d3ed4f 100644 --- a/pkg/sql/colexec/BUILD.bazel +++ b/pkg/sql/colexec/BUILD.bazel @@ -82,6 +82,7 @@ go_library( "//pkg/util/tracing", "@com_github_cockroachdb_apd_v2//:apd", # keep "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", "@com_github_marusama_semaphore//:semaphore", ], ) diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go index 98408c7f089d..9949b7d8c3e7 100644 --- a/pkg/sql/colexec/colbuilder/execplan.go +++ b/pkg/sql/colexec/colbuilder/execplan.go @@ -555,9 +555,9 @@ func (r opResult) createAndWrapRowSource( if args.ProcessorConstructor == nil { return errors.New("processorConstructor is nil") } - log.VEventf(ctx, 1, "planning a row-execution processor in the vectorized flow because %v", causeToWrap) + log.VEventf(ctx, 1, "planning a row-execution processor in the vectorized flow: %v", causeToWrap) if err := canWrap(flowCtx.EvalCtx.SessionData.VectorizeMode, spec); err != nil { - log.VEventf(ctx, 1, "planning a wrapped processor failed because %v", err) + log.VEventf(ctx, 1, "planning a wrapped processor failed: %v", err) // Return the original error for why we don't support this spec // natively since it is more interesting. 
return causeToWrap @@ -698,7 +698,7 @@ func NewColOperator( } result.OpMonitors = result.OpMonitors[:0] if returnedErr != nil { - log.VEventf(ctx, 1, "vectorized planning failed with %v", returnedErr) + log.VEventf(ctx, 1, "vectorized planning failed: %v", returnedErr) } } if panicErr != nil { @@ -1536,7 +1536,7 @@ func NewColOperator( streamingAllocator, r.Root, i, castedIdx, actual, expected, evalCtx, ) if err != nil { - return r, errors.AssertionFailedf("unexpectedly couldn't plan a cast although IsCastSupported returned true: %v", err) + return r, errors.NewAssertionErrorWithWrappedErrf(err, "unexpectedly couldn't plan a cast although IsCastSupported returned true") } projection[i] = uint32(castedIdx) typesWithCasts = append(typesWithCasts, expected) diff --git a/pkg/sql/colexec/hash_based_partitioner.go b/pkg/sql/colexec/hash_based_partitioner.go index 67bdc0f52f31..32f1ec704f77 100644 --- a/pkg/sql/colexec/hash_based_partitioner.go +++ b/pkg/sql/colexec/hash_based_partitioner.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" "github.com/marusama/semaphore" ) @@ -120,7 +121,7 @@ type hashBasedPartitioner struct { colexecop.CloserHelper unlimitedAllocator *colmem.Allocator - name string + name redact.SafeString state hashBasedPartitionerState inputs []colexecop.Operator inputTypes [][]*types.T @@ -209,7 +210,7 @@ func newHashBasedPartitioner( unlimitedAllocator *colmem.Allocator, flowCtx *execinfra.FlowCtx, args *colexecargs.NewColOperatorArgs, - name string, + name redact.SafeString, inputs []colexecop.Operator, inputTypes [][]*types.T, hashCols [][]uint32, @@ -536,7 +537,7 @@ StateChanged: if partitionInfo.memSize <= op.maxPartitionSizeToProcessUsingMain { log.VEventf(op.Ctx, 2, `%s processes partition with idx %d of size %s using the "main" strategy`, - op.name, partitionIdx, humanizeutil.IBytes(partitionInfo.memSize), + op.name, partitionIdx, redact.SafeString(humanizeutil.IBytes(partitionInfo.memSize)), ) for i := range op.partitionedInputs { op.partitionedInputs[i].partitionIdx = partitionIdx diff --git a/pkg/sql/colflow/colrpc/BUILD.bazel b/pkg/sql/colflow/colrpc/BUILD.bazel index 4ecb86d9aaa1..fe333a017115 100644 --- a/pkg/sql/colflow/colrpc/BUILD.bazel +++ b/pkg/sql/colflow/colrpc/BUILD.bazel @@ -31,6 +31,7 @@ go_library( "@com_github_apache_arrow_go_arrow//array", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", + "@com_github_cockroachdb_redact//:redact", ], ) diff --git a/pkg/sql/colflow/colrpc/outbox.go b/pkg/sql/colflow/colrpc/outbox.go index fae781a7f4de..be83658ccb4e 100644 --- a/pkg/sql/colflow/colrpc/outbox.go +++ b/pkg/sql/colflow/colrpc/outbox.go @@ -13,7 +13,6 @@ package colrpc import ( "bytes" "context" - "fmt" "io" "sync/atomic" "time" @@ -32,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" + "github.com/cockroachdb/redact" ) // flowStreamClient is a utility interface used to mock out the RPC layer. @@ -222,7 +222,10 @@ func (o *Outbox) Run( // called, for all other errors flowCtxCancel is. The given error is logged with // the associated opName. 
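+// (opName is a redact.SafeString: it is kept verbatim when logs are redacted.)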
func handleStreamErr( - ctx context.Context, opName string, err error, flowCtxCancel, outboxCtxCancel context.CancelFunc, + ctx context.Context, + opName redact.SafeString, + err error, + flowCtxCancel, outboxCtxCancel context.CancelFunc, ) { if err == io.EOF { if log.V(1) { @@ -235,7 +238,7 @@ func handleStreamErr( } } -func (o *Outbox) moveToDraining(ctx context.Context, reason string) { +func (o *Outbox) moveToDraining(ctx context.Context, reason redact.RedactableString) { if atomic.CompareAndSwapUint32(&o.draining, 0, 1) { log.VEventf(ctx, 2, "Outbox moved to draining (%s)", reason) } @@ -406,9 +409,11 @@ func (o *Outbox) runWithStream( terminatedGracefully, errToSend := o.sendBatches(ctx, stream, flowCtxCancel, outboxCtxCancel) if terminatedGracefully || errToSend != nil { - reason := "terminated gracefully" + var reason redact.RedactableString if errToSend != nil { - reason = fmt.Sprintf("encountered error when sending batches: %v", errToSend) + reason = redact.Sprintf("encountered error when sending batches: %v", errToSend) + } else { + reason = redact.Sprint(redact.SafeString("terminated gracefully")) } o.moveToDraining(ctx, reason) if err := o.sendMetadata(ctx, stream, errToSend); err != nil { diff --git a/pkg/sql/colflow/vectorized_flow.go b/pkg/sql/colflow/vectorized_flow.go index 660faa181b64..59a830b60ad4 100644 --- a/pkg/sql/colflow/vectorized_flow.go +++ b/pkg/sql/colflow/vectorized_flow.go @@ -275,7 +275,7 @@ func (f *vectorizedFlow) GetPath(ctx context.Context) string { f.tempStorage.path = filepath.Join(f.Cfg.TempStoragePath, tempDirName) log.VEventf(ctx, 1, "flow %s spilled to disk, stack trace: %s", f.ID, util.GetSmallTrace(2)) if err := f.Cfg.TempFS.MkdirAll(f.tempStorage.path); err != nil { - colexecerror.InternalError(errors.Errorf("unable to create temporary storage directory: %v", err)) + colexecerror.InternalError(errors.Wrap(err, "unable to create temporary storage directory")) } return f.tempStorage.path } diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index 16f8b30f5e3b..6289f893a2ce 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -159,7 +159,7 @@ func (ds *ServerImpl) Drain( ctx context.Context, flowDrainWait time.Duration, reporter func(int, redact.SafeString), ) { if err := ds.setDraining(true); err != nil { - log.Warningf(ctx, "unable to gossip distsql draining state: %s", err) + log.Warningf(ctx, "unable to gossip distsql draining state: %v", err) } flowWait := flowDrainWait diff --git a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel index a887dfcbc6b2..afebce05b2cf 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel +++ b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel @@ -4,8 +4,11 @@ go_library( name = "persistedsqlstats", srcs = [ "cluster_settings.go", + "combined_iterator.go", "flush.go", + "mem_iterator.go", "provider.go", + "stmt_reader.go", "test_utils.go", "writer.go", ], @@ -26,6 +29,7 @@ go_library( "//pkg/sql/sqlstats/sslocal", "//pkg/sql/sqlstats/ssmemstorage", "//pkg/sql/sqlutil", + "//pkg/util/encoding", "//pkg/util/log", "//pkg/util/metric", "//pkg/util/stop", @@ -39,6 +43,7 @@ go_test( srcs = [ "flush_test.go", "main_test.go", + "reader_test.go", ], deps = [ ":persistedsqlstats", diff --git a/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go b/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go new file mode 100644 index 000000000000..f4e3378f0491 --- /dev/null +++ b/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go @@ 
-0,0 +1,198 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package persistedsqlstats
+
+import (
+	"context"
+	"strings"
+
+	"github.com/cockroachdb/cockroach/pkg/roachpb"
+	"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
+	"github.com/cockroachdb/errors"
+)
+
+// CombinedStmtStatsIterator merges the in-memory and persisted stmt stats
+// produced by the in-memory iterator and the on-disk iterator.
+type CombinedStmtStatsIterator struct {
+	nextToRead     *roachpb.CollectedStatementStatistics
+	expectedColCnt int
+
+	mem struct {
+		canBeAdvanced bool
+		paused        bool
+		it            *memStmtStatsIterator
+	}
+
+	disk struct {
+		canBeAdvanced bool
+		paused        bool
+		it            sqlutil.InternalRows
+	}
+}
+
+// NewCombinedStmtStatsIterator returns a new instance of
+// CombinedStmtStatsIterator.
+func NewCombinedStmtStatsIterator(
+	memIter *memStmtStatsIterator, diskIter sqlutil.InternalRows, expectedColCnt int,
+) *CombinedStmtStatsIterator {
+	c := &CombinedStmtStatsIterator{
+		expectedColCnt: expectedColCnt,
+	}
+
+	c.mem.it = memIter
+	c.mem.canBeAdvanced = true
+
+	c.disk.it = diskIter
+	c.disk.canBeAdvanced = true
+
+	return c
+}
+
+// Next increments the internal counter of the CombinedStmtStatsIterator. It
+// returns true if the following Cur() call will be valid, false otherwise.
+func (c *CombinedStmtStatsIterator) Next(ctx context.Context) (bool, error) {
+	var err error
+
+	if c.mem.canBeAdvanced && !c.mem.paused {
+		c.mem.canBeAdvanced = c.mem.it.Next()
+	}
+
+	if c.disk.canBeAdvanced && !c.disk.paused {
+		c.disk.canBeAdvanced, err = c.disk.it.Next(ctx)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// Both iterators are exhausted; no new value can be produced.
+	if !c.mem.canBeAdvanced && !c.disk.canBeAdvanced {
+		// Sanity check.
+		if c.mem.paused || c.disk.paused {
+			return false, errors.AssertionFailedf("bug: leaked iterator")
+		}
+		return false, nil
+	}
+
+	// If memIter is exhausted but the disk iterator can still move forward,
+	// we promote disk.Cur() and resume the disk iterator if it was paused.
+	if !c.mem.canBeAdvanced {
+		row := c.disk.it.Cur()
+		if row == nil {
+			return false, errors.New("unexpected nil row")
+		}
+
+		if len(row) != c.expectedColCnt {
+			return false, errors.AssertionFailedf("unexpectedly received %d columns", len(row))
+		}
+
+		c.nextToRead, err = rowToStmtStats(c.disk.it.Cur())
+		if err != nil {
+			return false, err
+		}
+
+		if c.disk.canBeAdvanced {
+			c.disk.paused = false
+		}
+		return true, nil
+	}
+
+	// If diskIter is exhausted but the mem iterator can still move forward,
+	// we promote mem.Cur() and resume the mem iterator if it was paused.
+	if !c.disk.canBeAdvanced {
+		c.nextToRead = c.mem.it.Cur()
+
+		if c.mem.canBeAdvanced {
+			c.mem.paused = false
+		}
+		return true, nil
+	}
+
+	// Both iterators can be moved forward. Now we check the value of Cur()
+	// for both iterators. We will have a few scenarios:
+	// 1. mem.Cur() < disk.Cur():
+	//    we promote mem.Cur() to c.nextToRead. We then pause
+	//    the disk iterator and resume the mem iterator for next iteration.
+	// 2. mem.Cur() == disk.Cur():
+	//    we promote both mem.Cur() and disk.Cur() by merging both
+	//    stats.
We resume both iterators for the next iteration. + // 3. mem.Cur() > disk.Cur(): + // we promote disk.Cur() to c.nextToRead. We then pause + // the mem iterator and resume the disk iterator for the next iteration. + memCurVal := c.mem.it.Cur() + diskCurVal, err := rowToStmtStats(c.disk.it.Cur()) + if err != nil { + return false, err + } + + switch compareStmtStats(memCurVal, diskCurVal) { + case -1: + // First Case. + c.nextToRead = memCurVal + c.mem.paused = false + c.disk.paused = true + case 0: + // Second Case. + c.nextToRead = memCurVal + c.nextToRead.Stats.Add(&diskCurVal.Stats) + c.mem.paused = false + c.disk.paused = false + case 1: + // Third Case. + c.nextToRead = diskCurVal + c.mem.paused = true + c.disk.paused = false + default: + return false, errors.AssertionFailedf("bug: impossible state") + } + + return true, nil +} + +// Cur returns the roachpb.CollectedStatementStatistics at the current position +// of the internal counter. +func (c *CombinedStmtStatsIterator) Cur() *roachpb.CollectedStatementStatistics { + return c.nextToRead +} + +func compareStmtStats(lhs, rhs *roachpb.CollectedStatementStatistics) int { + // 1. We compare their aggregated_ts. + if lhs.AggregatedTs.Before(rhs.AggregatedTs) { + return -1 + } + if lhs.AggregatedTs.After(rhs.AggregatedTs) { + return 1 + } + + // 2. We compare their app names. + cmp := strings.Compare(lhs.Key.App, rhs.Key.App) + if cmp != 0 { + return cmp + } + + // 3. We compare their fingerprint IDs. + if lhs.ID < rhs.ID { + return -1 + } + if lhs.ID > rhs.ID { + return 1 + } + + // 4. We compare their plan hashes. + if lhs.Key.PlanHash < rhs.Key.PlanHash { + return -1 + } + if lhs.Key.PlanHash > rhs.Key.PlanHash { + return 1 + } + + return 0 +} diff --git a/pkg/sql/sqlstats/persistedsqlstats/flush.go b/pkg/sql/sqlstats/persistedsqlstats/flush.go index 7d599b2404ef..b3b2f2ba9f12 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/flush.go +++ b/pkg/sql/sqlstats/persistedsqlstats/flush.go @@ -34,14 +34,14 @@ func (s *PersistedSQLStats) Flush(ctx context.Context) { // The flush routine directly logs errors if they are encountered. Therefore, // no error is returned here.
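The pause/resume logic in Next and the ordering defined by compareStmtStats above together implement a classic merge of two sorted streams: on each step the smaller head is promoted and only that side advances, while a tie merges the two values via Stats.Add. A minimal, self-contained sketch of the same pattern over plain ints (an editorial illustration, not part of the patch; summing stands in for Stats.Add):

package main

import "fmt"

// mergeSorted merges two ascending slices the way CombinedStmtStatsIterator
// does: the smaller head is emitted and only that side advances (the other
// side stays "paused"); on a tie both sides advance and the values merge.
func mergeSorted(mem, disk []int) []int {
	var out []int
	i, j := 0, 0
	for i < len(mem) && j < len(disk) {
		switch {
		case mem[i] < disk[j]:
			out = append(out, mem[i])
			i++ // disk stays paused
		case mem[i] > disk[j]:
			out = append(out, disk[j])
			j++ // mem stays paused
		default:
			out = append(out, mem[i]+disk[j]) // merge, like Stats.Add
			i++
			j++
		}
	}
	// One side is exhausted: promote whatever remains on the other side.
	out = append(out, mem[i:]...)
	out = append(out, disk[j:]...)
	return out
}

func main() {
	fmt.Println(mergeSorted([]int{1, 3, 5}, []int{2, 3, 6})) // [1 2 6 5 6]
}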
- _ = s.IterateStatementStats(ctx, &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { + _ = s.SQLStats.IterateStatementStats(ctx, &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { s.doFlush(ctx, func() error { return s.doFlushSingleStmtStats(ctx, statistics) }, "failed to flush statement statistics" /* errMsg */) return nil }) - _ = s.IterateTransactionStats(ctx, &sqlstats.IteratorOptions{}, func(ctx context.Context, key roachpb.TransactionFingerprintID, statistics *roachpb.CollectedTransactionStatistics) error { + _ = s.SQLStats.IterateTransactionStats(ctx, &sqlstats.IteratorOptions{}, func(ctx context.Context, key roachpb.TransactionFingerprintID, statistics *roachpb.CollectedTransactionStatistics) error { s.doFlush(ctx, func() error { return s.doFlushSingleTxnStats(ctx, key, statistics) }, "failed to flush transaction statistics" /* errMsg */) diff --git a/pkg/sql/sqlstats/persistedsqlstats/flush_test.go b/pkg/sql/sqlstats/persistedsqlstats/flush_test.go index 1e1a1de33737..b8651ccb6a9e 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/flush_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/flush_test.go @@ -40,28 +40,28 @@ type testCase struct { count int64 } +var testQueries = []testCase{ + { + query: "SELECT 1", + fingerprint: "SELECT _", + count: 3, + }, + { + query: "SELECT 1, 2, 3", + fingerprint: "SELECT _, _, _", + count: 10, + }, + { + query: "SELECT 1, 1 WHERE 1 < 10", + fingerprint: "SELECT _, _ WHERE _ < _", + count: 7, + }, +} + func TestSQLStatsFlush(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testCases := []testCase{ - { - query: "SELECT 1", - fingerprint: "SELECT _", - count: 3, - }, - { - query: "SELECT 1, 2, 3", - fingerprint: "SELECT _, _, _", - count: 10, - }, - { - query: "SELECT 1, 1 WHERE 1 < 10", - fingerprint: "SELECT _, _ WHERE _ < _", - count: 7, - }, - } - fakeTime := stubTime{ aggInterval: time.Hour, } @@ -117,24 +117,24 @@ func TestSQLStatsFlush(t *testing.T) { // Regular inserts. { - for _, tc := range testCases { + for _, tc := range testQueries { for i := int64(0); i < tc.count; i++ { firstSQLConn.Exec(t, tc.query) } } - verifyInMemoryStatsCorrectness(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsCorrectness(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) firstServerSQLStats.Flush(ctx) secondServerSQLStats.Flush(ctx) - verifyInMemoryStatsEmpty(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) // For each test case, we verify that it's being properly inserted exactly // once and it is exactly executed tc.count number of times. - for _, tc := range testCases { + for _, tc := range testQueries { verifyNumOfInsertedEntries(t, secondSQLConn, tc.fingerprint, firstServer.NodeID(), 1 /* expectedStmtEntryCnt */, 1 /* expectedTxnEntryCtn */) verifyInsertedFingerprintExecCount(t, secondSQLConn, tc.fingerprint, fakeTime.getAggTimeTs(), firstServer.NodeID(), tc.count) } @@ -143,23 +143,23 @@ func TestSQLStatsFlush(t *testing.T) { // We insert the same data during the same aggregation window to ensure that // no new entries will be created but the statistics is updated. 
{ - for i := range testCases { + for i := range testQueries { // Increment the execution count. - testCases[i].count++ - for execCnt := int64(0); execCnt < testCases[i].count; execCnt++ { - firstSQLConn.Exec(t, testCases[i].query) + testQueries[i].count++ + for execCnt := int64(0); execCnt < testQueries[i].count; execCnt++ { + firstSQLConn.Exec(t, testQueries[i].query) } } - verifyInMemoryStatsCorrectness(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsCorrectness(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) firstServerSQLStats.Flush(ctx) secondServerSQLStats.Flush(ctx) - verifyInMemoryStatsEmpty(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) - for _, tc := range testCases { + for _, tc := range testQueries { verifyNumOfInsertedEntries(t, secondSQLConn, tc.fingerprint, firstServer.NodeID(), 1 /* expectedStmtEntryCnt */, 1 /* expectedTxnEntryCtn */) // The execution count is doubled here because we execute all of the // statements here in the same aggregation interval. @@ -171,21 +171,21 @@ func TestSQLStatsFlush(t *testing.T) { { fakeTime.setTime(fakeTime.StubTimeNow().Add(time.Hour * 3)) - for _, tc := range testCases { + for _, tc := range testQueries { for i := int64(0); i < tc.count; i++ { firstSQLConn.Exec(t, tc.query) } } - verifyInMemoryStatsCorrectness(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsCorrectness(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) firstServerSQLStats.Flush(ctx) secondServerSQLStats.Flush(ctx) - verifyInMemoryStatsEmpty(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) - for _, tc := range testCases { + for _, tc := range testQueries { // We expect exactly 2 entries since we are in a different aggregation window. verifyNumOfInsertedEntries(t, secondSQLConn, tc.fingerprint, firstServer.NodeID(), 2 /* expectedStmtEntryCnt */, 2 /* expectedTxnEntryCtn */) verifyInsertedFingerprintExecCount(t, secondSQLConn, tc.fingerprint, fakeTime.getAggTimeTs(), firstServer.NodeID(), tc.count) @@ -194,24 +194,24 @@ func TestSQLStatsFlush(t *testing.T) { // We run queries in a different server and trigger the flush. 
{ - for _, tc := range testCases { + for _, tc := range testQueries { for i := int64(0); i < tc.count; i++ { secondSQLConn.Exec(t, tc.query) require.NoError(t, err) } } - verifyInMemoryStatsEmpty(t, testCases, firstServerSQLStats) - verifyInMemoryStatsCorrectness(t, testCases, secondServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsCorrectness(t, testQueries, secondServerSQLStats) firstServerSQLStats.Flush(ctx) secondServerSQLStats.Flush(ctx) - verifyInMemoryStatsEmpty(t, testCases, firstServerSQLStats) - verifyInMemoryStatsEmpty(t, testCases, secondServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, firstServerSQLStats) + verifyInMemoryStatsEmpty(t, testQueries, secondServerSQLStats) // Ensure that we encode the correct node_id for the new entry and did not // accidentally tamper the entries written by another server. - for _, tc := range testCases { + for _, tc := range testQueries { verifyNumOfInsertedEntries(t, firstSQLConn, tc.fingerprint, secondServer.NodeID(), 1 /* expectedStmtEntryCnt */, 1 /* expectedTxnEntryCtn */) verifyInsertedFingerprintExecCount(t, firstSQLConn, tc.fingerprint, fakeTime.getAggTimeTs(), secondServer.NodeID(), tc.count) verifyNumOfInsertedEntries(t, secondSQLConn, tc.fingerprint, firstServer.NodeID(), 2 /* expectedStmtEntryCnt */, 2 /* expectedTxnEntryCtn */) @@ -336,7 +336,7 @@ func verifyInMemoryStatsCorrectness( t *testing.T, tcs []testCase, statsProvider *persistedsqlstats.PersistedSQLStats, ) { for _, tc := range tcs { - err := statsProvider.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { + err := statsProvider.SQLStats.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { if tc.fingerprint == statistics.Key.Query { require.Equal(t, tc.count, statistics.Stats.Count, "fingerprint: %s", tc.fingerprint) } @@ -351,7 +351,7 @@ func verifyInMemoryStatsEmpty( t *testing.T, tcs []testCase, statsProvider *persistedsqlstats.PersistedSQLStats, ) { for _, tc := range tcs { - err := statsProvider.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { + err := statsProvider.SQLStats.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { if tc.fingerprint == statistics.Key.Query { require.Equal(t, 0 /* expected */, statistics.Stats.Count, "fingerprint: %s", tc.fingerprint) } diff --git a/pkg/sql/sqlstats/persistedsqlstats/mem_iterator.go b/pkg/sql/sqlstats/persistedsqlstats/mem_iterator.go new file mode 100644 index 000000000000..b606f2ecb89f --- /dev/null +++ b/pkg/sql/sqlstats/persistedsqlstats/mem_iterator.go @@ -0,0 +1,46 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package persistedsqlstats + +import ( + "time" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/sslocal" +) + +// memStmtStatsIterator wraps a sslocal.StmtStatsIterator. Since in-memory +// statement statistics do not have the aggregated_ts field populated, +// memStmtStatsIterator overrides the sslocal.StmtStatsIterator's Cur() method +// to populate the aggregated_ts field on the returned +// roachpb.CollectedStatementStatistics. +type memStmtStatsIterator struct { + *sslocal.StmtStatsIterator + aggregatedTs time.Time +} + +func newMemStmtStatsIterator( + stats *sslocal.SQLStats, options *sqlstats.IteratorOptions, aggregatedTS time.Time, +) *memStmtStatsIterator { + return &memStmtStatsIterator{ + StmtStatsIterator: stats.StmtStatsIterator(options), + aggregatedTs: aggregatedTS, + } +} + +// Cur calls m.StmtStatsIterator.Cur() and populates the AggregatedTs field of +// the returned statistics using m.aggregatedTs. +func (m *memStmtStatsIterator) Cur() *roachpb.CollectedStatementStatistics { + c := m.StmtStatsIterator.Cur() + c.AggregatedTs = m.aggregatedTs + return c +} diff --git a/pkg/sql/sqlstats/persistedsqlstats/reader_test.go b/pkg/sql/sqlstats/persistedsqlstats/reader_test.go new file mode 100644 index 000000000000..011112e7ad97 --- /dev/null +++ b/pkg/sql/sqlstats/persistedsqlstats/reader_test.go @@ -0,0 +1,127 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
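memStmtStatsIterator above uses Go's embed-and-shadow idiom: the embedded *sslocal.StmtStatsIterator supplies Next and the rest of the iterator surface, and only Cur is overridden to stamp the timestamp on the way out. A standalone sketch of the pattern with hypothetical stand-in types (illustrative only, not CockroachDB APIs):

package main

import (
	"fmt"
	"time"
)

// collected mimics the shape of a stats object whose timestamp is zero
// until stamped.
type collected struct {
	aggregatedTs time.Time
	query        string
}

// baseIter stands in for the embedded underlying iterator.
type baseIter struct{ cur *collected }

func (b *baseIter) Cur() *collected { return b.cur }

// stampedIter embeds baseIter and shadows Cur, the same pattern
// memStmtStatsIterator uses to fill in aggregated_ts on the way out.
type stampedIter struct {
	*baseIter
	ts time.Time
}

func (s *stampedIter) Cur() *collected {
	c := s.baseIter.Cur()
	c.aggregatedTs = s.ts
	return c
}

func main() {
	it := &stampedIter{
		baseIter: &baseIter{cur: &collected{query: "SELECT _"}},
		ts:       time.Date(2021, 7, 1, 12, 0, 0, 0, time.UTC),
	}
	fmt.Println(it.Cur().aggregatedTs) // 2021-07-01 12:00:00 +0000 UTC
}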
+ +package persistedsqlstats_test + +import ( + "context" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +func TestPersistedSQLStatsRead(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + fakeTime := stubTime{ + aggInterval: time.Hour, + } + fakeTime.setTime(timeutil.Now()) + + testCluster := serverutils.StartNewTestCluster(t, 3 /* numNodes */, base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + Knobs: base.TestingKnobs{ + SQLStatsKnobs: &persistedsqlstats.TestingKnobs{ + StubTimeNow: fakeTime.StubTimeNow, + DisableFollowerRead: true, + }, + }, + }, + }) + ctx := context.Background() + defer testCluster.Stopper().Stop(ctx) + + server1 := testCluster.Server(0 /* idx */) + sqlConn := sqlutils.MakeSQLRunner(testCluster.ServerConn(0 /* idx */)) + sqlStats := server1.SQLServer().(*sql.Server).GetSQLStatsProvider().(*persistedsqlstats.PersistedSQLStats) + + expectedStmtFingerprints := make(map[string]int64) + for _, tc := range testQueries { + expectedStmtFingerprints[tc.fingerprint] = tc.count + for i := int64(0); i < tc.count; i++ { + sqlConn.Exec(t, tc.query) + } + } + + t.Run("in-memory only read", func(t *testing.T) { + verifyStoredStmtFingerprints(t, expectedStmtFingerprints, sqlStats) + }) + + t.Run("disk only read", func(t *testing.T) { + sqlStats.Flush(ctx) + verifyStoredStmtFingerprints(t, expectedStmtFingerprints, sqlStats) + }) + + t.Run("hybrid read", func(t *testing.T) { + // We execute each test query one more time without flushing the stats. + // This means that we should see the exact same result as in the previous + // subtest, except the execution count field will be incremented. We + // should not see duplicated entries.
+ for _, tc := range testQueries { + sqlConn.Exec(t, tc.query) + expectedStmtFingerprints[tc.fingerprint]++ + } + + foundQueries := make(map[string]struct{}) + require.NoError(t, sqlStats.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{ + SortedKey: true, + SortedAppNames: true, + }, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { + if expectedExecCount, ok := expectedStmtFingerprints[statistics.Key.Query]; ok { + _, ok = foundQueries[statistics.Key.Query] + require.False(t, ok, "should only find one stats entry for %s, but found more than one", statistics.Key.Query) + foundQueries[statistics.Key.Query] = struct{}{} + require.Equal(t, expectedExecCount, statistics.Stats.Count, "query: %s", statistics.Key.Query) + } + return nil + })) + + for expectedStmtFingerprint := range expectedStmtFingerprints { + _, ok := foundQueries[expectedStmtFingerprint] + require.True(t, ok, "expected %s to be returned, but it wasn't", expectedStmtFingerprint) + } + }) +} + +func verifyStoredStmtFingerprints( + t *testing.T, + expectedStmtFingerprints map[string]int64, + sqlStats *persistedsqlstats.PersistedSQLStats, +) { + foundQueries := make(map[string]struct{}) + require.NoError(t, sqlStats.IterateStatementStats(context.Background(), &sqlstats.IteratorOptions{}, func(ctx context.Context, statistics *roachpb.CollectedStatementStatistics) error { + if expectedExecCount, ok := expectedStmtFingerprints[statistics.Key.Query]; ok { + foundQueries[statistics.Key.Query] = struct{}{} + require.Equal(t, expectedExecCount, statistics.Stats.Count) + } + return nil + })) + + for expectedStmtFingerprint := range expectedStmtFingerprints { + _, ok := foundQueries[expectedStmtFingerprint] + require.True(t, ok, "expected %s to be returned, but it wasn't", expectedStmtFingerprint) + } +} diff --git a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding_test.go b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding_test.go index 64aa1321c512..b7870561934c 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_encoding_test.go @@ -78,6 +78,7 @@ var fieldBlacklist = map[string]struct{}{ "LegacyLastErrRedacted": {}, "LastExecTimestamp": {}, "StatementFingerprintIDs": {}, + "AggregatedTs": {}, } func fillObject(t *testing.T, val reflect.Value, data *randomData) { diff --git a/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go b/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go new file mode 100644 index 000000000000..7288fcdfbce7 --- /dev/null +++ b/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go @@ -0,0 +1,181 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+ +package persistedsqlstats + +import ( + "context" + "fmt" + "strings" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" + "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/errors" +) + +// IterateStatementStats implements sqlstats.Provider interface. +func (s *PersistedSQLStats) IterateStatementStats( + ctx context.Context, options *sqlstats.IteratorOptions, visitor sqlstats.StatementVisitor, +) (err error) { + // We override the sorting options since otherwise we would need to implement + // sorted and unsorted merge separately. We can revisit this decision if + // there's a good reason that we want the performance optimization from + // unsorted merge. + options.SortedKey = true + options.SortedAppNames = true + + // We compute the current aggregated_ts so that the in-memory stats can be + // merged with the persisted stats. + curAggTs := s.computeAggregatedTs() + memIter := newMemStmtStatsIterator(s.SQLStats, options, curAggTs) + + var persistedIter sqlutil.InternalRows + var colCnt int + persistedIter, colCnt, err = s.persistedStmtStatsIter(ctx, options) + if err != nil { + return err + } + defer func() { + closeError := persistedIter.Close() + if closeError != nil { + err = errors.CombineErrors(err, closeError) + } + }() + + combinedIter := NewCombinedStmtStatsIterator(memIter, persistedIter, colCnt) + + for { + var ok bool + ok, err = combinedIter.Next(ctx) + if err != nil { + return err + } + + if !ok { + break + } + + stats := combinedIter.Cur() + if err = visitor(ctx, stats); err != nil { + return err + } + } + + return nil +} + +func (s *PersistedSQLStats) persistedStmtStatsIter( + ctx context.Context, options *sqlstats.IteratorOptions, +) (iter sqlutil.InternalRows, expectedColCnt int, err error) { + query, expectedColCnt := s.getFetchQueryForStmtStatsTable(options) + + persistedIter, err := s.cfg.InternalExecutor.QueryIteratorEx( + ctx, + "read-stmt-stats", + nil, /* txn */ + sessiondata.InternalExecutorOverride{User: security.NodeUserName()}, + query, + ) + + if err != nil { + return nil /* iter */, 0 /* expectedColCnt */, err + } + + return persistedIter, expectedColCnt, err +} + +func (s *PersistedSQLStats) getFetchQueryForStmtStatsTable( + options *sqlstats.IteratorOptions, +) (query string, colCnt int) { + selectedColumns := []string{ + "aggregated_ts", + "fingerprint_id", + "plan_hash", + "app_name", + "metadata", + "statistics", + "plan", + } + query = fmt.Sprintf(` +SELECT + %s +FROM + system.statement_statistics +`, strings.Join(selectedColumns, ",")) + + if s.cfg.Knobs != nil && !s.cfg.Knobs.DisableFollowerRead { + query = fmt.Sprintf("%s AS OF SYSTEM TIME follower_read_timestamp()", query) + } + + orderByColumns := []string{"aggregated_ts"} + if options.SortedAppNames { + orderByColumns = append(orderByColumns, "app_name") + } + + // TODO(azhng): what we should really be sorting here is fingerprint_id + // column. This is so that we are backward compatible with the way + // we are ordering the in-memory stats. 
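For concreteness, an editorial sketch (not part of the patch) of what this builder produces: with both sort options forced on, as IterateStatementStats does above, and with follower reads enabled, the assembled query comes out roughly as

SELECT aggregated_ts, fingerprint_id, plan_hash, app_name, metadata, statistics, plan
FROM system.statement_statistics
AS OF SYSTEM TIME follower_read_timestamp()
ORDER BY aggregated_ts,app_name,metadata ->> 'query'

with the ORDER BY columns joined without spaces, exactly as strings.Join with a bare comma emits them.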
+ if options.SortedKey { + orderByColumns = append(orderByColumns, "metadata ->> 'query'") + } + + query = fmt.Sprintf("%s ORDER BY %s", query, strings.Join(orderByColumns, ",")) + + return query, len(selectedColumns) +} + +func rowToStmtStats(row tree.Datums) (*roachpb.CollectedStatementStatistics, error) { + var stats roachpb.CollectedStatementStatistics + stats.AggregatedTs = tree.MustBeDTimestampTZ(row[0]).Time + + stmtFingerprintID, err := datumToUint64(row[1]) + if err != nil { + return nil, err + } + stats.ID = roachpb.StmtFingerprintID(stmtFingerprintID) + stats.Key.PlanHash = uint64(tree.MustBeDInt(row[2])) + stats.Key.App = string(tree.MustBeDString(row[3])) + + metadata := tree.MustBeDJSON(row[4]).JSON + if err = sqlstatsutil.DecodeStmtStatsMetadataJSON(metadata, &stats); err != nil { + return nil, err + } + + statistics := tree.MustBeDJSON(row[5]).JSON + if err = sqlstatsutil.DecodeStmtStatsStatisticsJSON(statistics, &stats.Stats); err != nil { + return nil, err + } + + jsonPlan := tree.MustBeDJSON(row[6]).JSON + plan, err := sqlstatsutil.JSONToExplainTreePlanNode(jsonPlan) + if err != nil { + return nil, err + } + stats.Stats.SensitiveInfo.MostRecentPlanDescription = *plan + + return &stats, nil +} + +func datumToUint64(d tree.Datum) (uint64, error) { + b := []byte(tree.MustBeDBytes(d)) + + _, val, err := encoding.DecodeUint64Ascending(b) + if err != nil { + return 0, err + } + + return val, nil +} diff --git a/pkg/sql/sqlstats/persistedsqlstats/test_utils.go b/pkg/sql/sqlstats/persistedsqlstats/test_utils.go index ed49d099059b..337a4469d988 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/test_utils.go +++ b/pkg/sql/sqlstats/persistedsqlstats/test_utils.go @@ -21,6 +21,11 @@ type TestingKnobs struct { // StubTimeNow allows tests to override the timeutil.Now() function used // by the flush operation to calculate aggregated_ts timestamp. StubTimeNow func() time.Time + + // DisableFollowerRead prevents the PersistedSQLStats from using follower + // reads. This is used in unit tests that might otherwise read from a time + // before the stmt/txn stats system tables were created. + DisableFollowerRead bool } // ModuleTestingKnobs implements base.ModuleTestingKnobs interface. diff --git a/pkg/sql/sqlstats/ssprovider.go b/pkg/sql/sqlstats/ssprovider.go index 4146e56d5633..fadf87fc509c 100644 --- a/pkg/sql/sqlstats/ssprovider.go +++ b/pkg/sql/sqlstats/ssprovider.go @@ -84,9 +84,9 @@ type Reader interface { // IteratorOptions provides the ability to the caller to change how it iterates // the statements and transactions. -// TODO(azhng): we want to support pagination/continuation tokens as well as -// different error handling behaviors when error is encountered once we start -// to support cluster-wide implementation of the sqlstats.Reader interface. +// TODO(azhng): introduce StartTime and EndTime fields so we can implement +// virtual indexes on crdb_internal.{statement,transaction}_statistics +// using the iterators. type IteratorOptions struct { // SortedAppNames determines whether or not the application names will be // sorted when iterating through statistics.
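To close the loop, a usage sketch of the merged reader (assumed caller-side code; dumpStmtStats is a hypothetical helper, while the IterateStatementStats signature and the visitor shape match the ones added in this patch):

package example // illustrative only

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlstats"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats"
	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// dumpStmtStats walks the merged (in-memory + persisted) statement stats.
// The sort options are overridden to true inside IterateStatementStats, so
// entries arrive ordered by aggregated_ts, app name, and query.
func dumpStmtStats(ctx context.Context, stats *persistedsqlstats.PersistedSQLStats) error {
	return stats.IterateStatementStats(ctx, &sqlstats.IteratorOptions{},
		func(ctx context.Context, s *roachpb.CollectedStatementStatistics) error {
			log.Infof(ctx, "%s executed %d time(s)", s.Key.Query, s.Stats.Count)
			return nil
		})
}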