From 67099fadb9a1d069abaf84004076bc32aa9f00fd Mon Sep 17 00:00:00 2001 From: Marius Posta Date: Thu, 18 Feb 2021 23:23:11 -0500 Subject: [PATCH 1/2] sql: descriptor validation overhaul Previously, descriptor validation suffered from a few shortcomings: 1. The methods to call were inconsistent across descriptor types, and the error messages they return are inconsistent as well. 2. Those methods made ad-hoc use of catalog.DescGetter to walk through the descriptor reference graph, and could sometimes walk surprisingly far. This is particularly true for type descriptors, which hold references to other types. 3. To complicate things further, DescGetter implementations which read descriptors from storage themselves perform validation as well. Although it is perfectly sensible to systematically validate descriptors when read, there is a circularity problem at hand which would benefit from being solved without melting the maintainer's brain in the process. 4. The validation methods return an error type, making it awkward to return multiple errors. Tools like doctor would be made more useful if they could report more than only the first encountered error. Recently we introduced a change that added descriptor validation on write. This change involves validating all uncommitted descriptors in bulk and this in turn favours a bulk approach to reading all their referenced descriptors from storage. With this in mind, this commit adds a GetReferencedDescIDs method to catalog.Descriptor which returns the IDs of all descriptors referenced by the receiver. By calling this method on all descriptors we intend to validate and by reading all referenced descriptors in one kv.Batch and stuffing them in a catalog.MapDescGetter, we can now validate using this in-memory DescGetter instead. Turning validation into this multi-step process means it can no longer be invoked as a method call on the target descriptor. 
This commit moves the entry point to descriptor validation to the catalog.Validate function. The descriptor objects themselves still define the validation checks, but these are effectively made inaccessible from outside the catalog package. The rationale behind this is to enforce order in the sequence of checks performed and to enforce some uniformity in the formatting of the error messages. The checks are grouped into three tiers: internal consistency checks, cross-reference consistency checks, and transactional consistency checks. All this also helps reduce the number of call sites of descriptor validations as well as increase the scope of the validations. This has uncovered a few bugs related to schemas and temporary tables. This effort has also helped identify validation checks which have since been made redundant, either by other existing rules or by the new validation-on-write behaviour. Finally, this has allowed us to strip doctor of its own duplicated (and dodgy) validation logic and simply report all validation errors instead. Release justification: This commit is safe for this release because it consists of bug fixes and changes involving descriptor validation. If anything these changes will help uncover descriptor corruptions which were previously unnoticed, both because we're extending the validation suite and because the doctor tool produces richer output. Release note (cli change): The doctor tool can now report multiple descriptor validation failures per descriptor. 
--- pkg/base/test_server_args.go | 3 - pkg/ccl/backupccl/backupbase/targets.go | 6 +- pkg/ccl/backupccl/restore_job.go | 19 +- pkg/ccl/changefeedccl/avro_test.go | 2 +- pkg/ccl/importccl/import_stmt.go | 4 - .../testdata/logic_test/multi_region | 2 +- pkg/ccl/partitionccl/BUILD.bazel | 1 + pkg/ccl/partitionccl/partition_test.go | 3 +- pkg/cli/testdata/doctor/testcluster | 2 +- pkg/cli/testdata/doctor/testzipdir | 12 +- pkg/server/testserver.go | 3 - pkg/sql/alter_column_type_test.go | 2 +- pkg/sql/alter_database.go | 10 +- pkg/sql/alter_table.go | 22 +- pkg/sql/alter_table_locality.go | 23 +- pkg/sql/alter_type.go | 6 - pkg/sql/catalog/BUILD.bazel | 1 + pkg/sql/catalog/catalogkv/catalogkv.go | 77 +- pkg/sql/catalog/dbdesc/BUILD.bazel | 2 + pkg/sql/catalog/dbdesc/database_desc.go | 137 +- pkg/sql/catalog/dbdesc/database_test.go | 239 +++ pkg/sql/catalog/desc_getter.go | 38 +- pkg/sql/catalog/descpb/privilege.go | 6 +- pkg/sql/catalog/descpb/privilege_test.go | 2 +- pkg/sql/catalog/descriptor.go | 16 +- pkg/sql/catalog/descs/collection.go | 70 +- pkg/sql/catalog/descs/collection_test.go | 2 +- pkg/sql/catalog/errors.go | 87 +- pkg/sql/catalog/schemadesc/BUILD.bazel | 3 + pkg/sql/catalog/schemadesc/schema_desc.go | 75 +- .../catalog/schemadesc/schema_desc_test.go | 124 ++ pkg/sql/catalog/tabledesc/BUILD.bazel | 1 + pkg/sql/catalog/tabledesc/helpers_test.go | 20 - pkg/sql/catalog/tabledesc/safe_format_test.go | 33 +- pkg/sql/catalog/tabledesc/structured.go | 1358 +-------------- pkg/sql/catalog/tabledesc/structured_test.go | 1457 ----------------- pkg/sql/catalog/tabledesc/table.go | 12 +- pkg/sql/catalog/tabledesc/validate.go | 1351 +++++++++++++++ pkg/sql/catalog/tabledesc/validate_test.go | 1409 ++++++++++++++++ pkg/sql/catalog/typedesc/safe_format_test.go | 5 +- pkg/sql/catalog/typedesc/type_desc.go | 389 ++--- pkg/sql/catalog/typedesc/type_desc_test.go | 152 +- pkg/sql/catalog/validate.go | 325 ++++ pkg/sql/conn_executor.go | 8 - pkg/sql/crdb_internal.go | 4 +- 
pkg/sql/crdb_internal_test.go | 4 +- pkg/sql/create_index.go | 2 +- pkg/sql/create_sequence.go | 5 +- pkg/sql/create_table.go | 11 +- pkg/sql/create_view.go | 24 +- pkg/sql/descriptor.go | 38 +- pkg/sql/descriptor_mutation_test.go | 15 +- pkg/sql/doctor/BUILD.bazel | 1 - pkg/sql/doctor/doctor.go | 79 +- pkg/sql/doctor/doctor_test.go | 80 +- pkg/sql/drop_index.go | 6 +- pkg/sql/exec_util.go | 5 - .../testdata/logic_test/alter_primary_key | 8 +- .../testdata/logic_test/system_columns | 4 +- pkg/sql/namespace_test.go | 3 +- pkg/sql/pgwire/testdata/pgtest/notice | 2 +- pkg/sql/planner.go | 8 + pkg/sql/rename_column.go | 5 +- pkg/sql/rename_index.go | 5 +- pkg/sql/rename_table.go | 4 +- pkg/sql/repair.go | 61 +- pkg/sql/schema_changer.go | 9 - pkg/sql/schema_changer_test.go | 2 +- pkg/sql/scrub.go | 12 +- pkg/sql/table.go | 3 +- pkg/sql/table_test.go | 2 +- pkg/sql/tests/repair_test.go | 20 +- pkg/sql/tests/system_table_test.go | 3 +- pkg/sql/virtual_schema.go | 4 +- 74 files changed, 4316 insertions(+), 3632 deletions(-) create mode 100644 pkg/sql/catalog/tabledesc/validate.go create mode 100644 pkg/sql/catalog/validate.go diff --git a/pkg/base/test_server_args.go b/pkg/base/test_server_args.go index 7027295c7d95..39bb2f845ba3 100644 --- a/pkg/base/test_server_args.go +++ b/pkg/base/test_server_args.go @@ -128,9 +128,6 @@ type TestServerArgs struct { // IF set, the demo login endpoint will be enabled. EnableDemoLoginEndpoint bool - - // If set, testing specific descriptor validation will be disabled. 
even if the server - DisableTestingDescriptorValidation bool } // TestClusterArgs contains the parameters one can set when creating a test diff --git a/pkg/ccl/backupccl/backupbase/targets.go b/pkg/ccl/backupccl/backupbase/targets.go index c591d3113923..21df4b0c4f4f 100644 --- a/pkg/ccl/backupccl/backupbase/targets.go +++ b/pkg/ccl/backupccl/backupbase/targets.go @@ -190,9 +190,9 @@ func NewDescriptorResolver(descs []catalog.Descriptor) (*DescriptorResolver, err if !ok { return errors.Errorf("schema %d not found for desc %d", scID, desc.GetID()) } - scDesc, ok := scDescI.(catalog.SchemaDescriptor) - if !ok { - return errors.Errorf("descriptor %d is not a schema", scDescI.GetID()) + scDesc, err := catalog.AsSchemaDescriptor(scDescI) + if err != nil { + return err } scName = scDesc.GetName() } diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index d7720afff04f..c9597b90538f 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -433,23 +433,16 @@ func WriteDescriptors( } return err } - // TODO(ajwerner): Utilize validation inside of the descs.Collection - // rather than reaching into the store. - dg := catalogkv.NewOneLevelUncachedDescGetter(txn, codec) + + bdg := catalogkv.NewOneLevelUncachedDescGetter(txn, codec) + descs := make([]catalog.Descriptor, 0, len(databases)+len(tables)) for _, table := range tables { - if err := table.Validate(ctx, dg); err != nil { - return errors.Wrapf(err, - "validate table %d", errors.Safe(table.GetID())) - } + descs = append(descs, table) } - for _, db := range databases { - if err := db.Validate(ctx, dg); err != nil { - return errors.Wrapf(err, - "validate database %d", errors.Safe(db.GetID())) - } + descs = append(descs, db) } - return nil + return catalog.ValidateSelfAndCrossReferences(ctx, bdg, descs...) 
}() return errors.Wrapf(err, "restoring table desc and namespace entries") } diff --git a/pkg/ccl/changefeedccl/avro_test.go b/pkg/ccl/changefeedccl/avro_test.go index 0ffac4e65b9a..d6ca21414bc0 100644 --- a/pkg/ccl/changefeedccl/avro_test.go +++ b/pkg/ccl/changefeedccl/avro_test.go @@ -59,7 +59,7 @@ func parseTableDesc(createTableStmt string) (catalog.TableDescriptor, error) { if err != nil { return nil, err } - return mutDesc, mutDesc.ValidateSelf(ctx) + return mutDesc, catalog.ValidateSelf(mutDesc) } func parseValues(tableDesc catalog.TableDescriptor, values string) ([]rowenc.EncDatumRow, error) { diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index ed292bb5c35f..712621fc02ec 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -1280,10 +1280,6 @@ func writeNonDropDatabaseChange( queuedJob := []jobspb.JobID{job.ID()} b := txn.NewBatch() - dg := catalogkv.NewOneLevelUncachedDescGetter(txn, p.ExecCfg().Codec) - if err := desc.Validate(ctx, dg); err != nil { - return nil, err - } err = descsCol.WriteDescToBatch( ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region b/pkg/ccl/logictestccl/testdata/logic_test/multi_region index 62ce9c9674e2..7b1a6ef9fa35 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region @@ -911,7 +911,7 @@ SHOW ENUMS FROM drop_region_db schema name values owner public crdb_internal_region {ap-southeast-2,ca-central-1} root -statement error pq: region "us-east-1" has not been added to database "drop_region_db" +statement error pq: relation "t" \([0-9]+\): invalid locality config: region "us-east-1" has not been added to database "drop_region_db" CREATE TABLE drop_region_db.public.t(a int) LOCALITY REGIONAL BY TABLE IN "us-east-1" statement ok diff --git a/pkg/ccl/partitionccl/BUILD.bazel b/pkg/ccl/partitionccl/BUILD.bazel index 
e5d0668f92be..5159a0c48a32 100644 --- a/pkg/ccl/partitionccl/BUILD.bazel +++ b/pkg/ccl/partitionccl/BUILD.bazel @@ -52,6 +52,7 @@ go_test( "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index b3daa6209ea5..8e508c7f8451 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -139,7 +140,7 @@ func (pt *partitioningTest) parse() error { return err } pt.parsed.tableDesc = mutDesc - if err := pt.parsed.tableDesc.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(pt.parsed.tableDesc); err != nil { return err } } diff --git a/pkg/cli/testdata/doctor/testcluster b/pkg/cli/testdata/doctor/testcluster index 2953ab3a8ff3..cdf0d32cd8b8 100644 --- a/pkg/cli/testdata/doctor/testcluster +++ b/pkg/cli/testdata/doctor/testcluster @@ -2,6 +2,6 @@ doctor cluster ---- debug doctor cluster Examining 35 descriptors and 36 namespace entries... - Table 53: ParentID 50, ParentSchemaID 29, Name 'foo': not being dropped but no namespace entry found + ParentID 50, ParentSchemaID 29: relation "foo" (53): not being dropped but no namespace entry found Examining 1 running jobs... 
ERROR: validation failed diff --git a/pkg/cli/testdata/doctor/testzipdir b/pkg/cli/testdata/doctor/testzipdir index 8c4ca04b8652..44c6eda2e2e1 100644 --- a/pkg/cli/testdata/doctor/testzipdir +++ b/pkg/cli/testdata/doctor/testzipdir @@ -2,12 +2,12 @@ doctor zipdir ---- debug doctor zipdir testdata/doctor/debugzip Examining 38 descriptors and 43 namespace entries... - Table 53: ParentID 52, ParentSchemaID 29, Name 'users': desc 53: parentID 52 does not exist - Table 54: ParentID 52, ParentSchemaID 29, Name 'vehicles': desc 54: parentID 52 does not exist - Table 55: ParentID 52, ParentSchemaID 29, Name 'rides': desc 55: parentID 52 does not exist - Table 56: ParentID 52, ParentSchemaID 29, Name 'vehicle_location_histories': desc 56: parentID 52 does not exist - Table 57: ParentID 52, ParentSchemaID 29, Name 'promo_codes': desc 57: parentID 52 does not exist - Table 58: ParentID 52, ParentSchemaID 29, Name 'user_promo_codes': desc 58: parentID 52 does not exist + ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: descriptor not found + ParentID 52, ParentSchemaID 29: relation "vehicles" (54): referenced database ID 52: descriptor not found + ParentID 52, ParentSchemaID 29: relation "rides" (55): referenced database ID 52: descriptor not found + ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): referenced database ID 52: descriptor not found + ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: descriptor not found + ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): referenced database ID 52: descriptor not found Descriptor 52: has namespace row(s) [{ParentID:0 ParentSchemaID:0 Name:movr}] but no descriptor Examining 1 running jobs... 
job 587337426984566785: schema change GC refers to missing table descriptor(s) [59] diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 92af61e71b15..6049a98a0778 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -274,9 +274,6 @@ func makeTestConfigFromParams(params base.TestServerArgs) Config { if params.Knobs.SQLExecutor == nil { cfg.TestingKnobs.SQLExecutor = &sql.ExecutorTestingKnobs{} } - if !params.DisableTestingDescriptorValidation { - cfg.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs).TestingDescriptorValidation = true - } // For test servers, leave interleaved tables enabled by default. We'll remove // this when we remove interleaved tables altogether. diff --git a/pkg/sql/alter_column_type_test.go b/pkg/sql/alter_column_type_test.go index 8e2eb8534480..04f04e5b6462 100644 --- a/pkg/sql/alter_column_type_test.go +++ b/pkg/sql/alter_column_type_test.go @@ -404,7 +404,7 @@ ALTER TABLE t.test ALTER COLUMN x TYPE STRING; <-childJobStartNotification - expected := "pq: unimplemented: cannot perform a schema change operation while an ALTER COLUMN TYPE schema change is in progress" + expected := `pq: relation "test" \(53\): unimplemented: cannot perform a schema change operation while an ALTER COLUMN TYPE schema change is in progress` sqlDB.ExpectErr(t, expected, ` ALTER TABLE t.test ADD COLUMN y INT; `) diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index cd6a39dd7915..1d9466b0d25c 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -219,8 +218,7 @@ func (n 
*alterDatabaseAddRegionNode) startExec(params runParams) error { // Validate the type descriptor after the changes. We have to do this explicitly here, because // we're using an internal call to addEnumValue above which doesn't perform validation. - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := typeDesc.Validate(params.ctx, dg); err != nil { + if err := validateDescriptor(params.ctx, params.p, typeDesc); err != nil { return err } @@ -522,12 +520,6 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e return err } - // Validate the type descriptor after the changes. - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := typeDesc.Validate(params.ctx, dg); err != nil { - return err - } - // Update the database's zone configuration. if err := ApplyZoneConfigFromDatabaseRegionConfig( params.ctx, diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 1cbb7bcc3781..8bdab383de20 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -281,7 +280,7 @@ func (n *alterTableNode) startExec(params runParams) error { case *tree.CheckConstraintTableDef: var err error params.p.runWithOptions(resolveFlags{contextDatabaseID: n.tableDesc.ParentID}, func() { - info, infoErr := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, infoErr := n.tableDesc.GetConstraintInfo() if infoErr != nil { err = infoErr return @@ -663,14 +662,13 @@ func (n *alterTableNode) startExec(params runParams) error { return 
pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "column %q in the middle of being added, try again later", t.Column) } - if err := n.tableDesc.Validate( - params.ctx, catalogkv.NewOneLevelUncachedDescGetter(params.p.Txn(), params.ExecCfg().Codec), - ); err != nil { + + if err := validateDescriptor(params.ctx, params.p, n.tableDesc); err != nil { return err } case *tree.AlterTableDropConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo() if err != nil { return err } @@ -692,14 +690,12 @@ func (n *alterTableNode) startExec(params runParams) error { return err } descriptorChanged = true - if err := n.tableDesc.Validate( - params.ctx, catalogkv.NewOneLevelUncachedDescGetter(params.p.Txn(), params.ExecCfg().Codec), - ); err != nil { + if err := validateDescriptor(params.ctx, params.p, n.tableDesc); err != nil { return err } case *tree.AlterTableValidateConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo() if err != nil { return err } @@ -889,7 +885,7 @@ func (n *alterTableNode) startExec(params runParams) error { descriptorChanged = descriptorChanged || descChanged case *tree.AlterTableRenameConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo() if err != nil { return err } @@ -1106,7 +1102,7 @@ func applyColumnMutation( } } - info, err := tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := tableDesc.GetConstraintInfo() if err != nil { return err } @@ -1141,7 +1137,7 @@ func applyColumnMutation( "constraint in the middle of being dropped") } } - info, err := tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := tableDesc.GetConstraintInfo() if err != nil { return err } diff --git a/pkg/sql/alter_table_locality.go b/pkg/sql/alter_table_locality.go index afe5f63c3437..44d341c67f69 100644 --- a/pkg/sql/alter_table_locality.go +++ 
b/pkg/sql/alter_table_locality.go @@ -15,7 +15,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -135,7 +134,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityGlobalToRegionalByTable( // Finalize the alter by writing a new table descriptor and updating the zone // configuration. - if err := n.validateAndWriteNewTableLocalityAndZoneConfig( + if err := n.writeNewTableLocalityAndZoneConfig( params, n.dbDesc, ); err != nil { @@ -167,7 +166,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityRegionalByTableToGlobal( // Finalize the alter by writing a new table descriptor and updating the zone // configuration. - if err := n.validateAndWriteNewTableLocalityAndZoneConfig( + if err := n.writeNewTableLocalityAndZoneConfig( params, n.dbDesc, ); err != nil { @@ -208,7 +207,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityRegionalByTableToRegionalB } // Finalize the alter by writing a new table descriptor and updating the zone configuration. - if err := n.validateAndWriteNewTableLocalityAndZoneConfig( + if err := n.writeNewTableLocalityAndZoneConfig( params, n.dbDesc, ); err != nil { @@ -539,21 +538,11 @@ func (n *alterTableSetLocalityNode) startExec(params runParams) error { }) } -// validateAndWriteNewTableLocalityAndZoneConfig validates the newly updated -// LocalityConfig in a table descriptor, writes that table descriptor, and -// writes a new zone configuration for the given table. -func (n *alterTableSetLocalityNode) validateAndWriteNewTableLocalityAndZoneConfig( +// writeNewTableLocalityAndZoneConfig writes the table descriptor with the newly +// updated LocalityConfig and writes a new zone configuration for the table. 
+func (n *alterTableSetLocalityNode) writeNewTableLocalityAndZoneConfig( params runParams, dbDesc *dbdesc.Immutable, ) error { - // Validate the new locality before updating the table descriptor. - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.EvalContext().Codec) - if err := n.tableDesc.ValidateTableLocalityConfig( - params.ctx, - dg, - ); err != nil { - return err - } - // Write out the table descriptor update. if err := params.p.writeSchemaChange( params.ctx, diff --git a/pkg/sql/alter_type.go b/pkg/sql/alter_type.go index 24e42fa05c68..4656d1ebda8e 100644 --- a/pkg/sql/alter_type.go +++ b/pkg/sql/alter_type.go @@ -126,12 +126,6 @@ func (n *alterTypeNode) startExec(params runParams) error { return err } - // Validate the type descriptor after the changes. - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := n.desc.Validate(params.ctx, dg); err != nil { - return err - } - if !eventLogDone { // Write a log event. if err := params.p.logEvent(params.ctx, diff --git a/pkg/sql/catalog/BUILD.bazel b/pkg/sql/catalog/BUILD.bazel index 4d6a9348ff60..1987e1582366 100644 --- a/pkg/sql/catalog/BUILD.bazel +++ b/pkg/sql/catalog/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "errors.go", "table_col_map.go", "table_col_set.go", + "validate.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog", visibility = ["//visibility:public"], diff --git a/pkg/sql/catalog/catalogkv/catalogkv.go b/pkg/sql/catalog/catalogkv/catalogkv.go index 94cd53859a6f..8ef4c2051d79 100644 --- a/pkg/sql/catalog/catalogkv/catalogkv.go +++ b/pkg/sql/catalog/catalogkv/catalogkv.go @@ -203,26 +203,32 @@ func desiredKindError(desc catalog.Descriptor, kind DescriptorKind, id descpb.ID // some of the logic in physical_accessor.go. 
func requiredError(kind DescriptorKind, id descpb.ID) error { var err error + var wrapper func(descpb.ID, error) error switch kind { case TableDescriptorKind: err = sqlerrors.NewUndefinedRelationError(&tree.TableRef{TableID: int64(id)}) + wrapper = catalog.WrapTableDescRefErr case DatabaseDescriptorKind: err = sqlerrors.NewUndefinedDatabaseError(fmt.Sprintf("[%d]", id)) + wrapper = catalog.WrapDatabaseDescRefErr case SchemaDescriptorKind: err = sqlerrors.NewUnsupportedSchemaUsageError(fmt.Sprintf("[%d]", id)) + wrapper = catalog.WrapSchemaDescRefErr case TypeDescriptorKind: err = sqlerrors.NewUndefinedTypeError(tree.NewUnqualifiedTypeName(tree.Name(fmt.Sprintf("[%d]", id)))) + wrapper = catalog.WrapTypeDescRefErr default: err = errors.Errorf("failed to find descriptor [%d]", id) + wrapper = func(_ descpb.ID, err error) error { return err } } - return errors.CombineErrors(catalog.ErrDescriptorNotFound, err) + return errors.CombineErrors(wrapper(id, catalog.ErrDescriptorNotFound), err) } // NewOneLevelUncachedDescGetter returns a new DescGetter backed by the passed // Txn. It will use the transaction to resolve mutable descriptors using // GetDescriptorByID but will pass a nil DescGetter into those lookup calls to // ensure that the entire graph of dependencies is not traversed. 
-func NewOneLevelUncachedDescGetter(txn *kv.Txn, codec keys.SQLCodec) catalog.DescGetter { +func NewOneLevelUncachedDescGetter(txn *kv.Txn, codec keys.SQLCodec) catalog.BatchDescGetter { return &oneLevelUncachedDescGetter{ txn: txn, codec: codec, @@ -312,7 +318,11 @@ func unwrapDescriptor( return nil, nil } if validate { - if err := unwrapped.Validate(ctx, dg); err != nil { + var level catalog.ValidationLevel + if dg != nil { + level = catalog.ValidationLevelSelfAndCrossReferences + } + if err := catalog.Validate(ctx, dg, level, unwrapped).CombinedError(); err != nil { return nil, err } } @@ -329,29 +339,25 @@ func unwrapDescriptorMutable( table, database, typ, schema := descpb.TableFromDescriptor(desc, hlc.Timestamp{}), desc.GetDatabase(), desc.GetType(), desc.GetSchema() + var err error + var mut catalog.MutableDescriptor switch { case table != nil: - mutTable, err := tabledesc.NewFilledInExistingMutable(ctx, dg, false /* skipFKsWithMissingTable */, table) - if err != nil { - return nil, err - } - if err := mutTable.ValidateSelf(ctx); err != nil { - return nil, err - } - return mutTable, nil + mut, err = tabledesc.NewFilledInExistingMutable(ctx, dg, false /* skipFKsWithMissingTable */, table) case database != nil: - dbDesc := dbdesc.NewExistingMutable(*database) - if err := dbDesc.Validate(ctx, dg); err != nil { - return nil, err - } - return dbDesc, nil + mut, err = dbdesc.NewExistingMutable(*database), nil case typ != nil: - return typedesc.NewExistingMutable(*typ), nil + mut, err = typedesc.NewExistingMutable(*typ), nil case schema != nil: - return schemadesc.NewMutableExisting(*schema), nil - default: - return nil, nil + mut, err = schemadesc.NewMutableExisting(*schema), nil + } + if mut != nil && err == nil { + err = catalog.ValidateSelf(mut) } + if err != nil { + return nil, err + } + return mut, nil } // CountUserDescriptors returns the number of descriptors present that were @@ -416,10 +422,8 @@ func GetAllDescriptors( for _, desc := range descs { 
dg[desc.GetID()] = desc } - for _, desc := range descs { - if err := desc.Validate(ctx, dg); err != nil { - return nil, err - } + if err := catalog.ValidateSelfAndCrossReferences(ctx, dg, descs...); err != nil { + return nil, err } return descs, nil } @@ -578,7 +582,11 @@ func MustGetSchemaDescByID( } func getDescriptorsFromIDs( - ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ids []descpb.ID, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + ids []descpb.ID, + wrapFn func(id descpb.ID, err error) error, ) ([]catalog.Descriptor, error) { b := txn.NewBatch() for _, id := range ids { @@ -617,7 +625,7 @@ func getDescriptorsFromIDs( } if catalogDesc == nil { - return nil, catalog.ErrDescriptorNotFound + return nil, wrapFn(ids[i], catalog.ErrDescriptorNotFound) } results = append(results, catalogDesc) } @@ -633,19 +641,19 @@ func getDescriptorsFromIDs( func GetDatabaseDescriptorsFromIDs( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ids []descpb.ID, ) ([]*dbdesc.Immutable, error) { - descs, err := getDescriptorsFromIDs(ctx, txn, codec, ids) + descs, err := getDescriptorsFromIDs(ctx, txn, codec, ids, catalog.WrapDatabaseDescRefErr) if err != nil { return nil, err } res := make([]*dbdesc.Immutable, len(descs)) - for i := range descs { + for i, id := range ids { desc := descs[i] if desc == nil { - return nil, catalog.ErrDescriptorNotFound + return nil, catalog.WrapDatabaseDescRefErr(id, catalog.ErrDescriptorNotFound) } db, ok := desc.(*dbdesc.Immutable) if !ok { - return nil, errors.AssertionFailedf("%q is not a database", desc.GetName()) + return nil, catalog.WrapDatabaseDescRefErr(id, catalog.NewDescriptorTypeError(desc)) } res[i] = db } @@ -658,16 +666,19 @@ func GetDatabaseDescriptorsFromIDs( func GetSchemaDescriptorsFromIDs( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ids []descpb.ID, ) ([]*schemadesc.Immutable, error) { - descs, err := getDescriptorsFromIDs(ctx, txn, codec, ids) + descs, err := getDescriptorsFromIDs(ctx, txn, 
codec, ids, catalog.WrapSchemaDescRefErr) if err != nil { return nil, err } res := make([]*schemadesc.Immutable, len(descs)) - for i := range descs { + for i, id := range ids { desc := descs[i] + if desc == nil { + return nil, catalog.WrapSchemaDescRefErr(id, catalog.ErrDescriptorNotFound) + } schema, ok := desc.(*schemadesc.Immutable) if !ok { - return nil, errors.AssertionFailedf("%q is not a schema", desc.GetName()) + return nil, catalog.WrapSchemaDescRefErr(id, catalog.NewDescriptorTypeError(desc)) } res[i] = schema } diff --git a/pkg/sql/catalog/dbdesc/BUILD.bazel b/pkg/sql/catalog/dbdesc/BUILD.bazel index 7a8dfb7fb5ec..4e0524459611 100644 --- a/pkg/sql/catalog/dbdesc/BUILD.bazel +++ b/pkg/sql/catalog/dbdesc/BUILD.bazel @@ -28,6 +28,8 @@ go_test( "//pkg/security", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/schemadesc", + "//pkg/sql/catalog/typedesc", "//pkg/sql/parser", "//pkg/sql/sem/tree", "//pkg/util/leaktest", diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go index 1d93197052e3..c919cd31919d 100644 --- a/pkg/sql/catalog/dbdesc/database_desc.go +++ b/pkg/sql/catalog/dbdesc/database_desc.go @@ -13,7 +13,6 @@ package dbdesc import ( - "context" "fmt" "github.com/cockroachdb/cockroach/pkg/keys" @@ -107,9 +106,7 @@ func NewImmutable(desc descpb.DatabaseDescriptor) *Immutable { // a nil cluster version. This is for a database that is created in the same // transaction. func NewCreatedMutable(desc descpb.DatabaseDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(desc), - } + return &Mutable{Immutable: makeImmutable(desc)} } // NewExistingMutable returns a Mutable from the given database descriptor with @@ -271,55 +268,115 @@ func (desc *Immutable) ForEachSchemaInfo( // ValidateSelf validates that the database descriptor is well formed. // Checks include validate the database name, and verifying that there // is at least one read and write user. 
-func (desc *Immutable) ValidateSelf(_ context.Context) error { - if err := catalog.ValidateName(desc.GetName(), "descriptor"); err != nil { - return err - } - if desc.GetID() == 0 { - return fmt.Errorf("invalid database ID %d", desc.GetID()) +func (desc *Immutable) ValidateSelf(vea catalog.ValidationErrorAccumulator) { + // Validate local properties of the descriptor. + vea.Report(catalog.ValidateName(desc.GetName(), "descriptor")) + if desc.GetID() == descpb.InvalidID { + vea.Report(fmt.Errorf("invalid database ID %d", desc.GetID())) } + // Fill in any incorrect privileges that may have been missed due to mixed-versions. + // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. + descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) + + // Validate the privilege descriptor. + vea.Report(desc.Privileges.Validate(desc.GetID(), privilege.Database)) + if desc.IsMultiRegion() { - // Ensure no regions are duplicated. - regions := make(map[descpb.RegionName]struct{}) - dbRegions, err := desc.RegionNames() - if err != nil { - return err - } - for _, region := range dbRegions { - if _, seen := regions[region]; seen { - return errors.AssertionFailedf("region %q seen twice on db %d", region, desc.GetID()) - } - regions[region] = struct{}{} - } + desc.validateMultiRegion(vea) + } +} - if desc.RegionConfig.PrimaryRegion == "" { - return errors.AssertionFailedf("primary region unset on a multi-region db %d", desc.GetID()) - } +// validateMultiRegion performs checks specific to multi-region DBs. +func (desc *Immutable) validateMultiRegion(vea catalog.ValidationErrorAccumulator) { - if _, found := regions[desc.RegionConfig.PrimaryRegion]; !found { - return errors.AssertionFailedf( - "primary region not found in list of regions on db %d", desc.GetID()) + // Ensure no regions are duplicated. 
+ regions := make(map[descpb.RegionName]struct{}) + dbRegions, err := desc.RegionNames() + if err != nil { + vea.Report(err) + return + } + + for _, region := range dbRegions { + if _, seen := regions[region]; seen { + vea.Report(errors.AssertionFailedf( + "region %q seen twice on db %d", region, desc.GetID())) } + regions[region] = struct{}{} } - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - descpb.MaybeFixPrivileges(desc.GetID(), desc.Privileges) + if desc.RegionConfig.PrimaryRegion == "" { + vea.Report(errors.AssertionFailedf( + "primary region unset on a multi-region db %d", desc.GetID())) + } else if _, found := regions[desc.RegionConfig.PrimaryRegion]; !found { + vea.Report(errors.AssertionFailedf( + "primary region not found in list of regions on db %d", desc.GetID())) + } +} - // Validate the privilege descriptor. - return desc.Privileges.Validate(desc.GetID(), privilege.Database) +// GetReferencedDescIDs returns the IDs of all descriptors referenced by +// this descriptor, including itself. +func (desc *Immutable) GetReferencedDescIDs() catalog.DescriptorIDSet { + ids := catalog.MakeDescriptorIDSet(desc.GetID()) + if id, err := desc.MultiRegionEnumID(); err == nil { + ids.Add(id) + } + for _, schema := range desc.Schemas { + ids.Add(schema.ID) + } + return ids } -// Validate punts to ValidateSelf. -func (desc *Immutable) Validate(ctx context.Context, _ catalog.DescGetter) error { - return desc.ValidateSelf(ctx) +// ValidateCrossReferences implements the catalog.Descriptor interface. +func (desc *Immutable) ValidateCrossReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // Check schema references. 
+ for schemaName, schemaInfo := range desc.Schemas { + if schemaInfo.Dropped { + continue + } + report := func(err error) { + vea.Report(errors.Wrapf(err, "schema mapping entry %q (%d)", + errors.Safe(schemaName), schemaInfo.ID)) + } + schemaDesc, err := vdg.GetSchemaDescriptor(schemaInfo.ID) + if err != nil { + report(err) + continue + } + if schemaDesc.GetName() != schemaName { + report(errors.Errorf("schema name is actually %q", errors.Safe(schemaDesc.GetName()))) + } + if schemaDesc.GetParentID() != desc.GetID() { + report(errors.Errorf("schema parentID is actually %d", schemaDesc.GetParentID())) + } + } + + // Check multi-region enum type. + if enumID, err := desc.MultiRegionEnumID(); err == nil { + report := func(err error) { + vea.Report(errors.Wrap(err, "multi-region enum")) + } + typ, err := vdg.GetTypeDescriptor(enumID) + if err != nil { + report(err) + return + } + if typ.GetParentID() != desc.GetID() { + report(errors.Errorf("parentID is actually %d", typ.GetParentID())) + } + // Further validation should be handled by the type descriptor itself. + } } -// ValidateTxnCommit punts to Validate. -func (desc *Immutable) ValidateTxnCommit(ctx context.Context, descGetter catalog.DescGetter) error { - return desc.Validate(ctx, descGetter) +// ValidateTxnCommit implements the catalog.Descriptor interface. +func (desc *Immutable) ValidateTxnCommit( + _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { + // No-op. } // SchemaMeta implements the tree.SchemaMeta interface. 
diff --git a/pkg/sql/catalog/dbdesc/database_test.go b/pkg/sql/catalog/dbdesc/database_test.go index 4db2d2c13650..0735b79bf616 100644 --- a/pkg/sql/catalog/dbdesc/database_test.go +++ b/pkg/sql/catalog/dbdesc/database_test.go @@ -11,11 +11,15 @@ package dbdesc import ( + "context" + "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -79,3 +83,238 @@ func TestMakeDatabaseDesc(t *testing.T) { t.Fatalf("wrong number of privilege users, expected 2, got: %d", len(desc.GetPrivileges().Users)) } } + +func TestValidateDatabaseDesc(t *testing.T) { + defer leaktest.AfterTest(t)() + + testData := []struct { + err string + desc *Immutable + }{ + {`invalid database ID 0`, + NewImmutable(descpb.DatabaseDescriptor{ + Name: "db", + ID: 0, + Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), + }), + }, + { + `region "us-east-1" seen twice on db 200`, + NewImmutable(descpb.DatabaseDescriptor{ + Name: "multi-region-db", + ID: 200, + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ + {Name: "us-east-1"}, + {Name: "us-east-1"}, + }, + PrimaryRegion: "us-east-1", + }, + Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), + }), + }, + { + `primary region unset on a multi-region db 200`, + NewImmutable(descpb.DatabaseDescriptor{ + Name: "multi-region-db", + ID: 200, + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ + {Name: "us-east-1"}, + }, + }, + Privileges: 
descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), + }), + }, + { + `primary region not found in list of regions on db 200`, + NewImmutable(descpb.DatabaseDescriptor{ + Name: "multi-region-db", + ID: 200, + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ + {Name: "us-east-1"}, + }, + PrimaryRegion: "us-east-2", + }, + Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), + }), + }, + } + for i, d := range testData { + t.Run(d.err, func(t *testing.T) { + expectedErr := fmt.Sprintf("%s %q (%d): %s", d.desc.TypeName(), d.desc.GetName(), d.desc.GetID(), d.err) + if err := catalog.ValidateSelf(d.desc); err == nil { + t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, d.desc) + } else if expectedErr != err.Error() { + t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, expectedErr, err) + } + }) + } +} + +func TestValidateCrossDatabaseReferences(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + tests := []struct { + err string + desc descpb.DatabaseDescriptor + multiRegionEnum descpb.TypeDescriptor + schemaDescs []descpb.SchemaDescriptor + }{ + { // 0 + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + }, + }, + { // 1 + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 52, Dropped: false}, + }, + }, + schemaDescs: []descpb.SchemaDescriptor{ + { + Name: "schema1", + ID: 52, + ParentID: 51, + }, + }, + }, + { // 2 + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 53, Dropped: true}, + }, + }, + }, + { // 3 + err: `schema mapping entry "schema1" (500): referenced schema ID 500: descriptor not found`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 500, 
Dropped: false}, + }, + }, + }, + { // 4 + err: `schema mapping entry "schema1" (52): schema name is actually "foo"`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 52, Dropped: false}, + }, + }, + schemaDescs: []descpb.SchemaDescriptor{ + { + Name: "foo", + ID: 52, + ParentID: 51, + }, + }, + }, + { // 5 + err: `schema mapping entry "schema1" (52): schema parentID is actually 500`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 52}, + }, + }, + schemaDescs: []descpb.SchemaDescriptor{ + { + Name: "schema1", + ID: 52, + ParentID: 500, + }, + }, + }, + { // 6 + err: `multi-region enum: referenced type ID 500: descriptor not found`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + RegionEnumID: 500, + }, + }, + }, + { // 7 + err: `multi-region enum: parentID is actually 500`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + RegionEnumID: 52, + }, + }, + multiRegionEnum: descpb.TypeDescriptor{ + ID: 52, + ParentID: 500, + }, + }, + { // 8 + err: `schema mapping entry "schema1" (53): referenced schema ID 53: descriptor is a *typedesc.Immutable: unexpected descriptor type`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 53}, + }, + }, + multiRegionEnum: descpb.TypeDescriptor{ + ID: 53, + ParentID: 51, + }, + }, + { // 9 + err: `multi-region enum: referenced type ID 53: descriptor is a *schemadesc.Immutable: unexpected descriptor type`, + desc: descpb.DatabaseDescriptor{ + ID: 51, + Name: "db1", + RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ + RegionEnumID: 53, + }, + }, + schemaDescs: []descpb.SchemaDescriptor{ + { + Name: "schema1", + ID: 53, + ParentID: 
51, + }, + }, + }, + } + + for i, test := range tests { + privilege := descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) + descs := catalog.MapDescGetter{} + test.desc.Privileges = privilege + desc := NewImmutable(test.desc) + descs[test.desc.ID] = desc + test.multiRegionEnum.Privileges = privilege + descs[test.multiRegionEnum.ID] = typedesc.NewImmutable(test.multiRegionEnum) + for _, schemaDesc := range test.schemaDescs { + schemaDesc.Privileges = privilege + descs[schemaDesc.ID] = schemadesc.NewImmutable(schemaDesc) + } + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) + if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { + if test.err != "" { + t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, test.desc) + } + } else if expectedErr != err.Error() { + t.Errorf("%d: expected \"%s\", but found \"%s\"", i, expectedErr, err.Error()) + } + } +} diff --git a/pkg/sql/catalog/desc_getter.go b/pkg/sql/catalog/desc_getter.go index 00d550082de5..ce088fb6e354 100644 --- a/pkg/sql/catalog/desc_getter.go +++ b/pkg/sql/catalog/desc_getter.go @@ -23,44 +23,12 @@ type DescGetter interface { } // BatchDescGetter is like DescGetter but retrieves batches of descriptors, -// which for some implementation may make more sense performance-wise. +// which for some implementations may make more sense performance-wise. type BatchDescGetter interface { + DescGetter GetDescs(ctx context.Context, reqs []descpb.ID) ([]Descriptor, error) } -// GetDescs retrieves multiple descriptors using a DescGetter. -// If the latter is also a BatchDescGetter, it will delegate to its GetDescs -// method. 
-func GetDescs(ctx context.Context, descGetter DescGetter, reqs []descpb.ID) ([]Descriptor, error) { - if bdg, ok := descGetter.(BatchDescGetter); ok { - return bdg.GetDescs(ctx, reqs) - } - ret := make([]Descriptor, len(reqs)) - for i, id := range reqs { - desc, err := descGetter.GetDesc(ctx, id) - if err != nil { - return nil, err - } - ret[i] = desc - } - return ret, nil -} - -// GetTypeDescFromID retrieves the type descriptor for the type ID passed -// in using an existing descGetter. It returns an error if the descriptor -// doesn't exist or if it exists and is not a type descriptor. -func GetTypeDescFromID(ctx context.Context, dg DescGetter, id descpb.ID) (TypeDescriptor, error) { - desc, err := dg.GetDesc(ctx, id) - if err != nil { - return nil, err - } - typ, ok := desc.(TypeDescriptor) - if !ok { - return nil, ErrDescriptorNotFound - } - return typ, nil -} - // GetTableDescFromID retrieves the table descriptor for the table // ID passed in using an existing proto getter. Returns an error if the // descriptor doesn't exist or if it exists and is not a table. @@ -71,7 +39,7 @@ func GetTableDescFromID(ctx context.Context, dg DescGetter, id descpb.ID) (Table } table, ok := desc.(TableDescriptor) if !ok { - return nil, ErrDescriptorNotFound + return nil, WrapTableDescRefErr(id, ErrDescriptorNotFound) } return table, nil } diff --git a/pkg/sql/catalog/descpb/privilege.go b/pkg/sql/catalog/descpb/privilege.go index c128985acbf2..887dba317bae 100644 --- a/pkg/sql/catalog/descpb/privilege.go +++ b/pkg/sql/catalog/descpb/privilege.go @@ -218,7 +218,11 @@ func (p *PrivilegeDescriptor) Revoke( // perhaps it was intended only for the 2.0 release but then somehow we got // bad descriptors with bad initial permissions into later versions or we didn't // properly bake this migration in. 
-func MaybeFixPrivileges(id ID, p *PrivilegeDescriptor) bool { +func MaybeFixPrivileges(id ID, ptr **PrivilegeDescriptor) bool { + if *ptr == nil { + *ptr = &PrivilegeDescriptor{} + } + p := *ptr allowedPrivilegesBits := privilege.ALL.Mask() if IsReservedID(id) { // System databases and tables have custom maximum allowed privileges. diff --git a/pkg/sql/catalog/descpb/privilege_test.go b/pkg/sql/catalog/descpb/privilege_test.go index 3c66ce3fe592..8d434f27c7dd 100644 --- a/pkg/sql/catalog/descpb/privilege_test.go +++ b/pkg/sql/catalog/descpb/privilege_test.go @@ -564,7 +564,7 @@ func TestFixPrivileges(t *testing.T) { desc.Grant(u, p) } - if a, e := MaybeFixPrivileges(testCase.id, desc), testCase.modified; a != e { + if a, e := MaybeFixPrivileges(testCase.id, &desc), testCase.modified; a != e { t.Errorf("#%d: expected modified=%t, got modified=%t", num, e, a) continue } diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index fb414d7c1f50..d5ddc04aed05 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -73,14 +73,18 @@ type Descriptor interface { // DescriptorProto prepares this descriptor for serialization. DescriptorProto() *descpb.Descriptor + // GetReferencedDescIDs returns the IDs of all descriptors directly referenced + // by this descriptor, including itself. + GetReferencedDescIDs() DescriptorIDSet + // ValidateSelf checks the internal consistency of the descriptor. - ValidateSelf(ctx context.Context) error + ValidateSelf(vea ValidationErrorAccumulator) - // Validate is like ValidateSelf but with additional cross-reference checks. - Validate(ctx context.Context, descGetter DescGetter) error + // ValidateCrossReferences performs cross-reference checks. + ValidateCrossReferences(vea ValidationErrorAccumulator, vdg ValidationDescGetter) - // ValidateTxnCommit is like Validate but with additional pre-commit checks. 
- ValidateTxnCommit(ctx context.Context, descGetter DescGetter) error + // ValidateTxnCommit performs pre-commit checks. + ValidateTxnCommit(vea ValidationErrorAccumulator, vdg ValidationDescGetter) } // DatabaseDescriptor will eventually be called dbdesc.Descriptor. @@ -264,7 +268,7 @@ type TableDescriptor interface { GetUniqueWithoutIndexConstraints() []descpb.UniqueWithoutIndexConstraint AllActiveAndInactiveUniqueWithoutIndexConstraints() []*descpb.UniqueWithoutIndexConstraint ForeachInboundFK(f func(fk *descpb.ForeignKeyConstraint) error) error - GetConstraintInfo(ctx context.Context, dg DescGetter) (map[string]descpb.ConstraintDetail, error) + GetConstraintInfo() (map[string]descpb.ConstraintDetail, error) AllActiveAndInactiveForeignKeys() []*descpb.ForeignKeyConstraint GetInboundFKs() []descpb.ForeignKeyConstraint GetOutboundFKs() []descpb.ForeignKeyConstraint diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index feace87c4367..8da19c4bafd1 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -217,6 +217,10 @@ type Collection struct { // a mutable descriptor by name or ID when a matching synthetic descriptor // exists is illegal. syntheticDescriptors []catalog.Descriptor + + // skipValidationOnWrite should only be set to true during forced descriptor + // repairs. + skipValidationOnWrite bool } // getLeasedDescriptorByName return a leased descriptor valid for the @@ -958,6 +962,12 @@ func (tc *Collection) getDescriptorByID( ctx, txn, id, flags, mutable, false /* setTxnDeadline */) } +// SkipValidationOnWrite avoids validating uncommitted descriptors prior to +// a transaction commit. +func (tc *Collection) SkipValidationOnWrite() { + tc.skipValidationOnWrite = true +} + // getDescriptorByIDMaybeSetTxnDeadline returns a descriptor according to the // provided lookup flags. Note that flags.Required is ignored, and an error is // always returned if no descriptor with the ID exists. 
@@ -1077,6 +1087,16 @@ func (tc *Collection) GetMutableDescriptorByIDWithFlags( return desc.(catalog.MutableDescriptor), nil } +// GetImmutableDescriptorByID returns an immutable implementation of the +// descriptor with the requested id. An error is returned if no descriptor exists. +// Deprecated in favor of GetMutableDescriptorByIDWithFlags. +func (tc *Collection) GetImmutableDescriptorByID( + ctx context.Context, txn *kv.Txn, id descpb.ID, flags tree.CommonLookupFlags, +) (catalog.Descriptor, error) { + log.VEventf(ctx, 2, "planner getting immutable descriptor for id %d", id) + return tc.getDescriptorByID(ctx, txn, id, flags, false /* mutable */) +} + // GetMutableSchemaByID returns a ResolvedSchema wrapping a mutable // descriptor, if applicable. RequireMutable is ignored. // Required is ignored, and an error is always returned if no descriptor with @@ -1382,8 +1402,8 @@ func (tc *Collection) WriteDescToBatch( ctx context.Context, kvTrace bool, desc catalog.MutableDescriptor, b *kv.Batch, ) error { desc.MaybeIncrementVersion() - if ValidateOnWriteEnabled.Get(&tc.settings.SV) { - if err := desc.ValidateSelf(ctx); err != nil { + if !tc.skipValidationOnWrite && ValidateOnWriteEnabled.Get(&tc.settings.SV) { + if err := catalog.ValidateSelf(desc); err != nil { return err } } @@ -1430,41 +1450,25 @@ func (tc *Collection) GetUncommittedTables() (tables []catalog.TableDescriptor) return tables } -type collectionDescGetter struct { - tc *Collection - txn *kv.Txn -} - -var _ catalog.DescGetter = collectionDescGetter{} - -func (cdg collectionDescGetter) GetDesc( - ctx context.Context, id descpb.ID, -) (catalog.Descriptor, error) { - flags := tree.CommonLookupFlags{ - Required: true, - // Include everything, we want to cast the net as wide as we can. - IncludeOffline: true, - IncludeDropped: true, - // Avoid leased descriptors, if we're leasing the previous version then this - // older version may be returned and this may cause validation to fail. 
- AvoidCached: true, - } - return cdg.tc.getDescriptorByID(ctx, cdg.txn, id, flags, false /* mutable */) -} - -// ValidateUncommittedDescriptors validates all uncommitted descriptors +// ValidateUncommittedDescriptors validates all uncommitted descriptors. +// Validation includes cross-reference checks. Referenced descriptors are +// read from the store unless they happen to also be part of the uncommitted +// descriptor set. We purposefully avoid using leased descriptors as those may +// be one version behind, in which case it's possible (and legitimate) that +// those are missing back-references which would cause validation to fail. func (tc *Collection) ValidateUncommittedDescriptors(ctx context.Context, txn *kv.Txn) error { - if !ValidateOnWriteEnabled.Get(&tc.settings.SV) { + if tc.skipValidationOnWrite || !ValidateOnWriteEnabled.Get(&tc.settings.SV) { return nil } - cdg := collectionDescGetter{tc: tc, txn: txn} - for i, n := 0, len(tc.uncommittedDescriptors); i < n; i++ { - desc := tc.uncommittedDescriptors[i].immutable - if err := desc.ValidateTxnCommit(ctx, cdg); err != nil { - return err - } + descs := make([]catalog.Descriptor, len(tc.uncommittedDescriptors)) + for i, ud := range tc.uncommittedDescriptors { + descs[i] = ud.immutable } - return nil + if len(descs) == 0 { + return nil + } + bdg := catalogkv.NewOneLevelUncachedDescGetter(txn, tc.codec()) + return catalog.Validate(ctx, bdg, catalog.ValidationLevelAllPreTxnCommit, descs...).CombinedError() } // User defined type accessors. 
diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index 5c60529d742e..7b437a315e3f 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -229,7 +229,7 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { mut := db mut.MaybeIncrementVersion() - mut.Schemas["foo"] = descpb.DatabaseDescriptor_SchemaInfo{ID: 2} + mut.Schemas["foo"] = descpb.DatabaseDescriptor_SchemaInfo{ID: 2, Dropped: true} flags.RequireMutable = false diff --git a/pkg/sql/catalog/errors.go b/pkg/sql/catalog/errors.go index c1c13198ea3d..67b720f3e34e 100644 --- a/pkg/sql/catalog/errors.go +++ b/pkg/sql/catalog/errors.go @@ -11,6 +11,7 @@ package catalog import ( + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/errors" @@ -71,6 +72,88 @@ func NewInactiveDescriptorError(err error) error { return &inactiveDescriptorError{err} } -// ErrDescriptorNotFound is returned by getTableDescFromID to signal that a -// descriptor could not be found with the given id. +// ErrDescriptorNotFound is returned to signal that a descriptor could not be +// found with the given id. var ErrDescriptorNotFound = errors.New("descriptor not found") + +// ErrDescriptorWrongType is returned to signal that a descriptor was found but +// that it wasn't of the expected type. +var ErrDescriptorWrongType = errors.New("unexpected descriptor type") + +// NewDescriptorTypeError returns ErrDescriptorWrongType prefixed with +// the actual go type of the descriptor. +func NewDescriptorTypeError(desc interface{}) error { + return errors.Wrapf(ErrDescriptorWrongType, "descriptor is a %T", desc) +} + +// AsDatabaseDescriptor tries to cast desc to a DatabaseDescriptor. +// Returns an ErrDescriptorWrongType otherwise. 
+func AsDatabaseDescriptor(desc Descriptor) (DatabaseDescriptor, error) { + db, ok := desc.(DatabaseDescriptor) + if !ok { + if desc == nil { + return nil, NewDescriptorTypeError(desc) + } + return nil, WrapDatabaseDescRefErr(desc.GetID(), NewDescriptorTypeError(desc)) + } + return db, nil +} + +// AsSchemaDescriptor tries to cast desc to a SchemaDescriptor. +// Returns an ErrDescriptorWrongType otherwise. +func AsSchemaDescriptor(desc Descriptor) (SchemaDescriptor, error) { + schema, ok := desc.(SchemaDescriptor) + if !ok { + if desc == nil { + return nil, NewDescriptorTypeError(desc) + } + return nil, WrapSchemaDescRefErr(desc.GetID(), NewDescriptorTypeError(desc)) + } + return schema, nil +} + +// AsTableDescriptor tries to cast desc to a TableDescriptor. +// Returns an ErrDescriptorWrongType otherwise. +func AsTableDescriptor(desc Descriptor) (TableDescriptor, error) { + table, ok := desc.(TableDescriptor) + if !ok { + if desc == nil { + return nil, NewDescriptorTypeError(desc) + } + return nil, WrapTableDescRefErr(desc.GetID(), NewDescriptorTypeError(desc)) + } + return table, nil +} + +// AsTypeDescriptor tries to cast desc to a TypeDescriptor. +// Returns an ErrDescriptorWrongType otherwise. +func AsTypeDescriptor(desc Descriptor) (TypeDescriptor, error) { + typ, ok := desc.(TypeDescriptor) + if !ok { + if desc == nil { + return nil, NewDescriptorTypeError(desc) + } + return nil, WrapTypeDescRefErr(desc.GetID(), NewDescriptorTypeError(desc)) + } + return typ, nil +} + +// WrapDatabaseDescRefErr wraps an error pertaining to a database descriptor id. +func WrapDatabaseDescRefErr(id descpb.ID, err error) error { + return errors.Wrapf(err, "referenced database ID %d", errors.Safe(id)) +} + +// WrapSchemaDescRefErr wraps an error pertaining to a schema descriptor id. 
+func WrapSchemaDescRefErr(id descpb.ID, err error) error { + return errors.Wrapf(err, "referenced schema ID %d", errors.Safe(id)) +} + +// WrapTableDescRefErr wraps an error pertaining to a table descriptor id. +func WrapTableDescRefErr(id descpb.ID, err error) error { + return errors.Wrapf(err, "referenced table ID %d", errors.Safe(id)) +} + +// WrapTypeDescRefErr wraps an error pertaining to a type descriptor id. +func WrapTypeDescRefErr(id descpb.ID, err error) error { + return errors.Wrapf(err, "referenced type ID %d", errors.Safe(id)) +} diff --git a/pkg/sql/catalog/schemadesc/BUILD.bazel b/pkg/sql/catalog/schemadesc/BUILD.bazel index e730a535b450..b85ff078598b 100644 --- a/pkg/sql/catalog/schemadesc/BUILD.bazel +++ b/pkg/sql/catalog/schemadesc/BUILD.bazel @@ -26,8 +26,11 @@ go_test( srcs = ["schema_desc_test.go"], deps = [ ":schemadesc", + "//pkg/security", "//pkg/sql/catalog", + "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", + "//pkg/util/leaktest", "@com_github_cockroachdb_redact//:redact", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v2//:yaml_v2", diff --git a/pkg/sql/catalog/schemadesc/schema_desc.go b/pkg/sql/catalog/schemadesc/schema_desc.go index 499599ad865b..7762092928ff 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc.go +++ b/pkg/sql/catalog/schemadesc/schema_desc.go @@ -11,7 +11,6 @@ package schemadesc import ( - "context" "fmt" "strings" @@ -169,74 +168,70 @@ func (desc *Immutable) DescriptorProto() *descpb.Descriptor { } // ValidateSelf implements the catalog.Descriptor interface. -func (desc *Immutable) ValidateSelf(_ context.Context) error { - if err := catalog.ValidateName(desc.GetName(), "descriptor"); err != nil { - return err - } - if desc.GetID() == 0 { - return fmt.Errorf("invalid schema ID %d", desc.GetID()) +func (desc *Immutable) ValidateSelf(vea catalog.ValidationErrorAccumulator) { + // Validate local properties of the descriptor. 
+ vea.Report(catalog.ValidateName(desc.GetName(), "descriptor")) + if desc.GetID() == descpb.InvalidID { + vea.Report(fmt.Errorf("invalid schema ID %d", desc.GetID())) } + // Validate the privilege descriptor. - return desc.Privileges.Validate(desc.GetID(), privilege.Schema) + vea.Report(desc.Privileges.Validate(desc.GetID(), privilege.Schema)) } -// Validate implements the catalog.Descriptor interface. -func (desc *Immutable) Validate(ctx context.Context, descGetter catalog.DescGetter) error { - if err := desc.ValidateSelf(ctx); err != nil { - return err - } - // Don't validate cross-references for dropped schemas. - if desc.Dropped() || descGetter == nil { - return nil - } +// GetReferencedDescIDs returns the IDs of all descriptors referenced by +// this descriptor, including itself. +func (desc *Immutable) GetReferencedDescIDs() catalog.DescriptorIDSet { + return catalog.MakeDescriptorIDSet(desc.GetID(), desc.GetParentID()) +} +// ValidateCrossReferences implements the catalog.Descriptor interface. +func (desc *Immutable) ValidateCrossReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { // Check schema parent reference. - foundDesc, err := descGetter.GetDesc(ctx, desc.GetParentID()) + db, err := vdg.GetDatabaseDescriptor(desc.GetParentID()) if err != nil { - return err - } - db, isDB := foundDesc.(catalog.DatabaseDescriptor) - if !isDB { - return errors.AssertionFailedf("parent database ID %d does not exist", errors.Safe(desc.GetParentID())) + vea.Report(err) + return } // Check that parent has correct entry in schemas mapping. 
isInDBSchemas := false - err = db.ForEachSchemaInfo(func(id descpb.ID, name string, isDropped bool) error { + _ = db.ForEachSchemaInfo(func(id descpb.ID, name string, isDropped bool) error { if id == desc.GetID() { if isDropped { if name == desc.GetName() { - return errors.AssertionFailedf("present in parent database [%d] schemas mapping but marked as dropped", - errors.Safe(desc.GetParentID())) + vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but marked as dropped", + desc.GetParentID())) } return nil } if name != desc.GetName() { - return errors.AssertionFailedf("present in parent database [%d] schemas mapping but under name %q", - errors.Safe(desc.GetParentID()), errors.Safe(name)) + vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but under name %q", + desc.GetParentID(), errors.Safe(name))) + return nil } isInDBSchemas = true return nil } - if !isDropped && name == desc.GetName() { - return errors.AssertionFailedf("present in parent database [%d] schemas mapping but name maps to other schema [%d]", - errors.Safe(desc.GetParentID()), errors.Safe(id)) + if name == desc.GetName() && !isDropped { + vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but name maps to other schema [%d]", + desc.GetParentID(), id)) } return nil }) - if err != nil { - return err - } if !isInDBSchemas { - return errors.AssertionFailedf("not present in parent database [%d] schemas mapping", - errors.Safe(desc.GetParentID())) + vea.Report(errors.AssertionFailedf("not present in parent database [%d] schemas mapping", + desc.GetParentID())) } - return nil } -// ValidateTxnCommit punts to Validate. -func (desc *Immutable) ValidateTxnCommit(ctx context.Context, descGetter catalog.DescGetter) error { - return desc.Validate(ctx, descGetter) +// ValidateTxnCommit implements the catalog.Descriptor interface. 
+func (desc *Immutable) ValidateTxnCommit( + _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { + // No-op. } // NameResolutionResult implements the ObjectDescriptor interface. diff --git a/pkg/sql/catalog/schemadesc/schema_desc_test.go b/pkg/sql/catalog/schemadesc/schema_desc_test.go index 23fc0c66bea3..1987ea17ddc8 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc_test.go +++ b/pkg/sql/catalog/schemadesc/schema_desc_test.go @@ -11,11 +11,16 @@ package schemadesc_test import ( + "context" + "fmt" "testing" + "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/redact" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -57,3 +62,122 @@ func TestSafeMessage(t *testing.T) { }) } } + +func TestValidateCrossSchemaReferences(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + tests := []struct { + err string + desc descpb.SchemaDescriptor + dbDesc descpb.DatabaseDescriptor + }{ + { // 0 + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: "schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 52}, + }, + }, + }, + { // 1 + err: `referenced database ID 500: descriptor not found`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 500, + Name: "schema1", + }, + }, + { // 2 + err: `not present in parent database [51] schemas mapping`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: "schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + }, + }, + { // 2 + err: `not present in parent database [51] schemas mapping`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: 
"schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "foo": {ID: 52, Dropped: true}, + }, + }, + }, + { // 3 + err: `present in parent database [51] schemas mapping but marked as dropped`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: "schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 52, Dropped: true}, + }, + }, + }, + { // 4 + err: `present in parent database [51] schemas mapping but under name "bad"`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: "schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "bad": {ID: 52}, + }, + }, + }, + { // 5 + err: `present in parent database [51] schemas mapping but name maps to other schema [500]`, + desc: descpb.SchemaDescriptor{ + ID: 52, + ParentID: 51, + Name: "schema1", + }, + dbDesc: descpb.DatabaseDescriptor{ + ID: 51, + Schemas: map[string]descpb.DatabaseDescriptor_SchemaInfo{ + "schema1": {ID: 500}, + }, + }, + }, + } + + for i, test := range tests { + privilege := descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) + descs := catalog.MapDescGetter{} + test.desc.Privileges = privilege + desc := schemadesc.NewImmutable(test.desc) + descs[test.desc.ID] = desc + test.dbDesc.Privileges = privilege + descs[test.dbDesc.ID] = dbdesc.NewImmutable(test.dbDesc) + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) + if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { + if test.err != "" { + t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, test.desc) + } + } else if expectedErr != 
err.Error() { + t.Errorf("%d: expected \"%s\", but found \"%s\"", i, expectedErr, err.Error()) + } + } +} diff --git a/pkg/sql/catalog/tabledesc/BUILD.bazel b/pkg/sql/catalog/tabledesc/BUILD.bazel index 1d3039820d37..484565973824 100644 --- a/pkg/sql/catalog/tabledesc/BUILD.bazel +++ b/pkg/sql/catalog/tabledesc/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "structured.go", "table.go", "table_desc.go", + "validate.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc", visibility = ["//visibility:public"], diff --git a/pkg/sql/catalog/tabledesc/helpers_test.go b/pkg/sql/catalog/tabledesc/helpers_test.go index ccc39c92522c..9b1eac6fb624 100644 --- a/pkg/sql/catalog/tabledesc/helpers_test.go +++ b/pkg/sql/catalog/tabledesc/helpers_test.go @@ -11,30 +11,10 @@ package tabledesc import ( - "context" - "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/errors" ) -func ValidateTable(ctx context.Context, immI catalog.TableDescriptor) error { - imm, ok := immI.(*immutable) - if !ok { - return errors.Errorf("expected immutable descriptor") - } - return imm.ValidateSelf(ctx) -} - -func ValidateCrossReferences( - ctx context.Context, dg catalog.DescGetter, immI catalog.TableDescriptor, -) error { - imm, ok := immI.(*immutable) - if !ok { - return errors.Errorf("expected immutable descriptor") - } - return imm.validateCrossReferences(ctx, dg) -} - func ValidatePartitioning(immI catalog.TableDescriptor) error { imm, ok := immI.(*immutable) if !ok { diff --git a/pkg/sql/catalog/tabledesc/safe_format_test.go b/pkg/sql/catalog/tabledesc/safe_format_test.go index b83870f25d07..94a7a4d6046d 100644 --- a/pkg/sql/catalog/tabledesc/safe_format_test.go +++ b/pkg/sql/catalog/tabledesc/safe_format_test.go @@ -14,6 +14,7 @@ import ( "context" "testing" + "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -38,11 +39,11 @@ func TestSafeMessage(t *testing.T) { exp string }{ { - id: 12, + id: 112, parentID: 21, schema: "CREATE TABLE foo ()", exp: "tabledesc.Mutable: {" + - "ID: 12, Version: 1, IsUncommitted: true, " + + "ID: 112, Version: 1, IsUncommitted: true, " + "ModificationTime: \"0,0\", " + "ParentID: 21, ParentSchemaID: 29, " + "State: PUBLIC, " + @@ -56,11 +57,11 @@ func TestSafeMessage(t *testing.T) { "}", }, { - id: 12, + id: 112, parentID: 21, schema: "CREATE TABLE foo (i INT PRIMARY KEY, j INT, j_str STRING AS (j::STRING) STORED, INDEX (j_str))", exp: `tabledesc.immutable: {` + - `ID: 12, Version: 1, ModificationTime: "1.000000000,0", ` + + `ID: 112, Version: 1, ModificationTime: "1.000000000,0", ` + `ParentID: 21, ParentSchemaID: 29, State: PUBLIC, ` + `NextColumnID: 6, ` + `Columns: [` + @@ -76,7 +77,7 @@ func TestSafeMessage(t *testing.T) { `{MutationID: 3, JobID: 1234}` + `], ` + `Mutations: [` + - `{MutationID: 1, Direction: ADD, State: DELETE_AND_WRITE_ONLY, ConstraintType: FOREIGN_KEY, ForeignKey: {OriginTableID: 12, OriginColumns: [2], ReferencedTableID: 2, ReferencedColumnIDs: [3], Validity: Unvalidated, State: ADD, MutationID: 1}}, ` + + `{MutationID: 1, Direction: ADD, State: DELETE_AND_WRITE_ONLY, ConstraintType: FOREIGN_KEY, ForeignKey: {OriginTableID: 112, OriginColumns: [2], ReferencedTableID: 2, ReferencedColumnIDs: [3], Validity: Unvalidated, State: ADD, MutationID: 1}}, ` + `{MutationID: 2, Direction: ADD, State: DELETE_ONLY, Column: {ID: 5, TypeID: 20, Null: false, State: ADD, MutationID: 2}}, ` + `{MutationID: 3, Direction: ADD, State: DELETE_ONLY, ConstraintType: CHECK, NotNullColumn: 2, Check: {Columns: [2], Validity: Unvalidated, State: ADD, MutationID: 3}}, ` + `{MutationID: 3, Direction: ADD, State: DELETE_ONLY, Index: {ID: 3, Unique: false, Columns: [{ID: 3, Dir: ASC}, {ID: 2, Dir: DESC}], ExtraColumns: [1], StoreColumns: [5], State: ADD, MutationID: 3}}` + @@ -91,13 
+92,13 @@ func TestSafeMessage(t *testing.T) { `{Columns: [2], Validity: Validated}` + `], ` + `Unique Without Index Constraints: [` + - `{TableID: 12, Columns: [2], Validity: Validated}` + + `{TableID: 112, Columns: [2], Validity: Validated}` + `], ` + `InboundFKs: [` + - `{OriginTableID: 2, OriginColumns: [3], ReferencedTableID: 12, ReferencedColumnIDs: [2], Validity: Validated}` + + `{OriginTableID: 2, OriginColumns: [3], ReferencedTableID: 112, ReferencedColumnIDs: [2], Validity: Validated}` + `], ` + `OutboundFKs: [` + - `{OriginTableID: 12, OriginColumns: [2], ReferencedTableID: 3, ReferencedColumnIDs: [1], Validity: Validated}` + + `{OriginTableID: 112, OriginColumns: [2], ReferencedTableID: 3, ReferencedColumnIDs: [1], Validity: Validated}` + `]}`, f: func(mutable *tabledesc.Mutable) catalog.TableDescriptor { // Add check constraints, unique without index constraints, foreign key @@ -111,7 +112,7 @@ func TestSafeMessage(t *testing.T) { mutable.UniqueWithoutIndexConstraints = append( mutable.UniqueWithoutIndexConstraints, descpb.UniqueWithoutIndexConstraint{ Name: "unique", - TableID: 12, + TableID: 112, Validity: descpb.ConstraintValidity_Validated, ColumnIDs: []descpb.ColumnID{2}, }, @@ -121,14 +122,14 @@ func TestSafeMessage(t *testing.T) { OriginTableID: 2, OriginColumnIDs: []descpb.ColumnID{3}, ReferencedColumnIDs: []descpb.ColumnID{2}, - ReferencedTableID: 12, + ReferencedTableID: 112, Validity: descpb.ConstraintValidity_Validated, OnDelete: descpb.ForeignKeyReference_CASCADE, Match: descpb.ForeignKeyReference_PARTIAL, }) mutable.OutboundFKs = append(mutable.OutboundFKs, descpb.ForeignKeyConstraint{ Name: "outbound_fk", - OriginTableID: 12, + OriginTableID: 112, OriginColumnIDs: []descpb.ColumnID{2}, ReferencedColumnIDs: []descpb.ColumnID{1}, ReferencedTableID: 3, @@ -145,7 +146,7 @@ func TestSafeMessage(t *testing.T) { Name: "outbound_fk_mutation", ForeignKey: descpb.ForeignKeyConstraint{ Name: "outbound_fk_mutation", - OriginTableID: 12, + 
OriginTableID: 112, OriginColumnIDs: []descpb.ColumnID{2}, ReferencedTableID: 2, ReferencedColumnIDs: []descpb.ColumnID{3}, @@ -233,11 +234,11 @@ func TestSafeMessage(t *testing.T) { }, }, { - id: 12, + id: 112, parentID: 21, schema: "CREATE TABLE foo ()", exp: "tabledesc.immutable: {" + - "ID: 12, Version: 1, " + + "ID: 112, Version: 1, " + "ModificationTime: \"0,0\", " + "ParentID: 21, ParentSchemaID: 29, " + "State: PUBLIC, " + @@ -256,7 +257,7 @@ func TestSafeMessage(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - desc, err := sql.CreateTestTableDescriptor(ctx, tc.parentID, tc.id, tc.schema, &descpb.PrivilegeDescriptor{}) + desc, err := sql.CreateTestTableDescriptor(ctx, tc.parentID, tc.id, tc.schema, descpb.NewDefaultPrivilegeDescriptor(security.RootUserName())) require.NoError(t, err) var td catalog.TableDescriptor if tc.f != nil { @@ -265,7 +266,7 @@ func TestSafeMessage(t *testing.T) { td = desc } redacted := string(redact.Sprint(td).Redact()) - require.NoError(t, desc.ValidateSelf(ctx)) + require.NoError(t, catalog.ValidateSelf(desc)) require.Equal(t, tc.exp, redacted) var m map[string]interface{} require.NoError(t, yaml.UnmarshalStrict([]byte(redacted), &m), redacted) diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index f94c38234c33..63bcdac68872 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -22,20 +22,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/privilege" 
"github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/cockroach/pkg/util/interval" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" @@ -75,16 +71,18 @@ var ErrIndexGCMutationsList = errors.New("index in GC mutations list") // given TableDescriptor with the cluster version being the zero table. This // is for a table that is created in the transaction. func NewCreatedMutable(tbl descpb.TableDescriptor) *Mutable { - return &Mutable{wrapper: wrapper{TableDescriptor: tbl}} + m, _ := NewFilledInExistingMutable(context.TODO(), nil /* DescGetter */, false /* skipFKsWithMissingTable */, &tbl) + return &Mutable{wrapper: m.wrapper} } // NewExistingMutable returns a Mutable from the // given TableDescriptor with the cluster version also set to the descriptor. // This is for an existing table. func NewExistingMutable(tbl descpb.TableDescriptor) *Mutable { - m := NewCreatedMutable(tbl) - m.ClusterVersion = tbl - return m + return &Mutable{ + wrapper: wrapper{TableDescriptor: tbl}, + ClusterVersion: tbl, + } } // NewFilledInExistingMutable will construct a Mutable and potentially perform @@ -422,7 +420,11 @@ func maybeFillInDescriptor( skipFKsWithNoMatchingTable bool, ) (changes PostDeserializationTableDescriptorChanges, err error) { changes.UpgradedFormatVersion = maybeUpgradeFormatVersion(desc) - changes.FixedPrivileges = descpb.MaybeFixPrivileges(desc.ID, desc.Privileges) + + // Fill in any incorrect privileges that may have been missed due to mixed-versions. 
+ // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. + changes.FixedPrivileges = descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) if dg != nil { changes.UpgradedForeignKeyRepresentation, err = maybeUpgradeForeignKeyRepresentation( @@ -945,7 +947,7 @@ func (desc *Mutable) AllocateIDs(ctx context.Context) error { if desc.ID == 0 { desc.ID = keys.MinUserDescID } - err := desc.ValidateSelf(ctx) + err := catalog.ValidateSelf(desc) desc.ID = savedID return err } @@ -1252,387 +1254,6 @@ func (desc *Mutable) OriginalVersion() descpb.DescriptorVersion { return desc.ClusterVersion.Version } -type testingDescriptorValidation bool - -// PerformTestingDescriptorValidation can be set as a value on a context to -// ensure testing specific descriptor validation happens. -var PerformTestingDescriptorValidation testingDescriptorValidation = true - -// Validate performs ValidateSelf and then validates that -// each reference to another table is resolvable and that the necessary back -// references exist. -func (desc *wrapper) Validate(ctx context.Context, descGetter catalog.DescGetter) error { - if err := desc.ValidateSelf(ctx); err != nil { - return err - } - if desc.Dropped() || descGetter == nil { - return nil - } - - return errors.Wrapf(desc.validateCrossReferences(ctx, descGetter), "desc %d", desc.GetID()) -} - -// ValidateTxnCommit performs Validate and then performs additional -// pre-transaction-commit checks. -func (desc *wrapper) ValidateTxnCommit(ctx context.Context, descGetter catalog.DescGetter) error { - if err := desc.Validate(ctx, descGetter); err != nil { - return err - } - if desc.Dropped() { - return nil - } - // Pre-transaction commit table validations. - - // Check that primary key exists. 
- if !desc.HasPrimaryKey() { - return unimplemented.NewWithIssuef(48026, - "primary key of table %s dropped without subsequent addition of new primary key", - desc.GetName()) - } - - return nil -} - -// validateTableIfTesting is similar to validateTable, except it is only invoked -// when the context has the `PerformTestingDescriptorValidation` value set on it -// (dictated by ExecutorTestingKnobs). Any cross descriptor validations that may -// fail in the wild due to known bugs that have now been fixed should be added -// here instead of validateCrossReferences. -func (desc *wrapper) validateTableIfTesting(ctx context.Context) error { - if isTesting := ctx.Value(PerformTestingDescriptorValidation); isTesting == nil { - return nil - } - // TODO(arul): Fill this with testing only table validation - return nil -} - -// validateCrossReferencesIfTesting is similar to validateCrossReferences, -// except it is only invoked when the context has the -// `PerformTestingDescriptorValidation` value set on it -// (dictated by ExecutorTestingKnobs). Any cross reference descriptor validation -// that may fail in the wild due to known bugs that have now been fixed should -// be added here instead of validateCrossReferences. -func (desc *wrapper) validateCrossReferencesIfTesting( - ctx context.Context, _ catalog.DescGetter, -) error { - if isTesting := ctx.Value(PerformTestingDescriptorValidation); isTesting == nil { - return nil - } - // TODO(arul): Fill this with testing only cross reference validation - return nil -} - -// validateCrossReferences validates that each reference to another table is -// resolvable and that the necessary back references exist. -func (desc *wrapper) validateCrossReferences(ctx context.Context, dg catalog.DescGetter) error { - { - // Check that parent DB exists. 
- dbDesc, err := dg.GetDesc(ctx, desc.ParentID) - if err != nil { - return err - } - _, isDB := dbDesc.(catalog.DatabaseDescriptor) - - if !isDB { - return errors.AssertionFailedf("parentID %d does not exist", errors.Safe(desc.ParentID)) - } - - if err := desc.ValidateTableLocalityConfig(ctx, dg); err != nil { - return errors.AssertionFailedf("invalid locality config: %v", errors.Safe(err)) - } - } - - tablesByID := map[descpb.ID]catalog.TableDescriptor{desc.ID: desc} - getTable := func(id descpb.ID) (catalog.TableDescriptor, error) { - if table, ok := tablesByID[id]; ok { - return table, nil - } - table, err := catalog.GetTableDescFromID(ctx, dg, id) - if err != nil { - return nil, err - } - tablesByID[id] = table - return table, nil - } - - findTargetIndex := func(tableID descpb.ID, indexID descpb.IndexID) (catalog.TableDescriptor, *descpb.IndexDescriptor, error) { - targetTable, err := getTable(tableID) - if err != nil { - return nil, nil, errors.Wrapf(err, - "missing table=%d index=%d", errors.Safe(tableID), errors.Safe(indexID)) - } - targetIndex, err := targetTable.FindIndexWithID(indexID) - if err != nil { - return nil, nil, errors.Wrapf(err, - "missing table=%s index=%d", targetTable.GetName(), errors.Safe(indexID)) - } - return targetTable, targetIndex.IndexDesc(), nil - } - - // Check foreign keys. 
- for i := range desc.OutboundFKs { - fk := &desc.OutboundFKs[i] - referencedTable, err := getTable(fk.ReferencedTableID) - if err != nil { - return errors.Wrapf(err, - "invalid foreign key: missing table=%d", errors.Safe(fk.ReferencedTableID)) - } - found := false - _ = referencedTable.ForeachInboundFK(func(backref *descpb.ForeignKeyConstraint) error { - if !found && backref.OriginTableID == desc.ID && backref.Name == fk.Name { - found = true - } - return nil - }) - if found { - continue - } - // In 20.2 we introduced a bug where we fail to upgrade the FK references - // on the referenced descriptors from their pre-19.2 format when reading - // them during validation (#57032). So we account for the possibility of - // un-upgraded foreign key references on the other table. This logic - // somewhat parallels the logic in maybeUpgradeForeignKeyRepOnIndex. - unupgradedFKsPresent := false - if err := catalog.ForEachIndex(referencedTable, catalog.IndexOpts{}, func(referencedIdx catalog.Index) error { - if found { - // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add - // a way to break out of the index loop. - return nil - } - if len(referencedIdx.IndexDesc().ReferencedBy) > 0 { - unupgradedFKsPresent = true - } else { - return nil - } - // Determine whether the index on the other table is a unique index that - // could support this FK constraint. - if !referencedIdx.IsValidReferencedUniqueConstraint(fk.ReferencedColumnIDs) { - return nil - } - // Now check the backreferences. Backreferences in ReferencedBy only had - // Index and Table populated. - for i := range referencedIdx.IndexDesc().ReferencedBy { - backref := &referencedIdx.IndexDesc().ReferencedBy[i] - if backref.Table != desc.ID { - continue - } - // Look up the index that the un-upgraded reference refers to and - // see if that index could support the foreign key reference. (Note - // that it shouldn't be possible for this index to not exist. 
See - // planner.MaybeUpgradeDependentOldForeignKeyVersionTables, which is - // called from the drop index implementation.) - originalOriginIndex, err := desc.FindIndexWithID(backref.Index) - if err != nil { - return errors.AssertionFailedf( - "missing index %d on %q from pre-19.2 foreign key "+ - "backreference %q on %q", - backref.Index, desc.Name, fk.Name, referencedTable.GetName(), - ) - } - if originalOriginIndex.IsValidOriginIndex(fk.OriginColumnIDs) { - found = true - break - } - } - return nil - }); err != nil { - return err - } - if found { - continue - } - if unupgradedFKsPresent { - return errors.AssertionFailedf("missing fk back reference %q to %q "+ - "from %q (un-upgraded foreign key references present)", - fk.Name, desc.Name, referencedTable.GetName()) - } - return errors.AssertionFailedf("missing fk back reference %q to %q from %q", - fk.Name, desc.Name, referencedTable.GetName()) - } - for i := range desc.InboundFKs { - backref := &desc.InboundFKs[i] - originTable, err := getTable(backref.OriginTableID) - if err != nil { - return errors.Wrapf(err, - "invalid foreign key backreference: missing table=%d", errors.Safe(backref.OriginTableID)) - } - found := false - _ = originTable.ForeachOutboundFK(func(fk *descpb.ForeignKeyConstraint) error { - if !found && fk.ReferencedTableID == desc.ID && fk.Name == backref.Name { - found = true - } - return nil - }) - if found { - continue - } - // In 20.2 we introduced a bug where we fail to upgrade the FK references - // on the referenced descriptors from their pre-19.2 format when reading - // them during validation (#57032). So we account for the possibility of - // un-upgraded foreign key references on the other table. This logic - // somewhat parallels the logic in maybeUpgradeForeignKeyRepOnIndex. 
- unupgradedFKsPresent := false - if err := catalog.ForEachIndex(originTable, catalog.IndexOpts{}, func(originIdx catalog.Index) error { - if found { - // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add - // a way to break out of the index loop. - return nil - } - fk := originIdx.IndexDesc().ForeignKey - if fk.IsSet() { - unupgradedFKsPresent = true - } else { - return nil - } - // Determine whether the index on the other table is a index that could - // support this FK constraint on the referencing side. Such an index would - // have been required in earlier versions. - if !originIdx.IsValidOriginIndex(backref.OriginColumnIDs) { - return nil - } - if fk.Table != desc.ID { - return nil - } - // Look up the index that the un-upgraded reference refers to and - // see if that index could support the foreign key reference. (Note - // that it shouldn't be possible for this index to not exist. See - // planner.MaybeUpgradeDependentOldForeignKeyVersionTables, which is - // called from the drop index implementation.) - originalReferencedIndex, err := desc.FindIndexWithID(fk.Index) - if err != nil { - return errors.AssertionFailedf( - "missing index %d on %q from pre-19.2 foreign key forward reference %q on %q", - fk.Index, desc.Name, backref.Name, originTable.GetName(), - ) - } - if originalReferencedIndex.IsValidReferencedUniqueConstraint(backref.ReferencedColumnIDs) { - found = true - } - return nil - }); err != nil { - return err - } - if found { - continue - } - if unupgradedFKsPresent { - return errors.AssertionFailedf("missing fk forward reference %q to %q from %q "+ - "(un-upgraded foreign key references present)", - backref.Name, desc.Name, originTable.GetName()) - } - return errors.AssertionFailedf("missing fk forward reference %q to %q from %q", - backref.Name, desc.Name, originTable.GetName()) - } - - for _, indexI := range desc.ActiveIndexes() { - index := indexI.IndexDesc() - // Check partitioning is correctly set. 
- // We only check these for active indexes, as inactive indexes may be in the process - // of being backfilled without PartitionAllBy. - if desc.PartitionAllBy { - primaryIndexPartitioning := desc.PrimaryIndex.ColumnIDs[:desc.PrimaryIndex.Partitioning.NumColumns] - indexPartitioning := index.ColumnIDs[:index.Partitioning.NumColumns] - matchesPartitioning := false - if len(primaryIndexPartitioning) == len(indexPartitioning) { - matchesPartitioning = true - for i := range primaryIndexPartitioning { - if primaryIndexPartitioning[i] != indexPartitioning[i] { - matchesPartitioning = false - break - } - } - } - if !matchesPartitioning { - return errors.AssertionFailedf( - "table has PARTITION ALL BY defined, but index %s does not have matching PARTITION BY", - index.Name, - ) - } - } - } - - for _, indexI := range desc.NonDropIndexes() { - index := indexI.IndexDesc() - // Check interleaves. - if len(index.Interleave.Ancestors) > 0 { - // Only check the most recent ancestor, the rest of them don't point - // back. 
- ancestor := index.Interleave.Ancestors[len(index.Interleave.Ancestors)-1] - targetTable, targetIndex, err := findTargetIndex(ancestor.TableID, ancestor.IndexID) - if err != nil { - return errors.Wrapf(err, "invalid interleave") - } - found := false - for _, backref := range targetIndex.InterleavedBy { - if backref.Table == desc.ID && backref.Index == index.ID { - found = true - break - } - } - if !found { - return errors.AssertionFailedf( - "missing interleave back reference to %q@%q from %q@%q", - desc.Name, index.Name, targetTable.GetName(), targetIndex.Name) - } - } - interleaveBackrefs := make(map[descpb.ForeignKeyReference]struct{}) - for _, backref := range index.InterleavedBy { - if _, ok := interleaveBackrefs[backref]; ok { - return errors.AssertionFailedf("duplicated interleave backreference %+v", backref) - } - interleaveBackrefs[backref] = struct{}{} - targetTable, err := getTable(backref.Table) - if err != nil { - return errors.Wrapf(err, - "invalid interleave backreference table=%d index=%d", - backref.Table, backref.Index) - } - targetIndex, err := targetTable.FindIndexWithID(backref.Index) - if err != nil { - return errors.Wrapf(err, - "invalid interleave backreference table=%s index=%d", - targetTable.GetName(), backref.Index) - } - if targetIndex.NumInterleaveAncestors() == 0 { - return errors.AssertionFailedf( - "broken interleave backward reference from %q@%q to %q@%q", - desc.Name, index.Name, targetTable.GetName(), targetIndex.GetName()) - } - // The last ancestor is required to be a backreference. - ancestor := targetIndex.GetInterleaveAncestor(targetIndex.NumInterleaveAncestors() - 1) - if ancestor.TableID != desc.ID || ancestor.IndexID != index.ID { - return errors.AssertionFailedf( - "broken interleave backward reference from %q@%q to %q@%q", - desc.Name, index.Name, targetTable.GetName(), targetIndex.GetName()) - } - } - } - // TODO(dan): Also validate SharedPrefixLen in the interleaves. 
- - // Validate the all types present in the descriptor exist. - getType := getTypeGetter(ctx, dg) - parentDesc, err := dg.GetDesc(ctx, desc.ParentID) - if err != nil { - return err - } - dbDesc, isDB := parentDesc.(catalog.DatabaseDescriptor) - if !isDB { - return errors.AssertionFailedf("parent id %d is not a database", dbDesc.GetID()) - } - - typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, getType) - if err != nil { - return err - } - for _, id := range typeIDs { - if _, err := getType(id); err != nil { - return err - } - } - - return desc.validateCrossReferencesIfTesting(ctx, dg) -} - // FormatTableLocalityConfig formats the table locality. func FormatTableLocalityConfig(c *descpb.TableDescriptor_LocalityConfig, f *tree.FmtCtx) error { switch v := c.Locality.(type) { @@ -1659,182 +1280,6 @@ func FormatTableLocalityConfig(c *descpb.TableDescriptor_LocalityConfig, f *tree return nil } -// ValidateTableLocalityConfig validates whether the descriptor's locality -// config is valid under the given database. -func (desc *wrapper) ValidateTableLocalityConfig(ctx context.Context, dg catalog.DescGetter) error { - dbDesc, err := dg.GetDesc(ctx, desc.ParentID) - if err != nil { - return err - } - db, isDB := dbDesc.(catalog.DatabaseDescriptor) - if !isDB { - return errors.AssertionFailedf("database %q with ID %d does not exist", - dbDesc.GetName(), errors.Safe(desc.ParentID)) - } - - if desc.LocalityConfig == nil { - if db.IsMultiRegion() { - return pgerror.Newf( - pgcode.InvalidTableDefinition, - "database %s is multi-region enabled, but table %s has no locality set", - db.DatabaseDesc().Name, - desc.Name, - ) - } - // Nothing to validate for non-multi-region databases. - return nil - } - - if !db.IsMultiRegion() { - s := tree.NewFmtCtx(tree.FmtSimple) - var locality string - // Formatting the table locality config should never fail; if it does, the - // error message is more clear if we construct a dummy locality here. 
- if err := FormatTableLocalityConfig(desc.LocalityConfig, s); err != nil { - locality = "INVALID LOCALITY" - } - locality = s.String() - return pgerror.Newf( - pgcode.InvalidTableDefinition, - "database %s is not multi-region enabled, but table %s has locality %s set", - db.DatabaseDesc().Name, - desc.Name, - locality, - ) - } - - regionsEnumID, err := db.MultiRegionEnumID() - if err != nil { - return err - } - regionsEnum, err := dg.GetDesc(ctx, regionsEnumID) - if err != nil { - return err - } - regionsEnumDesc, isTypeDesc := regionsEnum.(catalog.TypeDescriptor) - if !isTypeDesc { - return errors.AssertionFailedf("multi-region enum with ID %d does not exist", - errors.Safe(regionsEnumID)) - } - - // REGIONAL BY TABLE tables homed in the primary region should include a - // reference to the multi-region type descriptor and a corresponding - // backreference. All other patterns should only contain a reference if there - // is an explicit column which uses the multi-region type descriptor as its - // *types.T. While the specific cases are validated below, we search for the - // region enum ID in the references list just once, up top here. 
- getTypes := getTypeGetter(ctx, dg) - typeIDs, err := desc.GetAllReferencedTypeIDs(db, getTypes) - if err != nil { - return err - } - regionEnumIDReferenced := false - for _, typeID := range typeIDs { - if typeID == regionsEnumID { - regionEnumIDReferenced = true - break - } - } - columnTypesTypeIDs, err := desc.getAllReferencedTypesInTableColumns(getTypes) - if err != nil { - return err - } - switch lc := desc.LocalityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: - if regionEnumIDReferenced { - if _, found := columnTypesTypeIDs[regionsEnumID]; !found { - return errors.AssertionFailedf( - "expected no region Enum ID to be referenced by a GLOBAL TABLE: %q"+ - " but found: %d", - desc.GetName(), - regionsEnumDesc.GetID(), - ) - } - } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: - if !desc.IsPartitionAllBy() { - return errors.AssertionFailedf("expected REGIONAL BY ROW table to have PartitionAllBy set") - } - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: - - // Table is homed in an explicit (non-primary) region. 
- if lc.RegionalByTable.Region != nil { - foundRegion := false - regions, err := regionsEnumDesc.RegionNames() - if err != nil { - return err - } - for _, r := range regions { - if *lc.RegionalByTable.Region == r { - foundRegion = true - break - } - } - if !foundRegion { - return errors.WithHintf( - pgerror.Newf( - pgcode.InvalidTableDefinition, - `region "%s" has not been added to database "%s"`, - *lc.RegionalByTable.Region, - db.DatabaseDesc().Name, - ), - "available regions: %s", - strings.Join(regions.ToStrings(), ", "), - ) - } - if !regionEnumIDReferenced { - return errors.AssertionFailedf( - "expected multi-region enum ID %d to be referenced on REGIONAL BY TABLE: %q locality "+ - "config, but did not find it", - regionsEnumID, - desc.GetName(), - ) - } - } else { - if regionEnumIDReferenced { - // It may be the case that the multi-region type descriptor is used - // as the type of the table column. Validations should only fail if - // that is not the case. - if _, found := columnTypesTypeIDs[regionsEnumID]; !found { - return errors.AssertionFailedf( - "expected no region Enum ID to be referenced by a REGIONAL BY TABLE: %q homed in the "+ - "primary region, but found: %d", - desc.GetName(), - regionsEnumDesc.GetID(), - ) - } - } - } - default: - return pgerror.Newf( - pgcode.InvalidTableDefinition, - "unknown locality level: %T", - lc, - ) - } - return nil -} - -func getTypeGetter( - ctx context.Context, dg catalog.DescGetter, -) func(descpb.ID) (catalog.TypeDescriptor, error) { - // typeMap caches accesses to TypeDescriptors, and is wrapped by getType. - // TODO(ajwerner): generalize this to a cached implementation of the - // DescGetter. 
- typeMap := make(map[descpb.ID]catalog.TypeDescriptor) - return func(id descpb.ID) (catalog.TypeDescriptor, error) { - if typeDesc, ok := typeMap[id]; ok { - return typeDesc, nil - } - typeDesc, err := catalog.GetTypeDescFromID(ctx, dg, id) - if err != nil { - return nil, errors.Wrapf(err, "type ID %d in descriptor not found", id) - } - typeMap[id] = typeDesc - return typeDesc, nil - } -} - // ValidateIndexNameIsUnique validates that the index name does not exist. func (desc *wrapper) ValidateIndexNameIsUnique(indexName string) error { if catalog.FindNonDropIndex(desc, func(idx catalog.Index) bool { @@ -1845,783 +1290,20 @@ func (desc *wrapper) ValidateIndexNameIsUnique(indexName string) error { return nil } -// ValidateSelf validates that the table descriptor is well formed. Checks -// include validating the table, column and index names, verifying that column -// names and index names are unique and verifying that column IDs and index IDs -// are consistent. Use Validate to validate that cross-table references are -// correct. -// If version is supplied, the descriptor is checked for version incompatibilities. -func (desc *wrapper) ValidateSelf(ctx context.Context) error { - if err := catalog.ValidateName(desc.Name, "table"); err != nil { - return err - } - if desc.ID == 0 { - return errors.AssertionFailedf("invalid table ID %d", errors.Safe(desc.ID)) - } - - // ParentID is the ID of the database holding this table. - // It is often < ID, except when a table gets moved across databases. 
- if desc.ParentID == 0 && !desc.IsVirtualTable() { - return errors.AssertionFailedf("invalid parent ID %d", errors.Safe(desc.ParentID)) - } - - if desc.IsSequence() { - return nil - } - - if len(desc.Columns) == 0 { - return ErrMissingColumns - } - - columnNames := make(map[string]descpb.ColumnID, len(desc.Columns)) - columnIDs := make(map[descpb.ColumnID]*descpb.ColumnDescriptor, len(desc.Columns)) - if err := desc.validateColumns(columnNames, columnIDs); err != nil { - return err - } - - // TODO(dt, nathan): virtual descs don't validate (missing privs, PK, etc). - if desc.IsVirtualTable() { - return nil - } - - // We maintain forward compatibility, so if you see this error message with a - // version older that what this client supports, then there's a - // maybeFillInDescriptor missing from some codepath. - if v := desc.GetFormatVersion(); v != descpb.FamilyFormatVersion && v != descpb.InterleavedFormatVersion { - // TODO(dan): We're currently switching from FamilyFormatVersion to - // InterleavedFormatVersion. 
After a beta is released with this dual version - // support, then: - // - Upgrade the bidirectional reference version to that beta - // - Start constructing all TableDescriptors with InterleavedFormatVersion - // - Change maybeUpgradeFormatVersion to output InterleavedFormatVersion - // - Change this check to only allow InterleavedFormatVersion - return errors.AssertionFailedf( - "table %q is encoded using using version %d, but this client only supports version %d and %d", - desc.Name, errors.Safe(desc.GetFormatVersion()), - errors.Safe(descpb.FamilyFormatVersion), errors.Safe(descpb.InterleavedFormatVersion)) - } - - if err := desc.CheckUniqueConstraints(); err != nil { - return err - } - - for _, m := range desc.Mutations { - unSetEnums := m.State == descpb.DescriptorMutation_UNKNOWN || m.Direction == descpb.DescriptorMutation_NONE - switch desc := m.Descriptor_.(type) { - case *descpb.DescriptorMutation_Column: - col := desc.Column - if unSetEnums { - return errors.AssertionFailedf( - "mutation in state %s, direction %s, col %q, id %v", - errors.Safe(m.State), errors.Safe(m.Direction), col.Name, errors.Safe(col.ID)) - } - columnIDs[col.ID] = col - case *descpb.DescriptorMutation_Index: - if unSetEnums { - idx := desc.Index - return errors.AssertionFailedf( - "mutation in state %s, direction %s, index %s, id %v", - errors.Safe(m.State), errors.Safe(m.Direction), idx.Name, errors.Safe(idx.ID)) - } - case *descpb.DescriptorMutation_Constraint: - if unSetEnums { - return errors.AssertionFailedf( - "mutation in state %s, direction %s, constraint %v", - errors.Safe(m.State), errors.Safe(m.Direction), desc.Constraint.Name) - } - case *descpb.DescriptorMutation_PrimaryKeySwap: - if m.Direction == descpb.DescriptorMutation_NONE { - return errors.AssertionFailedf( - "primary key swap mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) - } - case *descpb.DescriptorMutation_ComputedColumnSwap: - if m.Direction == 
descpb.DescriptorMutation_NONE { - return errors.AssertionFailedf( - "computed column swap mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) - } - case *descpb.DescriptorMutation_MaterializedViewRefresh: - if m.Direction == descpb.DescriptorMutation_NONE { - return errors.AssertionFailedf( - "materialized view refresh mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) - } - default: - return errors.AssertionFailedf( - "mutation in state %s, direction %s, and no column/index descriptor", - errors.Safe(m.State), errors.Safe(m.Direction)) - } - } - - // TODO(dt): Validate each column only appears at-most-once in any FKs. - - // Only validate column families, constraints, and indexes if this is - // actually a table, not if it's just a view. - if desc.IsPhysicalTable() { - if err := desc.validateColumnFamilies(columnIDs); err != nil { - return err - } - - if err := desc.validateCheckConstraints(columnIDs); err != nil { - return err - } - - if err := desc.validateUniqueWithoutIndexConstraints(columnIDs); err != nil { - return err - } - - if err := desc.validateTableIndexes(columnNames); err != nil { - return err - } - - if err := desc.validatePartitioning(); err != nil { - return err - } - } - - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - descpb.MaybeFixPrivileges(desc.GetID(), desc.Privileges) - - // Ensure that mutations cannot be queued if a primary key change or - // an alter column type schema change has either been started in - // this transaction, or is currently in progress. 
- var alterPKMutation descpb.MutationID - var alterColumnTypeMutation descpb.MutationID - var foundAlterPK bool - var foundAlterColumnType bool - - for _, m := range desc.Mutations { - // If we have seen an alter primary key mutation, then - // m we are considering right now is invalid. - if foundAlterPK { - if alterPKMutation == m.MutationID { - return unimplemented.NewWithIssue( - 45615, - "cannot perform other schema changes in the same transaction as a primary key change", - ) - } - return unimplemented.NewWithIssue( - 45615, - "cannot perform a schema change operation while a primary key change is in progress", - ) - } - if foundAlterColumnType { - if alterColumnTypeMutation == m.MutationID { - return unimplemented.NewWithIssue( - 47137, - "cannot perform other schema changes in the same transaction as an ALTER COLUMN TYPE schema change", - ) - } - return unimplemented.NewWithIssue( - 47137, - "cannot perform a schema change operation while an ALTER COLUMN TYPE schema change is in progress", - ) - } - if m.GetPrimaryKeySwap() != nil { - foundAlterPK = true - alterPKMutation = m.MutationID - } - if m.GetComputedColumnSwap() != nil { - foundAlterColumnType = true - alterColumnTypeMutation = m.MutationID - } - } - - if err := desc.validateTableIfTesting(ctx); err != nil { - return err - } - - // Validate the privilege descriptor. 
- return desc.Privileges.Validate(desc.GetID(), privilege.Table) -} - -func (desc *wrapper) validateColumns( - columnNames map[string]descpb.ColumnID, columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, -) error { - for _, column := range desc.NonDropColumns() { - - if err := catalog.ValidateName(column.GetName(), "column"); err != nil { - return err - } - if column.GetID() == 0 { - return errors.AssertionFailedf("invalid column ID %d", errors.Safe(column.GetID())) - } - - if _, columnNameExists := columnNames[column.GetName()]; columnNameExists { - for i := range desc.Columns { - if desc.Columns[i].Name == column.GetName() { - return pgerror.Newf(pgcode.DuplicateColumn, - "duplicate column name: %q", column.GetName()) - } - } - return pgerror.Newf(pgcode.DuplicateColumn, - "duplicate: column %q in the middle of being added, not yet public", column.GetName()) - } - if colinfo.IsSystemColumnName(column.GetName()) { - return pgerror.Newf(pgcode.DuplicateColumn, - "column name %q conflicts with a system column name", column.GetName()) - } - columnNames[column.GetName()] = column.GetID() - - if other, ok := columnIDs[column.GetID()]; ok { - return fmt.Errorf("column %q duplicate ID of column %q: %d", - column.GetName(), other.Name, column.GetID()) - } - columnIDs[column.GetID()] = column.ColumnDesc() - - if column.GetID() >= desc.NextColumnID { - return errors.AssertionFailedf("column %q invalid ID (%d) >= next column ID (%d)", - column.GetName(), errors.Safe(column.GetID()), errors.Safe(desc.NextColumnID)) - } - - if column.IsComputed() { - // Verify that the computed column expression is valid. 
- expr, err := parser.ParseExpr(column.GetComputeExpr()) - if err != nil { - return err - } - valid, err := schemaexpr.HasValidColumnReferences(desc, expr) - if err != nil { - return err - } - if !valid { - return fmt.Errorf("computed column %q refers to unknown columns in expression: %s", - column.GetName(), column.GetComputeExpr()) - } - } else if column.IsVirtual() { - return fmt.Errorf("virtual column %q is not computed", column.GetName()) - } - } - return nil -} - -func (desc *wrapper) validateColumnFamilies( - columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, -) error { - if len(desc.Families) < 1 { - return fmt.Errorf("at least 1 column family must be specified") - } - if desc.Families[0].ID != descpb.FamilyID(0) { - return fmt.Errorf("the 0th family must have ID 0") - } - - familyNames := map[string]struct{}{} - familyIDs := map[descpb.FamilyID]string{} - colIDToFamilyID := map[descpb.ColumnID]descpb.FamilyID{} - for i := range desc.Families { - family := &desc.Families[i] - if err := catalog.ValidateName(family.Name, "family"); err != nil { - return err - } - - if i != 0 { - prevFam := desc.Families[i-1] - if family.ID < prevFam.ID { - return errors.Newf( - "family %s at index %d has id %d less than family %s at index %d with id %d", - family.Name, i, family.ID, prevFam.Name, i-1, prevFam.ID) - } - } - - if _, ok := familyNames[family.Name]; ok { - return fmt.Errorf("duplicate family name: %q", family.Name) - } - familyNames[family.Name] = struct{}{} - - if other, ok := familyIDs[family.ID]; ok { - return fmt.Errorf("family %q duplicate ID of family %q: %d", - family.Name, other, family.ID) - } - familyIDs[family.ID] = family.Name - - if family.ID >= desc.NextFamilyID { - return fmt.Errorf("family %q invalid family ID (%d) > next family ID (%d)", - family.Name, family.ID, desc.NextFamilyID) - } - - if len(family.ColumnIDs) != len(family.ColumnNames) { - return fmt.Errorf("mismatched column ID size (%d) and name size (%d)", - len(family.ColumnIDs), 
len(family.ColumnNames)) - } - - for i, colID := range family.ColumnIDs { - col, ok := columnIDs[colID] - if !ok { - return fmt.Errorf("family %q contains unknown column \"%d\"", family.Name, colID) - } - if col.Name != family.ColumnNames[i] { - return fmt.Errorf("family %q column %d should have name %q, but found name %q", - family.Name, colID, col.Name, family.ColumnNames[i]) - } - if col.Virtual { - return fmt.Errorf("virtual computed column %q cannot be part of a family", col.Name) - } - } - - for _, colID := range family.ColumnIDs { - if famID, ok := colIDToFamilyID[colID]; ok { - return fmt.Errorf("column %d is in both family %d and %d", colID, famID, family.ID) - } - colIDToFamilyID[colID] = family.ID - } - } - for colID, colDesc := range columnIDs { - if !colDesc.Virtual { - if _, ok := colIDToFamilyID[colID]; !ok { - return fmt.Errorf("column %q is not in any column family", colDesc.Name) - } - } - } - return nil -} - -// validateCheckConstraints validates that check constraints are well formed. -// Checks include validating the column IDs and verifying that check expressions -// do not reference non-existent columns. -func (desc *wrapper) validateCheckConstraints( - columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, -) error { - for _, chk := range desc.AllActiveAndInactiveChecks() { - // Verify that the check's column IDs are valid. - for _, colID := range chk.ColumnIDs { - _, ok := columnIDs[colID] - if !ok { - return fmt.Errorf("check constraint %q contains unknown column \"%d\"", chk.Name, colID) - } - } - - // Verify that the check's expression is valid. 
- expr, err := parser.ParseExpr(chk.Expr) - if err != nil { - return err - } - valid, err := schemaexpr.HasValidColumnReferences(desc, expr) - if err != nil { - return err - } - if !valid { - return fmt.Errorf("check constraint %q refers to unknown columns in expression: %s", - chk.Name, chk.Expr) - } - } - return nil -} - -// validateUniqueWithoutIndexConstraints validates that unique without index -// constraints are well formed. Checks include validating the column IDs and -// column names. -func (desc *wrapper) validateUniqueWithoutIndexConstraints( - columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, -) error { - for _, c := range desc.AllActiveAndInactiveUniqueWithoutIndexConstraints() { - if err := catalog.ValidateName(c.Name, "unique without index constraint"); err != nil { - return err - } - - // Verify that the table ID is valid. - if c.TableID != desc.ID { - return fmt.Errorf( - "TableID mismatch for unique without index constraint %q: \"%d\" doesn't match descriptor: \"%d\"", - c.Name, c.TableID, desc.ID, - ) - } - - // Verify that the constraint's column IDs are valid and unique. - var seen util.FastIntSet - for _, colID := range c.ColumnIDs { - _, ok := columnIDs[colID] - if !ok { - return fmt.Errorf( - "unique without index constraint %q contains unknown column \"%d\"", c.Name, colID, - ) - } - if seen.Contains(int(colID)) { - return fmt.Errorf( - "unique without index constraint %q contains duplicate column \"%d\"", c.Name, colID, - ) - } - seen.Add(int(colID)) - } - - if c.IsPartial() { - expr, err := parser.ParseExpr(c.Predicate) - if err != nil { - return err - } - valid, err := schemaexpr.HasValidColumnReferences(desc, expr) - if err != nil { - return err - } - if !valid { - return fmt.Errorf( - "partial unique without index constraint %q refers to unknown columns in predicate: %s", - c.Name, - c.Predicate, - ) - } - } - } - - return nil -} - -// validateTableIndexes validates that indexes are well formed. 
Checks include -// validating the columns involved in the index, verifying the index names and -// IDs are unique, and the family of the primary key is 0. This does not check -// if indexes are unique (i.e. same set of columns, direction, and uniqueness) -// as there are practical uses for them. -func (desc *wrapper) validateTableIndexes(columnNames map[string]descpb.ColumnID) error { - if len(desc.PrimaryIndex.ColumnIDs) == 0 { - return ErrMissingPrimaryKey - } - - var virtualCols catalog.TableColSet - for i := range desc.Columns { - if desc.Columns[i].Virtual { - virtualCols.Add(desc.Columns[i].ID) - } - } - - // Verify that the primary index columns are not virtual. - for i, col := range desc.PrimaryIndex.ColumnIDs { - if virtualCols.Contains(col) { - return fmt.Errorf("primary index column %q cannot be virtual", desc.PrimaryIndex.ColumnNames[i]) - } - } - - indexNames := map[string]struct{}{} - indexIDs := map[descpb.IndexID]string{} - for _, indexI := range desc.NonDropIndexes() { - index := indexI.IndexDesc() - if err := catalog.ValidateName(index.Name, "index"); err != nil { - return err - } - if index.ID == 0 { - return fmt.Errorf("invalid index ID %d", index.ID) - } - - if _, indexNameExists := indexNames[index.Name]; indexNameExists { - for i := range desc.Indexes { - if desc.Indexes[i].Name == index.Name { - // This error should be caught in MakeIndexDescriptor or NewTableDesc. - return errors.HandleAsAssertionFailure(fmt.Errorf("duplicate index name: %q", index.Name)) - } - } - // This error should be caught in MakeIndexDescriptor. 
- return errors.HandleAsAssertionFailure(fmt.Errorf( - "duplicate: index %q in the middle of being added, not yet public", index.Name)) - } - indexNames[index.Name] = struct{}{} - - if other, ok := indexIDs[index.ID]; ok { - return fmt.Errorf("index %q duplicate ID of index %q: %d", - index.Name, other, index.ID) - } - indexIDs[index.ID] = index.Name - - if index.ID >= desc.NextIndexID { - return fmt.Errorf("index %q invalid index ID (%d) > next index ID (%d)", - index.Name, index.ID, desc.NextIndexID) - } - - if len(index.ColumnIDs) != len(index.ColumnNames) { - return fmt.Errorf("mismatched column IDs (%d) and names (%d)", - len(index.ColumnIDs), len(index.ColumnNames)) - } - if len(index.ColumnIDs) != len(index.ColumnDirections) { - return fmt.Errorf("mismatched column IDs (%d) and directions (%d)", - len(index.ColumnIDs), len(index.ColumnDirections)) - } - // In the old STORING encoding, stored columns are in ExtraColumnIDs; - // tolerate a longer list of column names. - if len(index.StoreColumnIDs) > len(index.StoreColumnNames) { - return fmt.Errorf("mismatched STORING column IDs (%d) and names (%d)", - len(index.StoreColumnIDs), len(index.StoreColumnNames)) - } - - if len(index.ColumnIDs) == 0 { - return fmt.Errorf("index %q must contain at least 1 column", index.Name) - } - - var validateIndexDup catalog.TableColSet - for i, name := range index.ColumnNames { - colID, ok := columnNames[name] - if !ok { - return fmt.Errorf("index %q contains unknown column %q", index.Name, name) - } - if colID != index.ColumnIDs[i] { - return fmt.Errorf("index %q column %q should have ID %d, but found ID %d", - index.Name, name, colID, index.ColumnIDs[i]) - } - if validateIndexDup.Contains(colID) { - return fmt.Errorf("index %q contains duplicate column %q", index.Name, name) - } - validateIndexDup.Add(colID) - } - if index.IsSharded() { - if err := desc.ensureShardedIndexNotComputed(index); err != nil { - return err - } - if _, exists := columnNames[index.Sharded.Name]; 
!exists { - return fmt.Errorf("index %q refers to non-existent shard column %q", - index.Name, index.Sharded.Name) - } - } - if index.IsPartial() { - expr, err := parser.ParseExpr(index.Predicate) - if err != nil { - return err - } - valid, err := schemaexpr.HasValidColumnReferences(desc, expr) - if err != nil { - return err - } - if !valid { - return fmt.Errorf("partial index %q refers to unknown columns in predicate: %s", - index.Name, index.Predicate) - } - } - // Ensure that indexes do not STORE virtual columns. - for _, col := range index.ExtraColumnIDs { - if virtualCols.Contains(col) { - return fmt.Errorf("index %q cannot store virtual column %d", index.Name, col) - } - } - for i, col := range index.StoreColumnIDs { - if virtualCols.Contains(col) { - return fmt.Errorf("index %q cannot store virtual column %q", index.Name, index.StoreColumnNames[i]) - } - } - } - - return nil -} - -// ensureShardedIndexNotComputed ensures that the sharded index is not based on a computed -// column. This is because the sharded index is based on a hidden computed shard column -// under the hood and we don't support transitively computed columns (computed column A -// based on another computed column B). -func (desc *wrapper) ensureShardedIndexNotComputed(index *descpb.IndexDescriptor) error { - for _, colName := range index.Sharded.ColumnNames { - col, err := desc.FindColumnWithName(tree.Name(colName)) - if err != nil { - return err - } - if col.IsComputed() { - return pgerror.Newf(pgcode.InvalidTableDefinition, - "cannot create a sharded index on a computed column") - } - } - return nil -} - -// PrimaryKeyString returns the pretty-printed primary key declaration for a -// table descriptor. 
-func (desc *wrapper) PrimaryKeyString() string { - var primaryKeyString strings.Builder - primaryKeyString.WriteString("PRIMARY KEY (%s)") - if desc.PrimaryIndex.IsSharded() { - fmt.Fprintf(&primaryKeyString, " USING HASH WITH BUCKET_COUNT = %v", - desc.PrimaryIndex.Sharded.ShardBuckets) +// PrimaryKeyString returns the pretty-printed primary key declaration for a +// table descriptor. +func (desc *wrapper) PrimaryKeyString() string { + var primaryKeyString strings.Builder + primaryKeyString.WriteString("PRIMARY KEY (%s)") + if desc.PrimaryIndex.IsSharded() { + fmt.Fprintf(&primaryKeyString, " USING HASH WITH BUCKET_COUNT = %v", + desc.PrimaryIndex.Sharded.ShardBuckets) } return fmt.Sprintf(primaryKeyString.String(), desc.PrimaryIndex.ColNamesString(), ) } -// validatePartitioningDescriptor validates that a PartitioningDescriptor, which -// may represent a subpartition, is well-formed. Checks include validating the -// index-level uniqueness of all partition names, validating that the encoded -// tuples match the corresponding column types, and that range partitions are -// stored sorted by upper bound. colOffset is non-zero for subpartitions and -// indicates how many index columns to skip over. -func (desc *wrapper) validatePartitioningDescriptor( - a *rowenc.DatumAlloc, - idxDesc *descpb.IndexDescriptor, - partDesc *descpb.PartitioningDescriptor, - colOffset int, - partitionNames map[string]string, -) error { - if partDesc.NumImplicitColumns > partDesc.NumColumns { - return errors.Newf( - "cannot have implicit partitioning columns (%d) > partitioning columns (%d)", - partDesc.NumImplicitColumns, - partDesc.NumColumns, - ) - } - if partDesc.NumColumns == 0 { - return nil - } - - // TODO(dan): The sqlccl.GenerateSubzoneSpans logic is easier if we disallow - // setting zone configs on indexes that are interleaved into another index. - // InterleavedBy is fine, so using the root of the interleave hierarchy will - // work. 
It is expected that this is sufficient for real-world use cases. - // Revisit this restriction if that expectation is wrong. - if len(idxDesc.Interleave.Ancestors) > 0 { - return errors.Errorf("cannot set a zone config for interleaved index %s; "+ - "set it on the root of the interleaved hierarchy instead", idxDesc.Name) - } - - // We don't need real prefixes in the DecodePartitionTuple calls because we're - // only using it to look for collisions and the prefix would be the same for - // all of them. Faking them out with DNull allows us to make O(list partition) - // calls to DecodePartitionTuple instead of O(list partition entry). - fakePrefixDatums := make([]tree.Datum, colOffset) - for i := range fakePrefixDatums { - fakePrefixDatums[i] = tree.DNull - } - - if len(partDesc.List) == 0 && len(partDesc.Range) == 0 { - return fmt.Errorf("at least one of LIST or RANGE partitioning must be used") - } - if len(partDesc.List) > 0 && len(partDesc.Range) > 0 { - return fmt.Errorf("only one LIST or RANGE partitioning may used") - } - - // Do not validate partitions which use unhydrated user-defined types. - // This should only happen at read time and descriptors should not become - // invalid at read time, only at write time. - { - numColumns := int(partDesc.NumColumns) - for i := colOffset; i < colOffset+numColumns; i++ { - // The partitioning descriptor may be invalid and refer to columns - // not stored in the index. In that case, skip this check as the - // validation will fail later. 
- if i >= len(idxDesc.ColumnIDs) { - continue - } - col, err := desc.FindColumnWithID(idxDesc.ColumnIDs[i]) - if err != nil { - return err - } - if col.GetType().UserDefined() && !col.GetType().IsHydrated() { - return nil - } - } - } - - checkName := func(name string) error { - if len(name) == 0 { - return fmt.Errorf("PARTITION name must be non-empty") - } - if indexName, exists := partitionNames[name]; exists { - if indexName == idxDesc.Name { - return fmt.Errorf("PARTITION %s: name must be unique (used twice in index %q)", - name, indexName) - } - } - partitionNames[name] = idxDesc.Name - return nil - } - - // Use the system-tenant SQL codec when validating the keys in the partition - // descriptor. We just want to know how the partitions relate to one another, - // so it's fine to ignore the tenant ID prefix. - codec := keys.SystemSQLCodec - - if len(partDesc.List) > 0 { - listValues := make(map[string]struct{}, len(partDesc.List)) - for _, p := range partDesc.List { - if err := checkName(p.Name); err != nil { - return err - } - - if len(p.Values) == 0 { - return fmt.Errorf("PARTITION %s: must contain values", p.Name) - } - // NB: key encoding is used to check uniqueness because it has - // to match the behavior of the value when indexed. 
- for _, valueEncBuf := range p.Values { - tuple, keyPrefix, err := rowenc.DecodePartitionTuple( - a, codec, desc, idxDesc, partDesc, valueEncBuf, fakePrefixDatums) - if err != nil { - return fmt.Errorf("PARTITION %s: %v", p.Name, err) - } - if _, exists := listValues[string(keyPrefix)]; exists { - return fmt.Errorf("%s cannot be present in more than one partition", tuple) - } - listValues[string(keyPrefix)] = struct{}{} - } - - newColOffset := colOffset + int(partDesc.NumColumns) - if err := desc.validatePartitioningDescriptor( - a, idxDesc, &p.Subpartitioning, newColOffset, partitionNames, - ); err != nil { - return err - } - } - } - - if len(partDesc.Range) > 0 { - tree := interval.NewTree(interval.ExclusiveOverlapper) - for _, p := range partDesc.Range { - if err := checkName(p.Name); err != nil { - return err - } - - // NB: key encoding is used to check uniqueness because it has to match - // the behavior of the value when indexed. - fromDatums, fromKey, err := rowenc.DecodePartitionTuple( - a, codec, desc, idxDesc, partDesc, p.FromInclusive, fakePrefixDatums) - if err != nil { - return fmt.Errorf("PARTITION %s: %v", p.Name, err) - } - toDatums, toKey, err := rowenc.DecodePartitionTuple( - a, codec, desc, idxDesc, partDesc, p.ToExclusive, fakePrefixDatums) - if err != nil { - return fmt.Errorf("PARTITION %s: %v", p.Name, err) - } - pi := partitionInterval{p.Name, fromKey, toKey} - if overlaps := tree.Get(pi.Range()); len(overlaps) > 0 { - return fmt.Errorf("partitions %s and %s overlap", - overlaps[0].(partitionInterval).name, p.Name) - } - if err := tree.Insert(pi, false /* fast */); errors.Is(err, interval.ErrEmptyRange) { - return fmt.Errorf("PARTITION %s: empty range: lower bound %s is equal to upper bound %s", - p.Name, fromDatums, toDatums) - } else if errors.Is(err, interval.ErrInvertedRange) { - return fmt.Errorf("PARTITION %s: empty range: lower bound %s is greater than upper bound %s", - p.Name, fromDatums, toDatums) - } else if err != nil { - return 
errors.Wrapf(err, "PARTITION %s", p.Name) - } - } - } - - return nil -} - -type partitionInterval struct { - name string - start roachpb.Key - end roachpb.Key -} - -var _ interval.Interface = partitionInterval{} - -// ID is part of `interval.Interface` but unused in validatePartitioningDescriptor. -func (ps partitionInterval) ID() uintptr { return 0 } - -// Range is part of `interval.Interface`. -func (ps partitionInterval) Range() interval.Range { - return interval.Range{Start: []byte(ps.start), End: []byte(ps.end)} -} - -// validatePartitioning validates that any PartitioningDescriptors contained in -// table indexes are well-formed. See validatePartitioningDesc for details. -func (desc *wrapper) validatePartitioning() error { - partitionNames := make(map[string]string) - - a := &rowenc.DatumAlloc{} - return catalog.ForEachNonDropIndex(desc, func(idx catalog.Index) error { - idxDesc := idx.IndexDesc() - return desc.validatePartitioningDescriptor( - a, idxDesc, &idxDesc.Partitioning, 0 /* colOffset */, partitionNames, - ) - }) -} - // FamilyHeuristicTargetBytes is the target total byte size of columns that the // current heuristic will assign to a family. const FamilyHeuristicTargetBytes = 256 diff --git a/pkg/sql/catalog/tabledesc/structured_test.go b/pkg/sql/catalog/tabledesc/structured_test.go index 3e2959a0d373..dd28704d4b12 100644 --- a/pkg/sql/catalog/tabledesc/structured_test.go +++ b/pkg/sql/catalog/tabledesc/structured_test.go @@ -22,13 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" . 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -137,1459 +133,6 @@ func TestAllocateIDs(t *testing.T) { t.Fatalf("expected %s, but found %s", a, b) } } -func TestValidateDatabaseDesc(t *testing.T) { - defer leaktest.AfterTest(t)() - ctx := context.Background() - - testData := []struct { - err string - desc *dbdesc.Immutable - }{ - {`invalid database ID 0`, - dbdesc.NewImmutable(descpb.DatabaseDescriptor{ - Name: "db", - ID: 0, - Privileges: &descpb.PrivilegeDescriptor{}, - }), - }, - { - `region "us-east-1" seen twice on db 200`, - dbdesc.NewImmutable(descpb.DatabaseDescriptor{ - Name: "multi-region-db", - ID: 200, - RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ - Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ - {Name: "us-east-1"}, - {Name: "us-east-1"}, - }, - PrimaryRegion: "us-east-1", - }, - Privileges: &descpb.PrivilegeDescriptor{}, - }), - }, - { - `primary region unset on a multi-region db 200`, - dbdesc.NewImmutable(descpb.DatabaseDescriptor{ - Name: "multi-region-db", - ID: 200, - RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ - Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ - {Name: "us-east-1"}, - }, - }, - Privileges: &descpb.PrivilegeDescriptor{}, - }), - }, - { - `primary region not found in list of regions on db 200`, - dbdesc.NewImmutable(descpb.DatabaseDescriptor{ - Name: "multi-region-db", - ID: 200, - RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ - Regions: []descpb.DatabaseDescriptor_RegionConfig_Region{ - {Name: "us-east-1"}, - }, - PrimaryRegion: "us-east-2", - }, - Privileges: &descpb.PrivilegeDescriptor{}, - }), - }, - } - for i, d := range testData 
{ - t.Run(d.err, func(t *testing.T) { - if err := d.desc.Validate(ctx, nil /* descGetter */); err == nil { - t.Errorf("%d: expected \"%s\", but found success: %+v", i, d.err, d.desc) - } else if d.err != err.Error() && "internal error: "+d.err != err.Error() { - t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, d.err, err) - } - }) - } -} - -func TestValidateTableDesc(t *testing.T) { - defer leaktest.AfterTest(t)() - - ctx := context.Background() - - computedExpr := "1 + 1" - - testData := []struct { - err string - desc descpb.TableDescriptor - }{ - {`empty table name`, - descpb.TableDescriptor{}}, - {`invalid table ID 0`, - descpb.TableDescriptor{ID: 0, Name: "foo"}}, - {`invalid parent ID 0`, - descpb.TableDescriptor{ID: 2, Name: "foo"}}, - {`table must contain at least 1 column`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - }}, - {`empty column name`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 0}, - }, - NextColumnID: 2, - }}, - {`table "foo" is encoded using using version 0, but this client only supports version 2 and 3`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - NextColumnID: 2, - }}, - {`virtual column "virt" is not computed`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 2, Name: "virt", Virtual: true}, - }, - NextColumnID: 3, - }}, - {`invalid column ID 0`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 0, Name: "bar"}, - }, - NextColumnID: 2, - }}, - {`table must contain a primary key`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", 
- FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`duplicate column name: "bar"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 1, Name: "bar"}, - }, - NextColumnID: 2, - }}, - {`duplicate column name: "bar"`, - descpb.TableDescriptor{ - ID: catconstants.CrdbInternalBackwardDependenciesTableID, - ParentID: 0, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 1, Name: "bar"}, - }, - NextColumnID: 2, - }}, - {`column "blah" duplicate ID of column "bar": 1`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 1, Name: "blah"}, - }, - NextColumnID: 2, - }}, - {`at least 1 column family must be specified`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - NextColumnID: 2, - }}, - {`the 0th family must have ID 0`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 1}, - }, - NextColumnID: 2, - }}, - {`duplicate family name: "baz"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz"}, - {ID: 1, Name: 
"baz"}, - }, - NextColumnID: 2, - NextFamilyID: 2, - }}, - {`family "qux" duplicate ID of family "baz": 0`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz"}, - {ID: 0, Name: "qux"}, - }, - NextColumnID: 2, - NextFamilyID: 2, - }}, - {`duplicate family name: "baz"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz"}, - {ID: 3, Name: "baz"}, - }, - NextColumnID: 2, - NextFamilyID: 2, - }}, - {`mismatched column ID size (1) and name size (0)`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}}, - }, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`family "baz" contains unknown column "2"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{2}, ColumnNames: []string{"bar"}}, - }, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`family "baz" column 1 should have name "bar", but found name "qux"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"qux"}}, - }, - NextColumnID: 2, - NextFamilyID: 1, - }}, 
- {`column "bar" is not in any column family`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz"}, - }, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`column 1 is in both family 0 and 1`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - {ID: 1, Name: "qux", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - NextColumnID: 2, - NextFamilyID: 2, - }}, - {`virtual computed column "virt" cannot be part of a family`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 2, Name: "virt", ComputeExpr: &computedExpr, Virtual: true}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "fam1", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - {ID: 1, Name: "fam2", ColumnIDs: []descpb.ColumnID{2}, ColumnNames: []string{"virt"}}, - }, - NextColumnID: 3, - NextFamilyID: 2, - }}, - {`table must contain a primary key`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 0, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}}, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`invalid index ID 0`, - descpb.TableDescriptor{ - ID: 
2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 0, Name: "bar", - ColumnIDs: []descpb.ColumnID{0}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}}, - NextColumnID: 2, - NextFamilyID: 1, - }}, - {`index "bar" must contain at least 1 column`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - Indexes: []descpb.IndexDescriptor{ - {ID: 2, Name: "bar"}, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 3, - }}, - {`mismatched column IDs (1) and names (0)`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}}, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`mismatched column IDs (1) and names (2)`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 2, Name: "blah"}, - }, - 
Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1, 2}, ColumnNames: []string{"bar", "blah"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", - ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar", "blah"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - NextColumnID: 3, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`duplicate index name: "bar"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", - ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - Indexes: []descpb.IndexDescriptor{ - {ID: 2, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 3, - }}, - {`index "blah" duplicate ID of index "bar": 1`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - Indexes: []descpb.IndexDescriptor{ - {ID: 1, Name: "blah", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - 
ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`index "bar" column "bar" should have ID 1, but found ID 2`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{2}, - ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`index "bar" contains unknown column "blah"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"blah"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`mismatched column IDs (1) and directions (0)`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"blah"}, - }, - NextColumnID: 2, - NextFamilyID: 1, - 
NextIndexID: 2, - }}, - {`mismatched STORING column IDs (1) and names (0)`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "c1"}, - {ID: 2, Name: "c2"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - { - ID: 0, - Name: "fam", - ColumnIDs: []descpb.ColumnID{1, 2}, - ColumnNames: []string{"c1", "c2"}, - }, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"c1"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - StoreColumnIDs: []descpb.ColumnID{2}, - }, - NextColumnID: 3, - NextFamilyID: 1, - NextIndexID: 2, - }}, - {`at least one of LIST or RANGE partitioning must be used`, - // Verify that validatePartitioning is hooked up. The rest of these - // tests are in TestValidatePartitionion. - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - NextIndexID: 3, - }}, - {`index "foo_crdb_internal_bar_shard_5_bar_idx" refers to non-existent shard column "does not exist"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 2, Name: "crdb_internal_bar_shard_5"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", 
- ColumnIDs: []descpb.ColumnID{1, 2}, - ColumnNames: []string{"bar", "crdb_internal_bar_shard_5"}, - }, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, Name: "primary", - Unique: true, - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - StoreColumnNames: []string{"crdb_internal_bar_shard_5"}, - StoreColumnIDs: []descpb.ColumnID{2}, - }, - Indexes: []descpb.IndexDescriptor{ - {ID: 2, Name: "foo_crdb_internal_bar_shard_5_bar_idx", - ColumnIDs: []descpb.ColumnID{2, 1}, - ColumnNames: []string{"crdb_internal_bar_shard_5", "bar"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, - Sharded: descpb.ShardedDescriptor{ - IsSharded: true, - Name: "does not exist", - ShardBuckets: 5, - }, - }, - }, - NextColumnID: 3, - NextFamilyID: 1, - NextIndexID: 3, - }}, - {`TableID mismatch for unique without index constraint "bar_unique": "1" doesn't match descriptor: "2"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ - { - TableID: 1, - ColumnIDs: []descpb.ColumnID{1}, - Name: "bar_unique", - }, - }, - }}, - {`column-id "2" does not exist`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - UniqueWithoutIndexConstraints: 
[]descpb.UniqueWithoutIndexConstraint{ - { - TableID: 2, - ColumnIDs: []descpb.ColumnID{1, 2}, - Name: "bar_unique", - }, - }, - }}, - {`unique without index constraint "bar_unique" contains duplicate column "1"`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ - { - TableID: 2, - ColumnIDs: []descpb.ColumnID{1, 1}, - Name: "bar_unique", - }, - }, - }}, - {`empty unique without index constraint name`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - }, - }, - NextColumnID: 2, - NextFamilyID: 1, - UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ - { - TableID: 2, - ColumnIDs: []descpb.ColumnID{1}, - }, - }, - }}, - {`primary index column "v" cannot be virtual`, - descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar"}, - {ID: 2, Name: "v", ComputeExpr: &computedExpr, Virtual: true}, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "primary", - Unique: true, - ColumnIDs: []descpb.ColumnID{1, 2}, - ColumnNames: []string{"bar", "v"}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"bar"}, - }, - }, - NextColumnID: 3, - NextFamilyID: 1, - }}, - {`index "sec" cannot store virtual column "v"`, - 
descpb.TableDescriptor{ - ID: 2, - ParentID: 1, - Name: "foo", - FormatVersion: descpb.FamilyFormatVersion, - Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "c1"}, - {ID: 2, Name: "c2"}, - {ID: 3, Name: "v", ComputeExpr: &computedExpr, Virtual: true}, - }, - Families: []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1, 2}, ColumnNames: []string{"c1", "c2"}}, - }, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, Name: "pri", ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"c1"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - }, - Indexes: []descpb.IndexDescriptor{ - {ID: 2, Name: "sec", ColumnIDs: []descpb.ColumnID{2}, - ColumnNames: []string{"c2"}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - StoreColumnNames: []string{"v"}, - StoreColumnIDs: []descpb.ColumnID{3}, - }, - }, - NextColumnID: 4, - NextFamilyID: 1, - NextIndexID: 3, - }}, - } - for i, d := range testData { - t.Run(d.err, func(t *testing.T) { - desc := NewImmutable(d.desc) - if err := ValidateTable(ctx, desc); err == nil { - t.Errorf("%d: expected \"%s\", but found success: %+v", i, d.err, d.desc) - } else if d.err != err.Error() && "internal error: "+d.err != err.Error() { - t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, d.err, err) - } - }) - } -} - -func TestValidateCrossTableReferences(t *testing.T) { - defer leaktest.AfterTest(t)() - ctx := context.Background() - - pointer := func(s string) *string { - return &s - } - - tests := []struct { - err string - desc descpb.TableDescriptor - otherDescs []descpb.TableDescriptor - }{ - // Foreign keys - { // 0 - err: `invalid foreign key: missing table=52: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - OutboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - 
ReferencedTableID: 52, - ReferencedColumnIDs: []descpb.ColumnID{1}, - OriginTableID: 51, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - otherDescs: nil, - }, - { // 1 - err: `missing fk back reference "fk" to "foo" from "baz"`, - desc: descpb.TableDescriptor{ - ID: 51, - Name: "foo", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - OutboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - ReferencedTableID: 52, - ReferencedColumnIDs: []descpb.ColumnID{1}, - OriginTableID: 51, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - }}, - }, - { // 2 - err: `invalid foreign key backreference: missing table=52: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - InboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - ReferencedTableID: 51, - ReferencedColumnIDs: []descpb.ColumnID{1}, - OriginTableID: 52, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - }, - { // 3 - err: `missing fk forward reference "fk" to "foo" from "baz"`, - desc: descpb.TableDescriptor{ - ID: 51, - Name: "foo", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - }, - InboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - ReferencedTableID: 51, - ReferencedColumnIDs: []descpb.ColumnID{1}, - OriginTableID: 52, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - }}, - }, 
- { // 4 - // Regression test for #57066: We can handle one of the referenced tables - // having a pre-19.2 foreign key reference. - err: "", - desc: descpb.TableDescriptor{ - ID: 51, - Name: "foo", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - Indexes: []descpb.IndexDescriptor{ - { - ID: 2, - ColumnIDs: []descpb.ColumnID{1, 2}, - }, - }, - OutboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - ReferencedTableID: 52, - ReferencedColumnIDs: []descpb.ColumnID{1}, - OriginTableID: 51, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - Indexes: []descpb.IndexDescriptor{ - { - Unique: true, - ColumnIDs: []descpb.ColumnID{1}, - ReferencedBy: []descpb.ForeignKeyReference{{Table: 51, Index: 2}}, - }, - }, - }}, - }, - { // 5 - // Regression test for #57066: We can handle one of the referenced tables - // having a pre-19.2 foreign key reference. 
- err: "", - desc: descpb.TableDescriptor{ - ID: 51, - Name: "foo", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - Indexes: []descpb.IndexDescriptor{ - { - ID: 2, - ColumnIDs: []descpb.ColumnID{7}, - Unique: true, - }, - }, - InboundFKs: []descpb.ForeignKeyConstraint{ - { - Name: "fk", - ReferencedTableID: 51, - ReferencedColumnIDs: []descpb.ColumnID{7}, - OriginTableID: 52, - OriginColumnIDs: []descpb.ColumnID{1}, - }, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - Indexes: []descpb.IndexDescriptor{ - { - ID: 2, - Unique: true, - ColumnIDs: []descpb.ColumnID{1}, - ForeignKey: descpb.ForeignKeyReference{Table: 51, Index: 2}, - }, - }, - }}, - }, - - // Interleaves - { // 6 - err: `invalid interleave: missing table=52 index=2: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ - {TableID: 52, IndexID: 2}, - }}, - }, - }, - otherDescs: nil, - }, - { // 7 - err: `invalid interleave: missing table=baz index=2: index-id "2" does not exist`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - FormatVersion: descpb.InterleavedFormatVersion, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ - {TableID: 52, IndexID: 2}, - }}, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - }}, - }, - { // 8 - err: `missing interleave back reference to 
"foo"@"bar" from "baz"@"qux"`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ - {TableID: 52, IndexID: 2}, - }}, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 2, - Name: "qux", - }, - }}, - }, - { // 9 - err: `invalid interleave backreference table=52 index=2: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, - }, - }, - }, - { // 10 - err: `invalid interleave backreference table=baz index=2: index-id "2" does not exist`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - ID: 52, - Name: "baz", - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - }}, - }, - { // 11 - err: `broken interleave backward reference from "foo"@"bar" to "baz"@"qux"`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, - }, - }, - otherDescs: []descpb.TableDescriptor{{ - Name: "baz", - ID: 52, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 2, - Name: "qux", - }, - }}, - }, - { // 12 - err: `type ID 500 in 
descriptor not found: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"a"}, - }, - Columns: []descpb.ColumnDescriptor{ - { - Name: "a", - ID: 1, - Type: types.MakeEnum(typedesc.TypeIDToOID(500), typedesc.TypeIDToOID(100500)), - }, - }, - }, - }, - // Add some expressions with invalid type references. - { // 13 - err: `type ID 500 in descriptor not found: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"a"}, - }, - Columns: []descpb.ColumnDescriptor{ - { - Name: "a", - ID: 1, - Type: types.Int, - DefaultExpr: pointer("a::@100500"), - }, - }, - }, - }, - { // 14 - err: `type ID 500 in descriptor not found: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - PrimaryIndex: descpb.IndexDescriptor{ - ID: 1, - Name: "bar", - ColumnIDs: []descpb.ColumnID{1}, - ColumnNames: []string{"a"}, - }, - Columns: []descpb.ColumnDescriptor{ - { - Name: "a", - ID: 1, - Type: types.Int, - ComputeExpr: pointer("a:::@100500"), - }, - }, - }, - }, - { // 15 - err: `type ID 500 in descriptor not found: descriptor not found`, - desc: descpb.TableDescriptor{ - Name: "foo", - ID: 51, - ParentID: 1, - UnexposedParentSchemaID: keys.PublicSchemaID, - Checks: []*descpb.TableDescriptor_CheckConstraint{ - { - Expr: "a::@100500", - }, - }, - }, - }, - } - - for i, test := range tests { - descs := catalog.MapDescGetter{} - descs[1] = dbdesc.NewImmutable(descpb.DatabaseDescriptor{ID: 1}) - for _, otherDesc := range test.otherDescs { - otherDesc.Privileges = 
descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) - descs[otherDesc.ID] = NewImmutable(otherDesc) - } - desc := NewImmutable(test.desc) - if err := ValidateCrossReferences(ctx, descs, desc); err == nil { - if test.err != "" { - t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc) - } - } else if test.err != err.Error() && "internal error: "+test.err != err.Error() { - t.Errorf("%d: expected \"%s\", but found \"%s\"", i, test.err, err.Error()) - } - } -} - -func TestValidatePartitioning(t *testing.T) { - defer leaktest.AfterTest(t)() - - tests := []struct { - err string - desc descpb.TableDescriptor - }{ - {"at least one of LIST or RANGE partitioning must be used", - descpb.TableDescriptor{ - PrimaryIndex: descpb.IndexDescriptor{ - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - }, - }, - }, - }, - {"PARTITION p1: must contain values", - descpb.TableDescriptor{ - PrimaryIndex: descpb.IndexDescriptor{ - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{Name: "p1"}}, - }, - }, - }, - }, - {"not enough columns in index for this partitioning", - descpb.TableDescriptor{ - PrimaryIndex: descpb.IndexDescriptor{ - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{Name: "p1", Values: [][]byte{{}}}}, - }, - }, - }, - }, - {"only one LIST or RANGE partitioning may used", - descpb.TableDescriptor{ - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{}}, - Range: []descpb.PartitioningDescriptor_Range{{}}, - }, - }, - }, - }, - {"PARTITION name must be non-empty", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - 
ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{}}, - }, - }, - }, - }, - {"PARTITION p1: must contain values", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{Name: "p1"}}, - }, - }, - }, - }, - {"PARTITION p1: decoding: empty array", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{ - Name: "p1", Values: [][]byte{{}}, - }}, - }, - }, - }, - }, - {"PARTITION p1: decoding: int64 varint decoding failed: 0", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{ - {Name: "p1", Values: [][]byte{{0x03}}}, - }, - }, - }, - }, - }, - {"PARTITION p1: superfluous data in encoded value", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 
1, - List: []descpb.PartitioningDescriptor_List{ - {Name: "p1", Values: [][]byte{{0x03, 0x02, 0x00}}}, - }, - }, - }, - }, - }, - {"partitions p1 and p2 overlap", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1, 1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - Range: []descpb.PartitioningDescriptor_Range{ - {Name: "p1", FromInclusive: []byte{0x03, 0x02}, ToExclusive: []byte{0x03, 0x04}}, - {Name: "p2", FromInclusive: []byte{0x03, 0x02}, ToExclusive: []byte{0x03, 0x04}}, - }, - }, - }, - }, - }, - {"PARTITION p1: name must be unique", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{ - {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, - {Name: "p1", Values: [][]byte{{0x03, 0x04}}}, - }, - }, - }, - }, - }, - {"not enough columns in index for this partitioning", - descpb.TableDescriptor{ - Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{ - Name: "p1", - Values: [][]byte{{0x03, 0x02}}, - Subpartitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{{Name: "p1_1", Values: [][]byte{{}}}}, - }, - }}, - }, - }, - }, - }, - {"PARTITION p1: name must be unique", - descpb.TableDescriptor{ - Columns: 
[]descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, - PrimaryIndex: descpb.IndexDescriptor{ - ColumnIDs: []descpb.ColumnID{1, 1}, - ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, - Partitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{ - {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, - { - Name: "p2", - Values: [][]byte{{0x03, 0x04}}, - Subpartitioning: descpb.PartitioningDescriptor{ - NumColumns: 1, - List: []descpb.PartitioningDescriptor_List{ - {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, - }, - }, - }, - }, - }, - }, - }, - }, - } - for i, test := range tests { - t.Run(test.err, func(t *testing.T) { - desc := NewImmutable(test.desc) - err := ValidatePartitioning(desc) - if !testutils.IsError(err, test.err) { - t.Errorf(`%d: got "%v" expected "%v"`, i, err, test.err) - } - }) - } -} func TestColumnTypeSQLString(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/catalog/tabledesc/table.go b/pkg/sql/catalog/tabledesc/table.go index 1f3f33eed55a..e36c99137286 100644 --- a/pkg/sql/catalog/tabledesc/table.go +++ b/pkg/sql/catalog/tabledesc/table.go @@ -177,16 +177,8 @@ func GetShardColumnName(colNames []string, buckets int32) string { } // GetConstraintInfo returns a summary of all constraints on the table. 
-func (desc *wrapper) GetConstraintInfo( - ctx context.Context, dg catalog.DescGetter, -) (map[string]descpb.ConstraintDetail, error) { - var tableLookup catalog.TableLookupFn - if dg != nil { - tableLookup = func(id descpb.ID) (catalog.TableDescriptor, error) { - return catalog.GetTableDescFromID(ctx, dg, id) - } - } - return desc.collectConstraintInfo(tableLookup) +func (desc *wrapper) GetConstraintInfo() (map[string]descpb.ConstraintDetail, error) { + return desc.collectConstraintInfo(nil) } // GetConstraintInfoWithLookup returns a summary of all constraints on the diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go new file mode 100644 index 000000000000..bf191a7159a4 --- /dev/null +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -0,0 +1,1351 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package tabledesc + +import ( + "fmt" + "strings" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/cockroach/pkg/util/interval" + "github.com/cockroachdb/errors" + "github.com/lib/pq/oid" +) + +// ValidateTxnCommit performs pre-transaction-commit checks. +func (desc *wrapper) ValidateTxnCommit( + vea catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { + // Check that primary key exists. + if !desc.HasPrimaryKey() { + vea.Report(unimplemented.NewWithIssue(48026, + "primary key dropped without subsequent addition of new primary key in same transaction")) + } +} + +// GetReferencedDescIDs returns the IDs of all descriptors referenced by +// this descriptor, including itself. +func (desc *wrapper) GetReferencedDescIDs() catalog.DescriptorIDSet { + ids := catalog.MakeDescriptorIDSet(desc.GetID(), desc.GetParentID()) + if desc.GetParentSchemaID() != keys.PublicSchemaID { + ids.Add(desc.GetParentSchemaID()) + } + // Collect referenced table IDs in foreign keys and interleaves. 
+ for _, fk := range desc.OutboundFKs { + ids.Add(fk.ReferencedTableID) + } + for _, fk := range desc.InboundFKs { + ids.Add(fk.OriginTableID) + } + for _, idx := range desc.NonDropIndexes() { + for i := 0; i < idx.NumInterleaveAncestors(); i++ { + ids.Add(idx.GetInterleaveAncestor(i).TableID) + } + for i := 0; i < idx.NumInterleavedBy(); i++ { + ids.Add(idx.GetInterleavedBy(i).Table) + } + } + // Collect user defined type Oids and sequence references in columns. + for _, col := range desc.DeletableColumns() { + for id := range typedesc.GetTypeDescriptorClosure(col.GetType()) { + ids.Add(id) + } + for i := 0; i < col.NumUsesSequences(); i++ { + ids.Add(col.GetUsesSequenceID(i)) + } + } + // Collect user defined type IDs in expressions. + // All serialized expressions within a table descriptor are serialized + // with type annotations as IDs, so this visitor will collect them all. + visitor := &tree.TypeCollectorVisitor{OIDs: make(map[oid.Oid]struct{})} + _ = ForEachExprStringInTableDesc(desc, func(expr *string) error { + if parsedExpr, err := parser.ParseExpr(*expr); err == nil { + // Parse errors are deliberately ignored here; expression validity is checked in ValidateSelf. + tree.WalkExpr(visitor, parsedExpr) + } + return nil + }) + // Add collected Oids to return set. + for oid := range visitor.OIDs { + ids.Add(typedesc.UserDefinedTypeOIDToID(oid)) + } + // Add view dependencies. + for _, id := range desc.GetDependsOn() { + ids.Add(id) + } + for _, ref := range desc.GetDependedOnBy() { + ids.Add(ref.ID) + } + // Sequence dependencies are already captured above via the column sequence references. + return ids +} + +// ValidateCrossReferences validates that each reference to another table is +// resolvable and that the necessary back references exist. +func (desc *wrapper) ValidateCrossReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // Check that parent DB exists. + dbDesc, err := vdg.GetDatabaseDescriptor(desc.GetParentID()) + if err != nil { + vea.Report(err) + } + + // Check that parent schema exists.
+ if desc.GetParentSchemaID() != keys.PublicSchemaID && !desc.IsTemporary() { + schemaDesc, err := vdg.GetSchemaDescriptor(desc.GetParentSchemaID()) + if err != nil { + vea.Report(err) + } + if schemaDesc != nil && dbDesc != nil && schemaDesc.GetParentID() != dbDesc.GetID() { + vea.Report(errors.AssertionFailedf("parent schema %d is in different database %d", + desc.GetParentSchemaID(), schemaDesc.GetParentID())) + } + } + + if dbDesc != nil { + // Validate the all types present in the descriptor exist. + typeIDs, err := desc.GetAllReferencedTypeIDs(dbDesc, vdg.GetTypeDescriptor) + if err != nil { + vea.Report(err) + } else { + for _, id := range typeIDs { + _, err := vdg.GetTypeDescriptor(id) + vea.Report(err) + } + } + + // Validate table locality. + if err := desc.validateTableLocalityConfig(dbDesc, vdg); err != nil { + vea.Report(errors.Wrap(err, "invalid locality config")) + return + } + } + + // Check foreign keys. + for i := range desc.OutboundFKs { + vea.Report(desc.validateOutboundFK(&desc.OutboundFKs[i], vdg)) + } + for i := range desc.InboundFKs { + vea.Report(desc.validateInboundFK(&desc.InboundFKs[i], vdg)) + } + + // Check partitioning is correctly set. + // We only check these for active indexes, as inactive indexes may be in the + // process of being backfilled without PartitionAllBy. + // This check cannot be performed in ValidateSelf due to a conflict with + // AllocateIDs. + if desc.PartitionAllBy { + for _, indexI := range desc.ActiveIndexes() { + if !desc.matchingPartitionbyAll(indexI) { + vea.Report(errors.AssertionFailedf( + "table has PARTITION ALL BY defined, but index %s does not have matching PARTITION BY", + indexI.GetName(), + )) + } + } + } + + // Check interleaves. + for _, indexI := range desc.NonDropIndexes() { + vea.Report(desc.validateIndexInterleave(indexI, vdg)) + } + // TODO(dan): Also validate SharedPrefixLen in the interleaves. 
+} + +func (desc *wrapper) validateIndexInterleave( + indexI catalog.Index, vdg catalog.ValidationDescGetter, +) error { + // Check interleaves. + if indexI.NumInterleaveAncestors() > 0 { + // Only check the most recent ancestor, the rest of them don't point + // back. + ancestor := indexI.GetInterleaveAncestor(indexI.NumInterleaveAncestors() - 1) + targetTable, err := vdg.GetTableDescriptor(ancestor.TableID) + if err != nil { + return errors.Wrapf(err, + "invalid interleave: missing table=%d index=%d", ancestor.TableID, errors.Safe(ancestor.IndexID)) + } + targetIndex, err := targetTable.FindIndexWithID(ancestor.IndexID) + if err != nil { + return errors.Wrapf(err, + "invalid interleave: missing table=%s index=%d", targetTable.GetName(), errors.Safe(ancestor.IndexID)) + } + + found := false + for j := 0; j < targetIndex.NumInterleavedBy(); j++ { + backref := targetIndex.GetInterleavedBy(j) + if backref.Table == desc.ID && backref.Index == indexI.GetID() { + found = true + break + } + } + if !found { + return errors.AssertionFailedf( + "missing interleave back reference to %q@%q from %q@%q", + desc.Name, indexI.GetName(), targetTable.GetName(), targetIndex.GetName()) + } + } + + interleaveBackrefs := make(map[descpb.ForeignKeyReference]struct{}) + for j := 0; j < indexI.NumInterleavedBy(); j++ { + backref := indexI.GetInterleavedBy(j) + if _, ok := interleaveBackrefs[backref]; ok { + return errors.AssertionFailedf("duplicated interleave backreference %+v", backref) + } + interleaveBackrefs[backref] = struct{}{} + targetTable, err := vdg.GetTableDescriptor(backref.Table) + if err != nil { + return errors.Wrapf(err, + "invalid interleave backreference table=%d index=%d", + backref.Table, backref.Index) + } + targetIndex, err := targetTable.FindIndexWithID(backref.Index) + if err != nil { + return errors.Wrapf(err, + "invalid interleave backreference table=%s index=%d", + targetTable.GetName(), backref.Index) + } + if targetIndex.NumInterleaveAncestors() == 0 { + 
return errors.AssertionFailedf( + "broken interleave backward reference from %q@%q to %q@%q", + desc.Name, indexI.GetName(), targetTable.GetName(), targetIndex.GetName()) + } + // The last ancestor is required to be a backreference. + ancestor := targetIndex.GetInterleaveAncestor(targetIndex.NumInterleaveAncestors() - 1) + if ancestor.TableID != desc.ID || ancestor.IndexID != indexI.GetID() { + return errors.AssertionFailedf( + "broken interleave backward reference from %q@%q to %q@%q", + desc.Name, indexI.GetName(), targetTable.GetName(), targetIndex.GetName()) + } + } + + return nil +} + +func (desc *wrapper) validateOutboundFK( + fk *descpb.ForeignKeyConstraint, vdg catalog.ValidationDescGetter, +) error { + referencedTable, err := vdg.GetTableDescriptor(fk.ReferencedTableID) + if err != nil { + return errors.Wrapf(err, + "invalid foreign key: missing table=%d", fk.ReferencedTableID) + } + found := false + _ = referencedTable.ForeachInboundFK(func(backref *descpb.ForeignKeyConstraint) error { + if !found && backref.OriginTableID == desc.ID && backref.Name == fk.Name { + found = true + } + return nil + }) + if found { + return nil + } + // In 20.2 we introduced a bug where we fail to upgrade the FK references + // on the referenced descriptors from their pre-19.2 format when reading + // them during validation (#57032). So we account for the possibility of + // un-upgraded foreign key references on the other table. This logic + // somewhat parallels the logic in maybeUpgradeForeignKeyRepOnIndex. + unupgradedFKsPresent := false + if err := catalog.ForEachIndex(referencedTable, catalog.IndexOpts{}, func(referencedIdx catalog.Index) error { + if found { + // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add + // a way to break out of the index loop. 
+ return nil + } + if len(referencedIdx.IndexDesc().ReferencedBy) > 0 { + unupgradedFKsPresent = true + } else { + return nil + } + // Determine whether the index on the other table is a unique index that + // could support this FK constraint. + if !referencedIdx.IsValidReferencedUniqueConstraint(fk.ReferencedColumnIDs) { + return nil + } + // Now check the backreferences. Backreferences in ReferencedBy only had + // Index and Table populated. + for i := range referencedIdx.IndexDesc().ReferencedBy { + backref := &referencedIdx.IndexDesc().ReferencedBy[i] + if backref.Table != desc.ID { + continue + } + // Look up the index that the un-upgraded reference refers to and + // see if that index could support the foreign key reference. (Note + // that it shouldn't be possible for this index to not exist. See + // planner.MaybeUpgradeDependentOldForeignKeyVersionTables, which is + // called from the drop index implementation.) + originalOriginIndex, err := desc.FindIndexWithID(backref.Index) + if err != nil { + return errors.AssertionFailedf( + "missing index %d on %q from pre-19.2 foreign key "+ + "backreference %q on %q", + backref.Index, desc.Name, fk.Name, referencedTable.GetName(), + ) + } + if originalOriginIndex.IsValidOriginIndex(fk.OriginColumnIDs) { + found = true + break + } + } + return nil + }); err != nil { + return err + } + if found { + return nil + } + if unupgradedFKsPresent { + return errors.AssertionFailedf("missing fk back reference %q to %q "+ + "from %q (un-upgraded foreign key references present)", + fk.Name, desc.Name, referencedTable.GetName()) + } + return errors.AssertionFailedf("missing fk back reference %q to %q from %q", + fk.Name, desc.Name, referencedTable.GetName()) +} + +func (desc *wrapper) validateInboundFK( + backref *descpb.ForeignKeyConstraint, vdg catalog.ValidationDescGetter, +) error { + originTable, err := vdg.GetTableDescriptor(backref.OriginTableID) + if err != nil { + return errors.Wrapf(err, + "invalid foreign key 
backreference: missing table=%d", backref.OriginTableID) + } + found := false + _ = originTable.ForeachOutboundFK(func(fk *descpb.ForeignKeyConstraint) error { + if !found && fk.ReferencedTableID == desc.ID && fk.Name == backref.Name { + found = true + } + return nil + }) + if found { + return nil + } + // In 20.2 we introduced a bug where we fail to upgrade the FK references + // on the referenced descriptors from their pre-19.2 format when reading + // them during validation (#57032). So we account for the possibility of + // un-upgraded foreign key references on the other table. This logic + // somewhat parallels the logic in maybeUpgradeForeignKeyRepOnIndex. + unupgradedFKsPresent := false + if err := catalog.ForEachIndex(originTable, catalog.IndexOpts{}, func(originIdx catalog.Index) error { + if found { + // TODO (lucy): If we ever revisit the tabledesc.immutable methods, add + // a way to break out of the index loop. + return nil + } + fk := originIdx.IndexDesc().ForeignKey + if fk.IsSet() { + unupgradedFKsPresent = true + } else { + return nil + } + // Determine whether the index on the other table is an index that could + // support this FK constraint on the referencing side. Such an index would + // have been required in earlier versions. + if !originIdx.IsValidOriginIndex(backref.OriginColumnIDs) { + return nil + } + if fk.Table != desc.ID { + return nil + } + // Look up the index that the un-upgraded reference refers to and + // see if that index could support the foreign key reference. (Note + // that it shouldn't be possible for this index to not exist. See + // planner.MaybeUpgradeDependentOldForeignKeyVersionTables, which is + // called from the drop index implementation.)
+ originalReferencedIndex, err := desc.FindIndexWithID(fk.Index) + if err != nil { + return errors.AssertionFailedf( + "missing index %d on %q from pre-19.2 foreign key forward reference %q on %q", + fk.Index, desc.Name, backref.Name, originTable.GetName(), + ) + } + if originalReferencedIndex.IsValidReferencedUniqueConstraint(backref.ReferencedColumnIDs) { + found = true + } + return nil + }); err != nil { + return err + } + if found { + return nil + } + if unupgradedFKsPresent { + return errors.AssertionFailedf("missing fk forward reference %q to %q from %q "+ + "(un-upgraded foreign key references present)", + backref.Name, desc.Name, originTable.GetName()) + } + return errors.AssertionFailedf("missing fk forward reference %q to %q from %q", + backref.Name, desc.Name, originTable.GetName()) +} + +func (desc *wrapper) matchingPartitionbyAll(indexI catalog.Index) bool { + primaryIndexPartitioning := desc.PrimaryIndex.ColumnIDs[:desc.PrimaryIndex.Partitioning.NumColumns] + indexPartitioning := indexI.IndexDesc().ColumnIDs[:indexI.GetPartitioning().NumColumns] + if len(primaryIndexPartitioning) != len(indexPartitioning) { + return false + } + for i, id := range primaryIndexPartitioning { + if id != indexPartitioning[i] { + return false + } + } + return true +} + +func validateMutation(m *descpb.DescriptorMutation) error { + unSetEnums := m.State == descpb.DescriptorMutation_UNKNOWN || m.Direction == descpb.DescriptorMutation_NONE + switch desc := m.Descriptor_.(type) { + case *descpb.DescriptorMutation_Column: + col := desc.Column + if unSetEnums { + return errors.AssertionFailedf( + "mutation in state %s, direction %s, col %q, id %v", + errors.Safe(m.State), errors.Safe(m.Direction), col.Name, errors.Safe(col.ID)) + } + case *descpb.DescriptorMutation_Index: + if unSetEnums { + idx := desc.Index + return errors.AssertionFailedf( + "mutation in state %s, direction %s, index %s, id %v", + errors.Safe(m.State), errors.Safe(m.Direction), idx.Name, errors.Safe(idx.ID)) 
+ } + case *descpb.DescriptorMutation_Constraint: + if unSetEnums { + return errors.AssertionFailedf( + "mutation in state %s, direction %s, constraint %v", + errors.Safe(m.State), errors.Safe(m.Direction), desc.Constraint.Name) + } + case *descpb.DescriptorMutation_PrimaryKeySwap: + if m.Direction == descpb.DescriptorMutation_NONE { + return errors.AssertionFailedf( + "primary key swap mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) + } + case *descpb.DescriptorMutation_ComputedColumnSwap: + if m.Direction == descpb.DescriptorMutation_NONE { + return errors.AssertionFailedf( + "computed column swap mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) + } + case *descpb.DescriptorMutation_MaterializedViewRefresh: + if m.Direction == descpb.DescriptorMutation_NONE { + return errors.AssertionFailedf( + "materialized view refresh mutation in state %s, direction %s", errors.Safe(m.State), errors.Safe(m.Direction)) + } + default: + return errors.AssertionFailedf( + "mutation in state %s, direction %s, and no column/index descriptor", + errors.Safe(m.State), errors.Safe(m.Direction)) + } + return nil +} + +// ValidateSelf validates that the table descriptor is well formed. Checks +// include validating the table, column and index names, verifying that column +// names and index names are unique and verifying that column IDs and index IDs +// are consistent. Use Validate to validate that cross-table references are +// correct. +// The descriptor's format version is also checked for compatibility with this client. +func (desc *wrapper) ValidateSelf(vea catalog.ValidationErrorAccumulator) { + // Validate local properties of the descriptor.
+ vea.Report(catalog.ValidateName(desc.Name, "table")) + if desc.GetID() == descpb.InvalidID { + vea.Report(errors.AssertionFailedf("invalid table ID %d", desc.GetID())) + } + if desc.GetParentSchemaID() == descpb.InvalidID { + vea.Report(errors.AssertionFailedf("invalid parent schema ID %d", desc.GetParentSchemaID())) + } + + // ParentID is the ID of the database holding this table. + // It is often < ID, except when a table gets moved across databases. + if desc.GetParentID() == descpb.InvalidID && !desc.IsVirtualTable() { + vea.Report(errors.AssertionFailedf("invalid parent ID %d", desc.GetParentID())) + } + + if desc.IsSequence() { + return + } + + if len(desc.Columns) == 0 { + vea.Report(ErrMissingColumns) + return + } + + columnNames := make(map[string]descpb.ColumnID, len(desc.Columns)) + columnIDs := make(map[descpb.ColumnID]*descpb.ColumnDescriptor, len(desc.Columns)) + if err := desc.validateColumns(columnNames, columnIDs); err != nil { + vea.Report(err) + return + } + + // TODO(dt, nathan): virtual descs don't validate (missing privs, PK, etc). + if desc.IsVirtualTable() { + return + } + + // We maintain forward compatibility, so if you see this error message with a + // version older that what this client supports, then there's a + // maybeFillInDescriptor missing from some codepath. + if v := desc.GetFormatVersion(); v != descpb.FamilyFormatVersion && v != descpb.InterleavedFormatVersion { + // TODO(dan): We're currently switching from FamilyFormatVersion to + // InterleavedFormatVersion. 
After a beta is released with this dual version + // support, then: + // - Upgrade the bidirectional reference version to that beta + // - Start constructing all TableDescriptors with InterleavedFormatVersion + // - Change maybeUpgradeFormatVersion to output InterleavedFormatVersion + // - Change this check to only allow InterleavedFormatVersion + vea.Report(errors.AssertionFailedf( + "table %q is encoded using using version %d, but this client only supports version %d and %d", + desc.Name, errors.Safe(desc.GetFormatVersion()), + errors.Safe(descpb.FamilyFormatVersion), errors.Safe(descpb.InterleavedFormatVersion))) + return + } + + if err := desc.CheckUniqueConstraints(); err != nil { + vea.Report(err) + return + } + + mutationsHaveErrs := false + for _, m := range desc.Mutations { + if err := validateMutation(&m); err != nil { + vea.Report(err) + mutationsHaveErrs = true + } + switch desc := m.Descriptor_.(type) { + case *descpb.DescriptorMutation_Column: + col := desc.Column + columnIDs[col.ID] = col + } + } + + if mutationsHaveErrs { + return + } + + // TODO(dt): Validate each column only appears at-most-once in any FKs. + + // Only validate column families, constraints, and indexes if this is + // actually a table, not if it's just a view. + if desc.IsPhysicalTable() { + newErrs := []error{ + desc.validateColumnFamilies(columnIDs), + desc.validateCheckConstraints(columnIDs), + desc.validateUniqueWithoutIndexConstraints(columnIDs), + desc.validateTableIndexes(columnNames), + desc.validatePartitioning(), + } + hasErrs := false + for _, err := range newErrs { + if err != nil { + vea.Report(err) + hasErrs = true + } + } + if hasErrs { + return + } + } + + // Fill in any incorrect privileges that may have been missed due to mixed-versions. + // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. 
+ descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) + + // Validate the privilege descriptor. + vea.Report(desc.Privileges.Validate(desc.GetID(), privilege.Table)) + + // Ensure that mutations cannot be queued if a primary key change or + // an alter column type schema change has either been started in + // this transaction, or is currently in progress. + var alterPKMutation descpb.MutationID + var alterColumnTypeMutation descpb.MutationID + var foundAlterPK bool + var foundAlterColumnType bool + + for _, m := range desc.Mutations { + // If we have seen an alter primary key mutation, then + // m we are considering right now is invalid. + if foundAlterPK { + if alterPKMutation == m.MutationID { + vea.Report(unimplemented.NewWithIssue( + 45615, + "cannot perform other schema changes in the same transaction as a primary key change", + )) + } else { + vea.Report(unimplemented.NewWithIssue( + 45615, + "cannot perform a schema change operation while a primary key change is in progress", + )) + } + return + } + if foundAlterColumnType { + if alterColumnTypeMutation == m.MutationID { + vea.Report(unimplemented.NewWithIssue( + 47137, + "cannot perform other schema changes in the same transaction as an ALTER COLUMN TYPE schema change", + )) + } else { + vea.Report(unimplemented.NewWithIssue( + 47137, + "cannot perform a schema change operation while an ALTER COLUMN TYPE schema change is in progress", + )) + } + return + } + if m.GetPrimaryKeySwap() != nil { + foundAlterPK = true + alterPKMutation = m.MutationID + } + if m.GetComputedColumnSwap() != nil { + foundAlterColumnType = true + alterColumnTypeMutation = m.MutationID + } + } + + // Check that all expression strings can be parsed. 
+ _ = ForEachExprStringInTableDesc(desc, func(expr *string) error { + _, err := parser.ParseExpr(*expr) + vea.Report(err) + return nil + }) +} + +func (desc *wrapper) validateColumns( + columnNames map[string]descpb.ColumnID, columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, +) error { + for _, column := range desc.NonDropColumns() { + + if err := catalog.ValidateName(column.GetName(), "column"); err != nil { + return err + } + if column.GetID() == 0 { + return errors.AssertionFailedf("invalid column ID %d", errors.Safe(column.GetID())) + } + + if _, columnNameExists := columnNames[column.GetName()]; columnNameExists { + for i := range desc.Columns { + if desc.Columns[i].Name == column.GetName() { + return pgerror.Newf(pgcode.DuplicateColumn, + "duplicate column name: %q", column.GetName()) + } + } + return pgerror.Newf(pgcode.DuplicateColumn, + "duplicate: column %q in the middle of being added, not yet public", column.GetName()) + } + if colinfo.IsSystemColumnName(column.GetName()) { + return pgerror.Newf(pgcode.DuplicateColumn, + "column name %q conflicts with a system column name", column.GetName()) + } + columnNames[column.GetName()] = column.GetID() + + if other, ok := columnIDs[column.GetID()]; ok { + return fmt.Errorf("column %q duplicate ID of column %q: %d", + column.GetName(), other.Name, column.GetID()) + } + columnIDs[column.GetID()] = column.ColumnDesc() + + if column.GetID() >= desc.NextColumnID { + return errors.AssertionFailedf("column %q invalid ID (%d) >= next column ID (%d)", + column.GetName(), errors.Safe(column.GetID()), errors.Safe(desc.NextColumnID)) + } + + if column.IsComputed() { + // Verify that the computed column expression is valid. 
+ expr, err := parser.ParseExpr(column.GetComputeExpr()) + if err != nil { + return err + } + valid, err := schemaexpr.HasValidColumnReferences(desc, expr) + if err != nil { + return err + } + if !valid { + return fmt.Errorf("computed column %q refers to unknown columns in expression: %s", + column.GetName(), column.GetComputeExpr()) + } + } else if column.IsVirtual() { + return fmt.Errorf("virtual column %q is not computed", column.GetName()) + } + } + return nil +} + +func (desc *wrapper) validateColumnFamilies( + columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, +) error { + if len(desc.Families) < 1 { + return fmt.Errorf("at least 1 column family must be specified") + } + if desc.Families[0].ID != descpb.FamilyID(0) { + return fmt.Errorf("the 0th family must have ID 0") + } + + familyNames := map[string]struct{}{} + familyIDs := map[descpb.FamilyID]string{} + colIDToFamilyID := map[descpb.ColumnID]descpb.FamilyID{} + for i := range desc.Families { + family := &desc.Families[i] + if err := catalog.ValidateName(family.Name, "family"); err != nil { + return err + } + + if i != 0 { + prevFam := desc.Families[i-1] + if family.ID < prevFam.ID { + return errors.Newf( + "family %s at index %d has id %d less than family %s at index %d with id %d", + family.Name, i, family.ID, prevFam.Name, i-1, prevFam.ID) + } + } + + if _, ok := familyNames[family.Name]; ok { + return fmt.Errorf("duplicate family name: %q", family.Name) + } + familyNames[family.Name] = struct{}{} + + if other, ok := familyIDs[family.ID]; ok { + return fmt.Errorf("family %q duplicate ID of family %q: %d", + family.Name, other, family.ID) + } + familyIDs[family.ID] = family.Name + + if family.ID >= desc.NextFamilyID { + return fmt.Errorf("family %q invalid family ID (%d) > next family ID (%d)", + family.Name, family.ID, desc.NextFamilyID) + } + + if len(family.ColumnIDs) != len(family.ColumnNames) { + return fmt.Errorf("mismatched column ID size (%d) and name size (%d)", + len(family.ColumnIDs), 
len(family.ColumnNames)) + } + + for i, colID := range family.ColumnIDs { + col, ok := columnIDs[colID] + if !ok { + return fmt.Errorf("family %q contains unknown column \"%d\"", family.Name, colID) + } + if col.Name != family.ColumnNames[i] { + return fmt.Errorf("family %q column %d should have name %q, but found name %q", + family.Name, colID, col.Name, family.ColumnNames[i]) + } + if col.Virtual { + return fmt.Errorf("virtual computed column %q cannot be part of a family", col.Name) + } + } + + for _, colID := range family.ColumnIDs { + if famID, ok := colIDToFamilyID[colID]; ok { + return fmt.Errorf("column %d is in both family %d and %d", colID, famID, family.ID) + } + colIDToFamilyID[colID] = family.ID + } + } + for colID, colDesc := range columnIDs { + if !colDesc.Virtual { + if _, ok := colIDToFamilyID[colID]; !ok { + return fmt.Errorf("column %q is not in any column family", colDesc.Name) + } + } + } + return nil +} + +// validateCheckConstraints validates that check constraints are well formed. +// Checks include validating the column IDs and verifying that check expressions +// do not reference non-existent columns. +func (desc *wrapper) validateCheckConstraints( + columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, +) error { + for _, chk := range desc.AllActiveAndInactiveChecks() { + // Verify that the check's column IDs are valid. + for _, colID := range chk.ColumnIDs { + _, ok := columnIDs[colID] + if !ok { + return fmt.Errorf("check constraint %q contains unknown column \"%d\"", chk.Name, colID) + } + } + + // Verify that the check's expression is valid. 
+ expr, err := parser.ParseExpr(chk.Expr) + if err != nil { + return err + } + valid, err := schemaexpr.HasValidColumnReferences(desc, expr) + if err != nil { + return err + } + if !valid { + return fmt.Errorf("check constraint %q refers to unknown columns in expression: %s", + chk.Name, chk.Expr) + } + } + return nil +} + +// validateUniqueWithoutIndexConstraints validates that unique without index +// constraints are well formed. Checks include validating the column IDs and +// column names. +func (desc *wrapper) validateUniqueWithoutIndexConstraints( + columnIDs map[descpb.ColumnID]*descpb.ColumnDescriptor, +) error { + for _, c := range desc.AllActiveAndInactiveUniqueWithoutIndexConstraints() { + if err := catalog.ValidateName(c.Name, "unique without index constraint"); err != nil { + return err + } + + // Verify that the table ID is valid. + if c.TableID != desc.ID { + return fmt.Errorf( + "TableID mismatch for unique without index constraint %q: \"%d\" doesn't match descriptor: \"%d\"", + c.Name, c.TableID, desc.ID, + ) + } + + // Verify that the constraint's column IDs are valid and unique. + var seen util.FastIntSet + for _, colID := range c.ColumnIDs { + _, ok := columnIDs[colID] + if !ok { + return fmt.Errorf( + "unique without index constraint %q contains unknown column \"%d\"", c.Name, colID, + ) + } + if seen.Contains(int(colID)) { + return fmt.Errorf( + "unique without index constraint %q contains duplicate column \"%d\"", c.Name, colID, + ) + } + seen.Add(int(colID)) + } + + if c.IsPartial() { + expr, err := parser.ParseExpr(c.Predicate) + if err != nil { + return err + } + valid, err := schemaexpr.HasValidColumnReferences(desc, expr) + if err != nil { + return err + } + if !valid { + return fmt.Errorf( + "partial unique without index constraint %q refers to unknown columns in predicate: %s", + c.Name, + c.Predicate, + ) + } + } + } + + return nil +} + +// validateTableIndexes validates that indexes are well formed. 
Checks include +// validating the columns involved in the index, verifying the index names and +// IDs are unique, and the family of the primary key is 0. This does not check +// if indexes are unique (i.e. same set of columns, direction, and uniqueness) +// as there are practical uses for them. +func (desc *wrapper) validateTableIndexes(columnNames map[string]descpb.ColumnID) error { + if len(desc.PrimaryIndex.ColumnIDs) == 0 { + return ErrMissingPrimaryKey + } + + var virtualCols catalog.TableColSet + for i := range desc.Columns { + if desc.Columns[i].Virtual { + virtualCols.Add(desc.Columns[i].ID) + } + } + + // Verify that the primary index columns are not virtual. + for i, col := range desc.PrimaryIndex.ColumnIDs { + if virtualCols.Contains(col) { + return fmt.Errorf("primary index column %q cannot be virtual", desc.PrimaryIndex.ColumnNames[i]) + } + } + + indexNames := map[string]struct{}{} + indexIDs := map[descpb.IndexID]string{} + for _, indexI := range desc.NonDropIndexes() { + index := indexI.IndexDesc() + if err := catalog.ValidateName(index.Name, "index"); err != nil { + return err + } + if index.ID == 0 { + return fmt.Errorf("invalid index ID %d", index.ID) + } + + if _, indexNameExists := indexNames[index.Name]; indexNameExists { + for i := range desc.Indexes { + if desc.Indexes[i].Name == index.Name { + // This error should be caught in MakeIndexDescriptor or NewTableDesc. + return errors.HandleAsAssertionFailure(fmt.Errorf("duplicate index name: %q", index.Name)) + } + } + // This error should be caught in MakeIndexDescriptor. 
+ return errors.HandleAsAssertionFailure(fmt.Errorf( + "duplicate: index %q in the middle of being added, not yet public", index.Name)) + } + indexNames[index.Name] = struct{}{} + + if other, ok := indexIDs[index.ID]; ok { + return fmt.Errorf("index %q duplicate ID of index %q: %d", + index.Name, other, index.ID) + } + indexIDs[index.ID] = index.Name + + if index.ID >= desc.NextIndexID { + return fmt.Errorf("index %q invalid index ID (%d) > next index ID (%d)", + index.Name, index.ID, desc.NextIndexID) + } + + if len(index.ColumnIDs) != len(index.ColumnNames) { + return fmt.Errorf("mismatched column IDs (%d) and names (%d)", + len(index.ColumnIDs), len(index.ColumnNames)) + } + if len(index.ColumnIDs) != len(index.ColumnDirections) { + return fmt.Errorf("mismatched column IDs (%d) and directions (%d)", + len(index.ColumnIDs), len(index.ColumnDirections)) + } + // In the old STORING encoding, stored columns are in ExtraColumnIDs; + // tolerate a longer list of column names. + if len(index.StoreColumnIDs) > len(index.StoreColumnNames) { + return fmt.Errorf("mismatched STORING column IDs (%d) and names (%d)", + len(index.StoreColumnIDs), len(index.StoreColumnNames)) + } + + if len(index.ColumnIDs) == 0 { + return fmt.Errorf("index %q must contain at least 1 column", index.Name) + } + + var validateIndexDup catalog.TableColSet + for i, name := range index.ColumnNames { + colID, ok := columnNames[name] + if !ok { + return fmt.Errorf("index %q contains unknown column %q", index.Name, name) + } + if colID != index.ColumnIDs[i] { + return fmt.Errorf("index %q column %q should have ID %d, but found ID %d", + index.Name, name, colID, index.ColumnIDs[i]) + } + if validateIndexDup.Contains(colID) { + return fmt.Errorf("index %q contains duplicate column %q", index.Name, name) + } + validateIndexDup.Add(colID) + } + if index.IsSharded() { + if err := desc.ensureShardedIndexNotComputed(index); err != nil { + return err + } + if _, exists := columnNames[index.Sharded.Name]; 
!exists { + return fmt.Errorf("index %q refers to non-existent shard column %q", + index.Name, index.Sharded.Name) + } + } + if index.IsPartial() { + expr, err := parser.ParseExpr(index.Predicate) + if err != nil { + return err + } + valid, err := schemaexpr.HasValidColumnReferences(desc, expr) + if err != nil { + return err + } + if !valid { + return fmt.Errorf("partial index %q refers to unknown columns in predicate: %s", + index.Name, index.Predicate) + } + } + // Ensure that indexes do not STORE virtual columns. + for _, col := range index.ExtraColumnIDs { + if virtualCols.Contains(col) { + return fmt.Errorf("index %q cannot store virtual column %d", index.Name, col) + } + } + for i, col := range index.StoreColumnIDs { + if virtualCols.Contains(col) { + return fmt.Errorf("index %q cannot store virtual column %q", index.Name, index.StoreColumnNames[i]) + } + } + } + + return nil +} + +// ensureShardedIndexNotComputed ensures that the sharded index is not based on a computed +// column. This is because the sharded index is based on a hidden computed shard column +// under the hood and we don't support transitively computed columns (computed column A +// based on another computed column B). +func (desc *wrapper) ensureShardedIndexNotComputed(index *descpb.IndexDescriptor) error { + for _, colName := range index.Sharded.ColumnNames { + col, err := desc.FindColumnWithName(tree.Name(colName)) + if err != nil { + return err + } + if col.IsComputed() { + return pgerror.Newf(pgcode.InvalidTableDefinition, + "cannot create a sharded index on a computed column") + } + } + return nil +} + +// validatePartitioningDescriptor validates that a PartitioningDescriptor, which +// may represent a subpartition, is well-formed. Checks include validating the +// index-level uniqueness of all partition names, validating that the encoded +// tuples match the corresponding column types, and that range partitions are +// stored sorted by upper bound. 
colOffset is non-zero for subpartitions and +// indicates how many index columns to skip over. +func (desc *wrapper) validatePartitioningDescriptor( + a *rowenc.DatumAlloc, + idxDesc *descpb.IndexDescriptor, + partDesc *descpb.PartitioningDescriptor, + colOffset int, + partitionNames map[string]string, +) error { + if partDesc.NumImplicitColumns > partDesc.NumColumns { + return errors.Newf( + "cannot have implicit partitioning columns (%d) > partitioning columns (%d)", + partDesc.NumImplicitColumns, + partDesc.NumColumns, + ) + } + if partDesc.NumColumns == 0 { + return nil + } + + // TODO(dan): The sqlccl.GenerateSubzoneSpans logic is easier if we disallow + // setting zone configs on indexes that are interleaved into another index. + // InterleavedBy is fine, so using the root of the interleave hierarchy will + // work. It is expected that this is sufficient for real-world use cases. + // Revisit this restriction if that expectation is wrong. + if len(idxDesc.Interleave.Ancestors) > 0 { + return errors.Errorf("cannot set a zone config for interleaved index %s; "+ + "set it on the root of the interleaved hierarchy instead", idxDesc.Name) + } + + // We don't need real prefixes in the DecodePartitionTuple calls because we're + // only using it to look for collisions and the prefix would be the same for + // all of them. Faking them out with DNull allows us to make O(list partition) + // calls to DecodePartitionTuple instead of O(list partition entry). + fakePrefixDatums := make([]tree.Datum, colOffset) + for i := range fakePrefixDatums { + fakePrefixDatums[i] = tree.DNull + } + + if len(partDesc.List) == 0 && len(partDesc.Range) == 0 { + return fmt.Errorf("at least one of LIST or RANGE partitioning must be used") + } + if len(partDesc.List) > 0 && len(partDesc.Range) > 0 { + return fmt.Errorf("only one LIST or RANGE partitioning may used") + } + + // Do not validate partitions which use unhydrated user-defined types. 
+ // This should only happen at read time and descriptors should not become + // invalid at read time, only at write time. + { + numColumns := int(partDesc.NumColumns) + for i := colOffset; i < colOffset+numColumns; i++ { + // The partitioning descriptor may be invalid and refer to columns + // not stored in the index. In that case, skip this check as the + // validation will fail later. + if i >= len(idxDesc.ColumnIDs) { + continue + } + col, err := desc.FindColumnWithID(idxDesc.ColumnIDs[i]) + if err != nil { + return err + } + if col.GetType().UserDefined() && !col.GetType().IsHydrated() { + return nil + } + } + } + + checkName := func(name string) error { + if len(name) == 0 { + return fmt.Errorf("PARTITION name must be non-empty") + } + if indexName, exists := partitionNames[name]; exists { + if indexName == idxDesc.Name { + return fmt.Errorf("PARTITION %s: name must be unique (used twice in index %q)", + name, indexName) + } + } + partitionNames[name] = idxDesc.Name + return nil + } + + // Use the system-tenant SQL codec when validating the keys in the partition + // descriptor. We just want to know how the partitions relate to one another, + // so it's fine to ignore the tenant ID prefix. + codec := keys.SystemSQLCodec + + if len(partDesc.List) > 0 { + listValues := make(map[string]struct{}, len(partDesc.List)) + for _, p := range partDesc.List { + if err := checkName(p.Name); err != nil { + return err + } + + if len(p.Values) == 0 { + return fmt.Errorf("PARTITION %s: must contain values", p.Name) + } + // NB: key encoding is used to check uniqueness because it has + // to match the behavior of the value when indexed. 
+ for _, valueEncBuf := range p.Values { + tuple, keyPrefix, err := rowenc.DecodePartitionTuple( + a, codec, desc, idxDesc, partDesc, valueEncBuf, fakePrefixDatums) + if err != nil { + return fmt.Errorf("PARTITION %s: %v", p.Name, err) + } + if _, exists := listValues[string(keyPrefix)]; exists { + return fmt.Errorf("%s cannot be present in more than one partition", tuple) + } + listValues[string(keyPrefix)] = struct{}{} + } + + newColOffset := colOffset + int(partDesc.NumColumns) + if err := desc.validatePartitioningDescriptor( + a, idxDesc, &p.Subpartitioning, newColOffset, partitionNames, + ); err != nil { + return err + } + } + } + + if len(partDesc.Range) > 0 { + tree := interval.NewTree(interval.ExclusiveOverlapper) + for _, p := range partDesc.Range { + if err := checkName(p.Name); err != nil { + return err + } + + // NB: key encoding is used to check uniqueness because it has to match + // the behavior of the value when indexed. + fromDatums, fromKey, err := rowenc.DecodePartitionTuple( + a, codec, desc, idxDesc, partDesc, p.FromInclusive, fakePrefixDatums) + if err != nil { + return fmt.Errorf("PARTITION %s: %v", p.Name, err) + } + toDatums, toKey, err := rowenc.DecodePartitionTuple( + a, codec, desc, idxDesc, partDesc, p.ToExclusive, fakePrefixDatums) + if err != nil { + return fmt.Errorf("PARTITION %s: %v", p.Name, err) + } + pi := partitionInterval{p.Name, fromKey, toKey} + if overlaps := tree.Get(pi.Range()); len(overlaps) > 0 { + return fmt.Errorf("partitions %s and %s overlap", + overlaps[0].(partitionInterval).name, p.Name) + } + if err := tree.Insert(pi, false /* fast */); errors.Is(err, interval.ErrEmptyRange) { + return fmt.Errorf("PARTITION %s: empty range: lower bound %s is equal to upper bound %s", + p.Name, fromDatums, toDatums) + } else if errors.Is(err, interval.ErrInvertedRange) { + return fmt.Errorf("PARTITION %s: empty range: lower bound %s is greater than upper bound %s", + p.Name, fromDatums, toDatums) + } else if err != nil { + return 
errors.Wrapf(err, "PARTITION %s", p.Name) + } + } + } + + return nil +} + +type partitionInterval struct { + name string + start roachpb.Key + end roachpb.Key +} + +var _ interval.Interface = partitionInterval{} + +// ID is part of `interval.Interface` but unused in validatePartitioningDescriptor. +func (ps partitionInterval) ID() uintptr { return 0 } + +// Range is part of `interval.Interface`. +func (ps partitionInterval) Range() interval.Range { + return interval.Range{Start: []byte(ps.start), End: []byte(ps.end)} +} + +// validatePartitioning validates that any PartitioningDescriptors contained in +// table indexes are well-formed. See validatePartitioningDesc for details. +func (desc *wrapper) validatePartitioning() error { + partitionNames := make(map[string]string) + + a := &rowenc.DatumAlloc{} + return catalog.ForEachNonDropIndex(desc, func(idx catalog.Index) error { + idxDesc := idx.IndexDesc() + return desc.validatePartitioningDescriptor( + a, idxDesc, &idxDesc.Partitioning, 0 /* colOffset */, partitionNames, + ) + }) +} + +// validateTableLocalityConfig validates whether the descriptor's locality +// config is valid under the given database. +func (desc *wrapper) validateTableLocalityConfig( + db catalog.DatabaseDescriptor, vdg catalog.ValidationDescGetter, +) error { + + if desc.LocalityConfig == nil { + if db.IsMultiRegion() { + return pgerror.Newf( + pgcode.InvalidTableDefinition, + "database %s is multi-region enabled, but table %s has no locality set", + db.GetName(), + desc.GetName(), + ) + } + // Nothing to validate for non-multi-region databases. + return nil + } + + if !db.IsMultiRegion() { + s := tree.NewFmtCtx(tree.FmtSimple) + var locality string + // Formatting the table locality config should never fail; if it does, the + // error message is more clear if we construct a dummy locality here. 
+ if err := FormatTableLocalityConfig(desc.LocalityConfig, s); err != nil { + locality = "INVALID LOCALITY" + } + locality = s.String() + return pgerror.Newf( + pgcode.InvalidTableDefinition, + "database %s is not multi-region enabled, but table %s has locality %s set", + db.GetName(), + desc.GetName(), + locality, + ) + } + + regionsEnumID, err := db.MultiRegionEnumID() + if err != nil { + return err + } + regionsEnumDesc, err := vdg.GetTypeDescriptor(regionsEnumID) + if err != nil { + return errors.Wrapf(err, "multi-region enum with ID %d does not exist", regionsEnumID) + } + + // REGIONAL BY TABLE tables homed in the primary region should include a + // reference to the multi-region type descriptor and a corresponding + // backreference. All other patterns should only contain a reference if there + // is an explicit column which uses the multi-region type descriptor as its + // *types.T. While the specific cases are validated below, we search for the + // region enum ID in the references list just once, up top here. 
+ typeIDs, err := desc.GetAllReferencedTypeIDs(db, vdg.GetTypeDescriptor) + if err != nil { + return err + } + regionEnumIDReferenced := false + for _, typeID := range typeIDs { + if typeID == regionsEnumID { + regionEnumIDReferenced = true + break + } + } + columnTypesTypeIDs, err := desc.getAllReferencedTypesInTableColumns(vdg.GetTypeDescriptor) + if err != nil { + return err + } + switch lc := desc.LocalityConfig.Locality.(type) { + case *descpb.TableDescriptor_LocalityConfig_Global_: + if regionEnumIDReferenced { + if _, found := columnTypesTypeIDs[regionsEnumID]; !found { + return errors.AssertionFailedf( + "expected no region Enum ID to be referenced by a GLOBAL TABLE: %q"+ + " but found: %d", + desc.GetName(), + regionsEnumDesc.GetID(), + ) + } + } + case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + if !desc.IsPartitionAllBy() { + return errors.AssertionFailedf("expected REGIONAL BY ROW table to have PartitionAllBy set") + } + case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + + // Table is homed in an explicit (non-primary) region. 
+ if lc.RegionalByTable.Region != nil { + foundRegion := false + regions, err := regionsEnumDesc.RegionNames() + if err != nil { + return err + } + for _, r := range regions { + if *lc.RegionalByTable.Region == r { + foundRegion = true + break + } + } + if !foundRegion { + return errors.WithHintf( + pgerror.Newf( + pgcode.InvalidTableDefinition, + `region "%s" has not been added to database "%s"`, + *lc.RegionalByTable.Region, + db.DatabaseDesc().Name, + ), + "available regions: %s", + strings.Join(regions.ToStrings(), ", "), + ) + } + if !regionEnumIDReferenced { + return errors.AssertionFailedf( + "expected multi-region enum ID %d to be referenced on REGIONAL BY TABLE: %q locality "+ + "config, but did not find it", + regionsEnumID, + desc.GetName(), + ) + } + } else { + if regionEnumIDReferenced { + // It may be the case that the multi-region type descriptor is used + // as the type of the table column. Validations should only fail if + // that is not the case. + if _, found := columnTypesTypeIDs[regionsEnumID]; !found { + return errors.AssertionFailedf( + "expected no region Enum ID to be referenced by a REGIONAL BY TABLE: %q homed in the "+ + "primary region, but found: %d", + desc.GetName(), + regionsEnumDesc.GetID(), + ) + } + } + } + default: + return pgerror.Newf( + pgcode.InvalidTableDefinition, + "unknown locality level: %T", + lc, + ) + } + return nil +} diff --git a/pkg/sql/catalog/tabledesc/validate_test.go b/pkg/sql/catalog/tabledesc/validate_test.go index bae4d5ad6ee5..95abfcf53e9d 100644 --- a/pkg/sql/catalog/tabledesc/validate_test.go +++ b/pkg/sql/catalog/tabledesc/validate_test.go @@ -11,10 +11,21 @@ package tabledesc import ( + "context" + "fmt" "reflect" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) type validateStatus int @@ -264,3 +275,1401 @@ func TestValidateCoversAllDescriptorFields(t *testing.T) { } } } + +func TestValidateTableDesc(t *testing.T) { + defer leaktest.AfterTest(t)() + + computedExpr := "1 + 1" + + testData := []struct { + err string + desc descpb.TableDescriptor + }{ + {`empty table name`, + descpb.TableDescriptor{}}, + {`invalid table ID 0`, + descpb.TableDescriptor{ID: 0, Name: "foo"}}, + {`invalid parent ID 0`, + descpb.TableDescriptor{ID: 2, Name: "foo"}}, + {`table must contain at least 1 column`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + }}, + {`empty column name`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 0}, + }, + NextColumnID: 2, + }}, + {`table "foo" is encoded using using version 0, but this client only supports version 2 and 3`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + }}, + {`virtual column "virt" is not computed`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 2, Name: "virt", Virtual: true}, + }, + NextColumnID: 3, + }}, + {`invalid column ID 0`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 0, Name: "bar"}, + }, + NextColumnID: 2, + }}, + {`table must contain a primary key`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, 
+ Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`duplicate column name: "bar"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + }}, + {`duplicate column name: "bar"`, + descpb.TableDescriptor{ + ID: catconstants.CrdbInternalBackwardDependenciesTableID, + ParentID: 0, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + }}, + {`column "blah" duplicate ID of column "bar": 1`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 1, Name: "blah"}, + }, + NextColumnID: 2, + }}, + {`at least 1 column family must be specified`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + NextColumnID: 2, + }}, + {`the 0th family must have ID 0`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 1}, + }, + NextColumnID: 2, + }}, + {`duplicate family name: "baz"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz"}, + 
{ID: 1, Name: "baz"}, + }, + NextColumnID: 2, + NextFamilyID: 2, + }}, + {`family "qux" duplicate ID of family "baz": 0`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz"}, + {ID: 0, Name: "qux"}, + }, + NextColumnID: 2, + NextFamilyID: 2, + }}, + {`duplicate family name: "baz"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz"}, + {ID: 3, Name: "baz"}, + }, + NextColumnID: 2, + NextFamilyID: 2, + }}, + {`mismatched column ID size (1) and name size (0)`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}}, + }, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`family "baz" contains unknown column "2"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{2}, ColumnNames: []string{"bar"}}, + }, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`family "baz" column 1 should have name "bar", but found name "qux"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"qux"}}, + }, + NextColumnID: 2, + 
NextFamilyID: 1, + }}, + {`column "bar" is not in any column family`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz"}, + }, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`column 1 is in both family 0 and 1`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "baz", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + {ID: 1, Name: "qux", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + NextColumnID: 2, + NextFamilyID: 2, + }}, + {`virtual computed column "virt" cannot be part of a family`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 2, Name: "virt", ComputeExpr: &computedExpr, Virtual: true}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "fam1", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + {ID: 1, Name: "fam2", ColumnIDs: []descpb.ColumnID{2}, ColumnNames: []string{"virt"}}, + }, + NextColumnID: 3, + NextFamilyID: 2, + }}, + {`table must contain a primary key`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 0, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}}, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`invalid index ID 0`, + 
descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 0, Name: "bar", + ColumnIDs: []descpb.ColumnID{0}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}}, + NextColumnID: 2, + NextFamilyID: 1, + }}, + {`index "bar" must contain at least 1 column`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + Indexes: []descpb.IndexDescriptor{ + {ID: 2, Name: "bar"}, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 3, + }}, + {`mismatched column IDs (1) and names (0)`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}}, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`mismatched column IDs (1) and names (2)`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 2, 
Name: "blah"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1, 2}, ColumnNames: []string{"bar", "blah"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", + ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar", "blah"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + NextColumnID: 3, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`duplicate index name: "bar"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", + ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + Indexes: []descpb.IndexDescriptor{ + {ID: 2, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 3, + }}, + {`index "blah" duplicate ID of index "bar": 1`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + Indexes: []descpb.IndexDescriptor{ + {ID: 1, Name: "blah", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: 
[]string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`index "bar" column "bar" should have ID 1, but found ID 2`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{2}, + ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`index "bar" contains unknown column "blah"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"blah"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`mismatched column IDs (1) and directions (0)`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ID: 1, Name: "bar", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"blah"}, + }, + NextColumnID: 2, + 
NextFamilyID: 1, + NextIndexID: 2, + }}, + {`mismatched STORING column IDs (1) and names (0)`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "c1"}, + {ID: 2, Name: "c2"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + { + ID: 0, + Name: "fam", + ColumnIDs: []descpb.ColumnID{1, 2}, + ColumnNames: []string{"c1", "c2"}, + }, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"c1"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + StoreColumnIDs: []descpb.ColumnID{2}, + }, + NextColumnID: 3, + NextFamilyID: 1, + NextIndexID: 2, + }}, + {`at least one of LIST or RANGE partitioning must be used`, + // Verify that validatePartitioning is hooked up. The rest of these + // tests are in TestValidatePartitionion. + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "primary", ColumnIDs: []descpb.ColumnID{1}, ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + NextIndexID: 3, + }}, + {`index "foo_crdb_internal_bar_shard_5_bar_idx" refers to non-existent shard column "does not exist"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 2, Name: "crdb_internal_bar_shard_5"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 
0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1, 2}, + ColumnNames: []string{"bar", "crdb_internal_bar_shard_5"}, + }, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "primary", + Unique: true, + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + StoreColumnNames: []string{"crdb_internal_bar_shard_5"}, + StoreColumnIDs: []descpb.ColumnID{2}, + }, + Indexes: []descpb.IndexDescriptor{ + {ID: 2, Name: "foo_crdb_internal_bar_shard_5_bar_idx", + ColumnIDs: []descpb.ColumnID{2, 1}, + ColumnNames: []string{"crdb_internal_bar_shard_5", "bar"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, + Sharded: descpb.ShardedDescriptor{ + IsSharded: true, + Name: "does not exist", + ShardBuckets: 5, + }, + }, + }, + NextColumnID: 3, + NextFamilyID: 1, + NextIndexID: 3, + }}, + {`TableID mismatch for unique without index constraint "bar_unique": "1" doesn't match descriptor: "2"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ + { + TableID: 1, + ColumnIDs: []descpb.ColumnID{1}, + Name: "bar_unique", + }, + }, + }}, + {`column-id "2" does not exist`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + 
UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ + { + TableID: 2, + ColumnIDs: []descpb.ColumnID{1, 2}, + Name: "bar_unique", + }, + }, + }}, + {`unique without index constraint "bar_unique" contains duplicate column "1"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ + { + TableID: 2, + ColumnIDs: []descpb.ColumnID{1, 1}, + Name: "bar_unique", + }, + }, + }}, + {`empty unique without index constraint name`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + }, + }, + NextColumnID: 2, + NextFamilyID: 1, + UniqueWithoutIndexConstraints: []descpb.UniqueWithoutIndexConstraint{ + { + TableID: 2, + ColumnIDs: []descpb.ColumnID{1}, + }, + }, + }}, + {`primary index column "v" cannot be virtual`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "bar"}, + {ID: 2, Name: "v", ComputeExpr: &computedExpr, Virtual: true}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "primary", + Unique: true, + ColumnIDs: []descpb.ColumnID{1, 2}, + ColumnNames: []string{"bar", "v"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"bar"}, + }, + }, + NextColumnID: 3, + NextFamilyID: 1, + }}, + {`index "sec" cannot 
store virtual column "v"`, + descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.FamilyFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "c1"}, + {ID: 2, Name: "c2"}, + {ID: 3, Name: "v", ComputeExpr: &computedExpr, Virtual: true}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1, 2}, ColumnNames: []string{"c1", "c2"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "pri", ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"c1"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + }, + Indexes: []descpb.IndexDescriptor{ + {ID: 2, Name: "sec", ColumnIDs: []descpb.ColumnID{2}, + ColumnNames: []string{"c2"}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + StoreColumnNames: []string{"v"}, + StoreColumnIDs: []descpb.ColumnID{3}, + }, + }, + NextColumnID: 4, + NextFamilyID: 1, + NextIndexID: 3, + }}, + } + for i, d := range testData { + t.Run(d.err, func(t *testing.T) { + desc := NewImmutable(d.desc) + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), d.err) + if err := catalog.ValidateSelf(desc); err == nil { + t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, d.desc) + } else if expectedErr != err.Error() { + t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, expectedErr, err) + } + }) + } +} + +func TestValidateCrossTableReferences(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + pointer := func(s string) *string { + return &s + } + + tests := []struct { + err string + desc descpb.TableDescriptor + otherDescs []descpb.TableDescriptor + }{ + // Foreign keys + { // 0 + err: `invalid foreign key: missing table=52: referenced table ID 52: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: 
keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + OutboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 52, + ReferencedColumnIDs: []descpb.ColumnID{1}, + OriginTableID: 51, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + otherDescs: nil, + }, + { // 1 + err: `missing fk back reference "fk" to "foo" from "baz"`, + desc: descpb.TableDescriptor{ + ID: 51, + Name: "foo", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + OutboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 52, + ReferencedColumnIDs: []descpb.ColumnID{1}, + OriginTableID: 51, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + }}, + }, + { // 2 + err: `invalid foreign key backreference: missing table=52: referenced table ID 52: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + InboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 51, + ReferencedColumnIDs: []descpb.ColumnID{1}, + OriginTableID: 52, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + }, + { // 3 + err: `missing fk forward reference "fk" to "foo" from "baz"`, + desc: descpb.TableDescriptor{ + ID: 51, + Name: "foo", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + }, + InboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 51, + ReferencedColumnIDs: []descpb.ColumnID{1}, + OriginTableID: 52, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + otherDescs: 
[]descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + }}, + }, + { // 4 + // Regression test for #57066: We can handle one of the referenced tables + // having a pre-19.2 foreign key reference. + err: "", + desc: descpb.TableDescriptor{ + ID: 51, + Name: "foo", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + Indexes: []descpb.IndexDescriptor{ + { + ID: 2, + ColumnIDs: []descpb.ColumnID{1, 2}, + }, + }, + OutboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 52, + ReferencedColumnIDs: []descpb.ColumnID{1}, + OriginTableID: 51, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + Indexes: []descpb.IndexDescriptor{ + { + Unique: true, + ColumnIDs: []descpb.ColumnID{1}, + ReferencedBy: []descpb.ForeignKeyReference{{Table: 51, Index: 2}}, + }, + }, + }}, + }, + { // 5 + // Regression test for #57066: We can handle one of the referenced tables + // having a pre-19.2 foreign key reference. 
+ err: "", + desc: descpb.TableDescriptor{ + ID: 51, + Name: "foo", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + Indexes: []descpb.IndexDescriptor{ + { + ID: 2, + ColumnIDs: []descpb.ColumnID{7}, + Unique: true, + }, + }, + InboundFKs: []descpb.ForeignKeyConstraint{ + { + Name: "fk", + ReferencedTableID: 51, + ReferencedColumnIDs: []descpb.ColumnID{7}, + OriginTableID: 52, + OriginColumnIDs: []descpb.ColumnID{1}, + }, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + Indexes: []descpb.IndexDescriptor{ + { + ID: 2, + Unique: true, + ColumnIDs: []descpb.ColumnID{1}, + ForeignKey: descpb.ForeignKeyReference{Table: 51, Index: 2}, + }, + }, + }}, + }, + + // Interleaves + { // 6 + err: `invalid interleave: missing table=52 index=2: referenced table ID 52: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ + {TableID: 52, IndexID: 2}, + }}, + }, + }, + otherDescs: nil, + }, + { // 7 + err: `invalid interleave: missing table=baz index=2: index-id "2" does not exist`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + FormatVersion: descpb.InterleavedFormatVersion, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ + {TableID: 52, IndexID: 2}, + }}, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + }}, + }, + { // 8 + err: `missing 
interleave back reference to "foo"@"bar" from "baz"@"qux"`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + Interleave: descpb.InterleaveDescriptor{Ancestors: []descpb.InterleaveDescriptor_Ancestor{ + {TableID: 52, IndexID: 2}, + }}, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 2, + Name: "qux", + }, + }}, + }, + { // 9 + err: `invalid interleave backreference table=52 index=2: referenced table ID 52: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, + }, + }, + }, + { // 10 + err: `invalid interleave backreference table=baz index=2: index-id "2" does not exist`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + ID: 52, + Name: "baz", + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + }}, + }, + { // 11 + err: `broken interleave backward reference from "foo"@"bar" to "baz"@"qux"`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + InterleavedBy: []descpb.ForeignKeyReference{{Table: 52, Index: 2}}, + }, + }, + otherDescs: []descpb.TableDescriptor{{ + Name: "baz", + ID: 52, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 2, + Name: "qux", + }, 
+ }}, + }, + { // 12 + err: `referenced type ID 500: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"a"}, + }, + Columns: []descpb.ColumnDescriptor{ + { + Name: "a", + ID: 1, + Type: types.MakeEnum(typedesc.TypeIDToOID(500), typedesc.TypeIDToOID(100500)), + }, + }, + }, + }, + // Add some expressions with invalid type references. + { // 13 + err: `referenced type ID 500: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"a"}, + }, + Columns: []descpb.ColumnDescriptor{ + { + Name: "a", + ID: 1, + Type: types.Int, + DefaultExpr: pointer("a::@100500"), + }, + }, + }, + }, + { // 14 + err: `referenced type ID 500: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, + Name: "bar", + ColumnIDs: []descpb.ColumnID{1}, + ColumnNames: []string{"a"}, + }, + Columns: []descpb.ColumnDescriptor{ + { + Name: "a", + ID: 1, + Type: types.Int, + ComputeExpr: pointer("a:::@100500"), + }, + }, + }, + }, + { // 15 + err: `referenced type ID 500: descriptor not found`, + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: keys.PublicSchemaID, + Checks: []*descpb.TableDescriptor_CheckConstraint{ + { + Expr: "a::@100500", + }, + }, + }, + }, + // Temporary tables. 
+ { // 16 + err: "", + desc: descpb.TableDescriptor{ + Name: "foo", + ID: 51, + ParentID: 1, + UnexposedParentSchemaID: 12345, + FormatVersion: descpb.InterleavedFormatVersion, + Temporary: true, + }, + }, + } + + for i, test := range tests { + descs := catalog.MapDescGetter{} + descs[1] = dbdesc.NewImmutable(descpb.DatabaseDescriptor{ID: 1}) + for _, otherDesc := range test.otherDescs { + otherDesc.Privileges = descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) + descs[otherDesc.ID] = NewImmutable(otherDesc) + } + desc := NewImmutable(test.desc) + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) + if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { + if test.err != "" { + t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, test.desc) + } + } else if expectedErr != err.Error() { + t.Errorf("%d: expected \"%s\", but found \"%s\"", i, expectedErr, err.Error()) + } + } +} + +func TestValidatePartitioning(t *testing.T) { + defer leaktest.AfterTest(t)() + + tests := []struct { + err string + desc descpb.TableDescriptor + }{ + {"at least one of LIST or RANGE partitioning must be used", + descpb.TableDescriptor{ + PrimaryIndex: descpb.IndexDescriptor{ + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + }, + }, + }, + }, + {"PARTITION p1: must contain values", + descpb.TableDescriptor{ + PrimaryIndex: descpb.IndexDescriptor{ + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{Name: "p1"}}, + }, + }, + }, + }, + {"not enough columns in index for this partitioning", + descpb.TableDescriptor{ + PrimaryIndex: descpb.IndexDescriptor{ + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: 
[]descpb.PartitioningDescriptor_List{{Name: "p1", Values: [][]byte{{}}}}, + }, + }, + }, + }, + {"only one LIST or RANGE partitioning may used", + descpb.TableDescriptor{ + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{}}, + Range: []descpb.PartitioningDescriptor_Range{{}}, + }, + }, + }, + }, + {"PARTITION name must be non-empty", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{}}, + }, + }, + }, + }, + {"PARTITION p1: must contain values", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{Name: "p1"}}, + }, + }, + }, + }, + {"PARTITION p1: decoding: empty array", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{ + Name: "p1", Values: [][]byte{{}}, + }}, + }, + }, + }, + }, + {"PARTITION p1: decoding: int64 varint decoding failed: 0", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + 
PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{ + {Name: "p1", Values: [][]byte{{0x03}}}, + }, + }, + }, + }, + }, + {"PARTITION p1: superfluous data in encoded value", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{ + {Name: "p1", Values: [][]byte{{0x03, 0x02, 0x00}}}, + }, + }, + }, + }, + }, + {"partitions p1 and p2 overlap", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1, 1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + Range: []descpb.PartitioningDescriptor_Range{ + {Name: "p1", FromInclusive: []byte{0x03, 0x02}, ToExclusive: []byte{0x03, 0x04}}, + {Name: "p2", FromInclusive: []byte{0x03, 0x02}, ToExclusive: []byte{0x03, 0x04}}, + }, + }, + }, + }, + }, + {"PARTITION p1: name must be unique", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{ + {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, + {Name: "p1", Values: [][]byte{{0x03, 0x04}}}, + }, + }, + }, + }, + }, + {"not enough columns in index 
for this partitioning", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{ + Name: "p1", + Values: [][]byte{{0x03, 0x02}}, + Subpartitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{{Name: "p1_1", Values: [][]byte{{}}}}, + }, + }}, + }, + }, + }, + }, + {"PARTITION p1: name must be unique", + descpb.TableDescriptor{ + Columns: []descpb.ColumnDescriptor{{ID: 1, Type: types.Int}}, + PrimaryIndex: descpb.IndexDescriptor{ + ColumnIDs: []descpb.ColumnID{1, 1}, + ColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, + Partitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{ + {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, + { + Name: "p2", + Values: [][]byte{{0x03, 0x04}}, + Subpartitioning: descpb.PartitioningDescriptor{ + NumColumns: 1, + List: []descpb.PartitioningDescriptor_List{ + {Name: "p1", Values: [][]byte{{0x03, 0x02}}}, + }, + }, + }, + }, + }, + }, + }, + }, + } + for i, test := range tests { + t.Run(test.err, func(t *testing.T) { + desc := NewImmutable(test.desc) + err := ValidatePartitioning(desc) + if !testutils.IsError(err, test.err) { + t.Errorf(`%d: got "%v" expected "%v"`, i, err, test.err) + } + }) + } +} diff --git a/pkg/sql/catalog/typedesc/safe_format_test.go b/pkg/sql/catalog/typedesc/safe_format_test.go index 162e07b63d19..bbfcc0fba39c 100644 --- a/pkg/sql/catalog/typedesc/safe_format_test.go +++ b/pkg/sql/catalog/typedesc/safe_format_test.go @@ -13,6 +13,7 @@ package typedesc_test import ( "testing" + "github.com/cockroachdb/cockroach/pkg/security" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -38,7 +39,7 @@ func TestSafeMessage(t *testing.T) { Name: "bar", }, }, - Privileges: &descpb.PrivilegeDescriptor{}, + Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), ParentID: 2, ParentSchemaID: 29, ArrayTypeID: 117, @@ -62,7 +63,7 @@ func TestSafeMessage(t *testing.T) { Name: "bar", }, }, - Privileges: &descpb.PrivilegeDescriptor{}, + Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), ParentID: 2, ParentSchemaID: 29, ArrayTypeID: 117, diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go index afcffdd9cc48..eb5208edc554 100644 --- a/pkg/sql/catalog/typedesc/type_desc.go +++ b/pkg/sql/catalog/typedesc/type_desc.go @@ -452,268 +452,233 @@ func isBeingDropped(member *descpb.TypeDescriptor_EnumMember) bool { } // ValidateSelf performs validation on the TypeDescriptor. -func (desc *Immutable) ValidateSelf(_ context.Context) error { +func (desc *Immutable) ValidateSelf(vea catalog.ValidationErrorAccumulator) { // Validate local properties of the descriptor. 
- if err := catalog.ValidateName(desc.Name, "type"); err != nil { - return err + vea.Report(catalog.ValidateName(desc.Name, "type")) + if desc.GetID() == descpb.InvalidID { + vea.Report(errors.AssertionFailedf("invalid ID %d", desc.GetID())) } - - if desc.ID == descpb.InvalidID { - return errors.AssertionFailedf("invalid ID %d", errors.Safe(desc.ID)) + if desc.GetParentID() == descpb.InvalidID { + vea.Report(errors.AssertionFailedf("invalid parentID %d", desc.GetParentID())) } - if desc.ParentID == descpb.InvalidID { - return errors.AssertionFailedf("invalid parentID %d", errors.Safe(desc.ParentID)) + if desc.GetParentSchemaID() == descpb.InvalidID { + vea.Report(errors.AssertionFailedf("invalid parent schema ID %d", desc.GetParentSchemaID())) } switch desc.Kind { case descpb.TypeDescriptor_MULTIREGION_ENUM: - // In the case of the multi-region enum, we also keep the logical descriptors - // sorted. Validate that's the case. - for i := 0; i < len(desc.EnumMembers)-1; i++ { - if desc.EnumMembers[i].LogicalRepresentation > desc.EnumMembers[i+1].LogicalRepresentation { - return errors.AssertionFailedf( - "multi-region enum is out of order %q > %q", - desc.EnumMembers[i].LogicalRepresentation, - desc.EnumMembers[i+1].LogicalRepresentation, - ) - } - } - // Now do all of the checking for ordinary enums, which also apply to multi-region enums. - fallthrough - case descpb.TypeDescriptor_ENUM: - // All of the enum members should be in sorted order. - if !sort.IsSorted(EnumMembers(desc.EnumMembers)) { - return errors.AssertionFailedf("enum members are not sorted %v", desc.EnumMembers) - } - // Ensure there are no duplicate enum physical reps. - for i := 0; i < len(desc.EnumMembers)-1; i++ { - if bytes.Equal(desc.EnumMembers[i].PhysicalRepresentation, desc.EnumMembers[i+1].PhysicalRepresentation) { - return errors.AssertionFailedf("duplicate enum physical rep %v", desc.EnumMembers[i].PhysicalRepresentation) - } - } - // Ensure there are no duplicate enum values. 
- members := make(map[string]struct{}, len(desc.EnumMembers)) - for i := range desc.EnumMembers { - _, ok := members[desc.EnumMembers[i].LogicalRepresentation] - if ok { - return errors.AssertionFailedf("duplicate enum member %q", desc.EnumMembers[i].LogicalRepresentation) - } - members[desc.EnumMembers[i].LogicalRepresentation] = struct{}{} - } - - // Ensure the sanity of enum capabilities and transition directions. - for _, member := range desc.EnumMembers { - switch member.Capability { - case descpb.TypeDescriptor_EnumMember_READ_ONLY: - if member.Direction == descpb.TypeDescriptor_EnumMember_NONE { - return errors.AssertionFailedf( - "read only capability member must have transition direction set") - } - case descpb.TypeDescriptor_EnumMember_ALL: - if member.Direction != descpb.TypeDescriptor_EnumMember_NONE { - return errors.AssertionFailedf("public enum member can not have transition direction set") + vea.Report(desc.Privileges.Validate(desc.ID, privilege.Type)) + // Check presence of region config + if desc.RegionConfig == nil { + vea.Report(errors.AssertionFailedf("no region config on %s type desc", desc.Kind.String())) + } + if desc.validateEnumMembers(vea) { + // In the case of the multi-region enum, we also keep the logical descriptors + // sorted. Validate that's the case. + for i := 0; i < len(desc.EnumMembers)-1; i++ { + if desc.EnumMembers[i].LogicalRepresentation > desc.EnumMembers[i+1].LogicalRepresentation { + vea.Report(errors.AssertionFailedf( + "multi-region enum is out of order %q > %q", + desc.EnumMembers[i].LogicalRepresentation, + desc.EnumMembers[i+1].LogicalRepresentation, + )) } - default: - return errors.AssertionFailedf("invalid member capability %s", member.Capability) } } - - // Validate the Privileges of the descriptor. 
- if err := desc.Privileges.Validate(desc.ID, privilege.Type); err != nil { - return err + case descpb.TypeDescriptor_ENUM: + vea.Report(desc.Privileges.Validate(desc.ID, privilege.Type)) + if desc.RegionConfig != nil { + vea.Report(errors.AssertionFailedf("found region config on %s type desc", desc.Kind.String())) } + desc.validateEnumMembers(vea) case descpb.TypeDescriptor_ALIAS: + if desc.RegionConfig != nil { + vea.Report(errors.AssertionFailedf("found region config on %s type desc", desc.Kind.String())) + } if desc.Alias == nil { - return errors.AssertionFailedf("ALIAS type desc has nil alias type") + vea.Report(errors.AssertionFailedf("ALIAS type desc has nil alias type")) + } + if desc.GetArrayTypeID() != descpb.InvalidID { + vea.Report(errors.AssertionFailedf("ALIAS type desc has array type ID %d", desc.GetArrayTypeID())) } default: - return errors.AssertionFailedf("invalid desc kind %s", desc.Kind.String()) + vea.Report(errors.AssertionFailedf("invalid type descriptor kind %s", desc.Kind.String())) } +} - switch desc.Kind { - case descpb.TypeDescriptor_MULTIREGION_ENUM: - if desc.RegionConfig == nil { - return errors.AssertionFailedf("no region config on %s type desc", desc.Kind.String()) +// validateEnumMembers performs enum member checks. +// Returns true iff the enums are sorted. +func (desc *Immutable) validateEnumMembers(vea catalog.ValidationErrorAccumulator) (isSorted bool) { + // All of the enum members should be in sorted order. + isSorted = sort.IsSorted(EnumMembers(desc.EnumMembers)) + if !isSorted { + vea.Report(errors.AssertionFailedf("enum members are not sorted %v", desc.EnumMembers)) + } + // Ensure there are no duplicate enum physical and logical reps. + physicalMap := make(map[string]struct{}, len(desc.EnumMembers)) + logicalMap := make(map[string]struct{}, len(desc.EnumMembers)) + for _, member := range desc.EnumMembers { + // Ensure there are no duplicate enum physical reps. 
+ _, duplicatePhysical := physicalMap[string(member.PhysicalRepresentation)] + if duplicatePhysical { + vea.Report(errors.AssertionFailedf("duplicate enum physical rep %v", member.PhysicalRepresentation)) } - default: - if desc.RegionConfig != nil { - return errors.AssertionFailedf("found region config on %s type desc", desc.Kind.String()) + physicalMap[string(member.PhysicalRepresentation)] = struct{}{} + // Ensure there are no duplicate enum logical reps. + _, duplicateLogical := logicalMap[member.LogicalRepresentation] + if duplicateLogical { + vea.Report(errors.AssertionFailedf("duplicate enum member %q", member.LogicalRepresentation)) + } + logicalMap[member.LogicalRepresentation] = struct{}{} + // Ensure the sanity of enum capabilities and transition directions. + switch member.Capability { + case descpb.TypeDescriptor_EnumMember_READ_ONLY: + if member.Direction == descpb.TypeDescriptor_EnumMember_NONE { + vea.Report(errors.AssertionFailedf( + "read only capability member must have transition direction set")) + } + case descpb.TypeDescriptor_EnumMember_ALL: + if member.Direction != descpb.TypeDescriptor_EnumMember_NONE { + vea.Report(errors.AssertionFailedf("public enum member can not have transition direction set")) + } + default: + vea.Report(errors.AssertionFailedf("invalid member capability %s", member.Capability)) } } - - return nil + return isSorted } -// Validate performs ValidateSelf followed by -// cross reference checks on the descriptor. -func (desc *Immutable) Validate(ctx context.Context, descGetter catalog.DescGetter) error { - if err := desc.ValidateSelf(ctx); err != nil { - return err +// GetReferencedDescIDs returns the IDs of all descriptors referenced by +// this descriptor, including itself. +func (desc *Immutable) GetReferencedDescIDs() catalog.DescriptorIDSet { + ids := catalog.MakeDescriptorIDSet(desc.GetReferencingDescriptorIDs()...) 
+ ids.Add(desc.GetParentID()) + if desc.GetParentSchemaID() != keys.PublicSchemaID { + ids.Add(desc.GetParentSchemaID()) } - - // Don't validate cross-references for dropped descriptors. - if desc.Dropped() || descGetter == nil { - return nil + for id := range desc.GetIDClosure() { + ids.Add(id) } + return ids +} - // Validate all cross references on the descriptor. - - // Buffer all the requested requests and error checks together to run at once. - var checks []func(got catalog.Descriptor) error - var reqs []descpb.ID - +// ValidateCrossReferences performs cross reference checks on the type descriptor. +func (desc *Immutable) ValidateCrossReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { // Validate the parentID. - reqs = append(reqs, desc.ParentID) - checks = append(checks, func(got catalog.Descriptor) error { - if _, isDB := got.(catalog.DatabaseDescriptor); !isDB { - return errors.AssertionFailedf("parentID %d does not exist", errors.Safe(desc.ParentID)) - } - return nil - }) - - switch desc.Kind { - case descpb.TypeDescriptor_MULTIREGION_ENUM: - // Validate regions on the parent database and the type descriptor are - // consistent. - reqs = append(reqs, desc.ParentID) - checks = append(checks, func(got catalog.Descriptor) error { - dbDesc, isDB := got.(catalog.DatabaseDescriptor) - if !isDB { - return errors.AssertionFailedf("parentID %d does not exist", errors.Safe(desc.ParentID)) - } - // Parent database must be a multi-region database if it includes a - // multi-region enum. - - if !dbDesc.IsMultiRegion() { - return errors.AssertionFailedf("parent database is not a multi-region database") - } - dbRegions, err := dbDesc.RegionNames() - if err != nil { - return err - } - - // Count the number of regions that aren't being dropped. 
- numRegions := 0 - for _, member := range desc.EnumMembers { - if !isBeingDropped(&member) { - numRegions++ - } - } - if numRegions != len(dbRegions) { - return errors.AssertionFailedf( - "unexpected number of regions on db desc: %d expected %d", - len(dbRegions), len(desc.EnumMembers)) - } - - regions := make(map[descpb.RegionName]struct{}, len(dbRegions)) - for _, region := range dbRegions { - regions[region] = struct{}{} - } + dbDesc, err := vdg.GetDatabaseDescriptor(desc.GetParentID()) + if err != nil { + vea.Report(err) + } - for _, member := range desc.EnumMembers { - if isBeingDropped(&member) { - continue - } - enumRegion := descpb.RegionName(member.LogicalRepresentation) - if _, ok := regions[enumRegion]; !ok { - return errors.AssertionFailedf("did not find %q region on database descriptor", enumRegion) - } - } - return nil - }) - - // Validate the primary region on the parent database and the type - // descriptor is consistent. - reqs = append(reqs, desc.ParentID) - checks = append(checks, func(got catalog.Descriptor) error { - dbDesc, isDB := got.(catalog.DatabaseDescriptor) - if !isDB { - return errors.AssertionFailedf("parentID %d does not exist", errors.Safe(desc.ParentID)) - } - dbPrimaryRegion, err := dbDesc.PrimaryRegionName() - if err != nil { - return err - } - primaryRegion, err := desc.PrimaryRegionName() - if err != nil { - return err - } - if dbPrimaryRegion != primaryRegion { - return errors.AssertionFailedf("unexpected primary region on db desc: %q expected %q", - dbPrimaryRegion, primaryRegion) - } - return nil - }) + // Check that the parent schema exists. 
+ if desc.GetParentSchemaID() != keys.PublicSchemaID { + schemaDesc, err := vdg.GetSchemaDescriptor(desc.GetParentSchemaID()) + vea.Report(err) + if schemaDesc != nil && dbDesc != nil && schemaDesc.GetParentID() != dbDesc.GetID() { + vea.Report(errors.AssertionFailedf("parent schema %d is in different database %d", + desc.GetParentSchemaID(), schemaDesc.GetParentID())) + } } - // Validate the parentSchemaID. - if desc.ParentSchemaID != keys.PublicSchemaID { - reqs = append(reqs, desc.ParentSchemaID) - checks = append(checks, func(got catalog.Descriptor) error { - if _, isSchema := got.(catalog.SchemaDescriptor); !isSchema { - return errors.AssertionFailedf("parentSchemaID %d does not exist", errors.Safe(desc.ParentSchemaID)) - } - return nil - }) + if desc.GetKind() == descpb.TypeDescriptor_MULTIREGION_ENUM && dbDesc != nil { + desc.validateMultiRegion(dbDesc, vea) } - switch desc.Kind { + // Validate that the referenced types exist. + switch desc.GetKind() { case descpb.TypeDescriptor_ENUM, descpb.TypeDescriptor_MULTIREGION_ENUM: // Ensure that the referenced array type exists. 
- reqs = append(reqs, desc.ArrayTypeID) - checks = append(checks, func(got catalog.Descriptor) error { - if _, isType := got.(catalog.TypeDescriptor); !isType { - return errors.AssertionFailedf("arrayTypeID %d does not exist for %q", errors.Safe(desc.ArrayTypeID), desc.Kind.String()) - } - return nil - }) + if _, err := vdg.GetTypeDescriptor(desc.GetArrayTypeID()); err != nil { + vea.Report(errors.Wrapf(err, "arrayTypeID %d does not exist for %q", desc.GetArrayTypeID(), desc.GetKind())) + } case descpb.TypeDescriptor_ALIAS: - if desc.ArrayTypeID != descpb.InvalidID { - return errors.AssertionFailedf("ALIAS type desc has array type ID %d", desc.ArrayTypeID) + if desc.GetAlias().UserDefined() { + aliasedID := UserDefinedTypeOIDToID(desc.GetAlias().Oid()) + if _, err := vdg.GetTypeDescriptor(aliasedID); err != nil { + vea.Report(errors.Wrapf(err, "aliased type %d does not exist", aliasedID)) + } } - default: - return errors.New("unknown type descriptor type") } // Validate that all of the referencing descriptors exist. 
- tableExists := func(id descpb.ID) func(got catalog.Descriptor) error { - return func(got catalog.Descriptor) error { - tableDesc, isTable := got.(catalog.TableDescriptor) - if !isTable { - return errors.AssertionFailedf("referencing descriptor %d does not exist", id) - } - if tableDesc.Dropped() { - return errors.AssertionFailedf( - "referencing descriptor %d was dropped without dependency unlinking", id) - } - return nil + for _, id := range desc.GetReferencingDescriptorIDs() { + tableDesc, err := vdg.GetTableDescriptor(id) + if err != nil { + vea.Report(err) + continue } - } - if !desc.Dropped() { - for _, id := range desc.ReferencingDescriptorIDs { - reqs = append(reqs, id) - checks = append(checks, tableExists(id)) + if tableDesc.Dropped() { + vea.Report(errors.AssertionFailedf( + "referencing table %d was dropped without dependency unlinking", id)) } } +} + +func (desc *Immutable) validateMultiRegion( + dbDesc catalog.DatabaseDescriptor, vea catalog.ValidationErrorAccumulator, +) { + // Parent database must be a multi-region database if it includes a + // multi-region enum. + if !dbDesc.IsMultiRegion() { + vea.Report(errors.AssertionFailedf("parent database is not a multi-region database")) + return + } - descs, err := catalog.GetDescs(ctx, descGetter, reqs) + dbRegions, err := dbDesc.RegionNames() if err != nil { - return err + vea.Report(err) + return } - // For each result in the batch, apply the corresponding check. - for i := range checks { - if err := checks[i](descs[i]); err != nil { - return err + // Count the number of regions that aren't being dropped. 
+ numRegions := 0 + for _, member := range desc.EnumMembers { + if !isBeingDropped(&member) { + numRegions++ } } + if numRegions != len(dbRegions) { + vea.Report(errors.AssertionFailedf( + "unexpected number of regions on db desc: %d expected %d", + len(dbRegions), len(desc.EnumMembers))) + } - return nil + regions := make(map[descpb.RegionName]struct{}, len(dbRegions)) + for _, region := range dbRegions { + regions[region] = struct{}{} + } + + for _, member := range desc.EnumMembers { + if isBeingDropped(&member) { + continue + } + enumRegion := descpb.RegionName(member.LogicalRepresentation) + if _, ok := regions[enumRegion]; !ok { + vea.Report(errors.AssertionFailedf("did not find %q region on database descriptor", enumRegion)) + } + } + + dbPrimaryRegion, err := dbDesc.PrimaryRegionName() + if err != nil { + vea.Report(err) + } + primaryRegion, err := desc.PrimaryRegionName() + if err != nil { + vea.Report(err) + } + if dbPrimaryRegion != primaryRegion { + vea.Report(errors.AssertionFailedf("unexpected primary region on db desc: %q expected %q", + dbPrimaryRegion, primaryRegion)) + } } -// ValidateTxnCommit punts to Validate. -func (desc *Immutable) ValidateTxnCommit(ctx context.Context, descGetter catalog.DescGetter) error { - return desc.Validate(ctx, descGetter) +// ValidateTxnCommit implements the catalog.Descriptor interface. +func (desc *Immutable) ValidateTxnCommit( + _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { + // No-op. } // TypeLookupFunc is a type alias for a function that looks up a type by ID. 
diff --git a/pkg/sql/catalog/typedesc/type_desc_test.go b/pkg/sql/catalog/typedesc/type_desc_test.go index 8bca7922714d..1b4f99147338 100644 --- a/pkg/sql/catalog/typedesc/type_desc_test.go +++ b/pkg/sql/catalog/typedesc/type_desc_test.go @@ -12,6 +12,7 @@ package typedesc_test import ( "context" + "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/keys" @@ -353,8 +354,9 @@ func TestValidateTypeDesc(t *testing.T) { ID: 100, }) descs[101] = schemadesc.NewImmutable(descpb.SchemaDescriptor{ - ID: 101, - Name: "schema", + ID: 101, + ParentID: 100, + Name: "schema", }) descs[102] = typedesc.NewImmutable(descpb.TypeDescriptor{ ID: 102, @@ -400,13 +402,23 @@ func TestValidateTypeDesc(t *testing.T) { Privileges: defaultPrivileges, }, }, + + { + `invalid parent schema ID 0`, + descpb.TypeDescriptor{ + Name: "t", + ID: typeDescID, + ParentID: 100, + Privileges: defaultPrivileges, + }, + }, { `enum members are not sorted [{[2] a ALL NONE} {[1] b ALL NONE}]`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "a", @@ -423,10 +435,11 @@ func TestValidateTypeDesc(t *testing.T) { { `duplicate enum physical rep [1]`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "a", @@ -443,10 +456,14 @@ func TestValidateTypeDesc(t *testing.T) { { `duplicate enum physical rep [1]`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 200, - Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 200, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, 
+ RegionConfig: &descpb.TypeDescriptor_RegionConfig{ + PrimaryRegion: "us-east-1", + }, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "us-east-1", @@ -463,10 +480,11 @@ func TestValidateTypeDesc(t *testing.T) { { `duplicate enum member "a"`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "a", @@ -483,10 +501,11 @@ func TestValidateTypeDesc(t *testing.T) { { `duplicate enum member "us-east-1"`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 200, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "us-east-1", @@ -503,10 +522,11 @@ func TestValidateTypeDesc(t *testing.T) { { `read only capability member must have transition direction set`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "a", @@ -521,10 +541,11 @@ func TestValidateTypeDesc(t *testing.T) { { `public enum member can not have transition direction set`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "a", @@ -539,10 +560,14 @@ func TestValidateTypeDesc(t *testing.T) { { `public enum member can not have transition direction set`, 
descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, + RegionConfig: &descpb.TypeDescriptor_RegionConfig{ + PrimaryRegion: "us-east1", + }, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "us-east1", @@ -557,26 +582,28 @@ func TestValidateTypeDesc(t *testing.T) { { `ALIAS type desc has nil alias type`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 1, - Kind: descpb.TypeDescriptor_ALIAS, - Privileges: defaultPrivileges, + Name: "t", + ID: typeDescID, + ParentID: 100, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ALIAS, + Privileges: defaultPrivileges, }, }, { - `parentID 500 does not exist`, + `referenced database ID 500: descriptor not found`, descpb.TypeDescriptor{ - Name: "t", - ID: typeDescID, - ParentID: 500, - Kind: descpb.TypeDescriptor_ALIAS, - Alias: types.Int, - Privileges: defaultPrivileges, + Name: "t", + ID: typeDescID, + ParentID: 500, + ParentSchemaID: keys.PublicSchemaID, + Kind: descpb.TypeDescriptor_ALIAS, + Alias: types.Int, + Privileges: defaultPrivileges, }, }, { - `parentSchemaID 500 does not exist`, + `referenced schema ID 500: descriptor not found`, descpb.TypeDescriptor{ Name: "t", ID: typeDescID, @@ -588,7 +615,7 @@ func TestValidateTypeDesc(t *testing.T) { }, }, { - `arrayTypeID 500 does not exist for "ENUM"`, + `arrayTypeID 500 does not exist for "ENUM": referenced type ID 500: descriptor not found`, descpb.TypeDescriptor{ Name: "t", ID: typeDescID, @@ -600,12 +627,12 @@ func TestValidateTypeDesc(t *testing.T) { }, }, { - `arrayTypeID 500 does not exist for "MULTIREGION_ENUM"`, + `arrayTypeID 500 does not exist for "MULTIREGION_ENUM": referenced type ID 500: descriptor not found`, descpb.TypeDescriptor{ Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + 
ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, RegionConfig: &descpb.TypeDescriptor_RegionConfig{ PrimaryRegion: "us-east-1", @@ -621,12 +648,12 @@ func TestValidateTypeDesc(t *testing.T) { }, }, { - "referencing descriptor 500 does not exist", + "referenced table ID 500: descriptor not found", descpb.TypeDescriptor{ Name: "t", ID: typeDescID, ParentID: 100, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_ENUM, ArrayTypeID: 102, ReferencingDescriptorIDs: []descpb.ID{500}, @@ -634,12 +661,12 @@ func TestValidateTypeDesc(t *testing.T) { }, }, { - "referencing descriptor 500 does not exist", + "referenced table ID 500: descriptor not found", descpb.TypeDescriptor{ Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, RegionConfig: &descpb.TypeDescriptor_RegionConfig{ PrimaryRegion: "us-east-1", @@ -673,7 +700,7 @@ func TestValidateTypeDesc(t *testing.T) { Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, RegionConfig: &descpb.TypeDescriptor_RegionConfig{ PrimaryRegion: "us-east-1", @@ -698,7 +725,7 @@ func TestValidateTypeDesc(t *testing.T) { Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, RegionConfig: &descpb.TypeDescriptor_RegionConfig{ PrimaryRegion: "us-east-1", @@ -740,7 +767,7 @@ func TestValidateTypeDesc(t *testing.T) { Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: descpb.TypeDescriptor_MULTIREGION_ENUM, EnumMembers: []descpb.TypeDescriptor_EnumMember{ { @@ -758,7 +785,7 @@ func TestValidateTypeDesc(t *testing.T) { Name: "t", ID: typeDescID, ParentID: 200, - ParentSchemaID: 101, + ParentSchemaID: keys.PublicSchemaID, Kind: 
descpb.TypeDescriptor_MULTIREGION_ENUM, RegionConfig: &descpb.TypeDescriptor_RegionConfig{ PrimaryRegion: "us-east-2", @@ -777,10 +804,11 @@ func TestValidateTypeDesc(t *testing.T) { for i, test := range testData { desc := typedesc.NewImmutable(test.desc) - if err := desc.Validate(ctx, descs); err == nil { - t.Errorf("#%d expected err: %s but found nil: %v", i, test.err, test.desc) - } else if test.err != err.Error() && "internal error: "+test.err != err.Error() { - t.Errorf("#%d expected err: %s but found: %s", i, test.err, err) + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + if err := catalog.ValidateSelfAndCrossReferences(ctx, descs, desc); err == nil { + t.Errorf("#%d expected err: %s but found nil: %v", i, expectedErr, test.desc) + } else if expectedErr != err.Error() { + t.Errorf("#%d expected err: %s but found: %s", i, expectedErr, err) } } } diff --git a/pkg/sql/catalog/validate.go b/pkg/sql/catalog/validate.go new file mode 100644 index 000000000000..e1fee833ef97 --- /dev/null +++ b/pkg/sql/catalog/validate.go @@ -0,0 +1,325 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package catalog + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/errors" +) + +// Validate performs validation checks on the provided descriptors, up to the +// specified level. +// Some of these checks may require cross-referencing with other descriptors, +// hence the need for a ctx and a DescGetter. If the DescGetter is also a +// BatchDescGetter, then its batching functionality is used. 
+// If one of these checks surfaces an error, that error is wrapped with a prefix +// identifying the descriptor being validated. +// Validate returns a ValidationErrors interface which can provide the errors +// either as a slice or combined as one. +func Validate( + ctx context.Context, + maybeBatchDescGetter DescGetter, + level ValidationLevel, + descriptors ...Descriptor, +) ValidationErrors { + // Check internal descriptor consistency. + var vea validationErrorAccumulator + for _, desc := range descriptors { + if level&ValidationLevelSelfOnly == 0 { + continue + } + vea.setPrefix(desc) + desc.ValidateSelf(&vea) + } + if level <= ValidationLevelSelfOnly || len(vea.errors) > 0 { + return &vea + } + // Collect descriptors referenced by the validated descriptors. + // These are their immediate neighbors in the reference graph, and in some + // special cases those neighbors' immediate neighbors also. + vdg, err := collectDescriptorsForValidation(ctx, maybeBatchDescGetter, descriptors) + if err != nil { + vea.wrapPrefix = "collecting referenced descriptors" + vea.Report(err) + return &vea + } + // Perform cross-reference checks. + for _, desc := range descriptors { + if level&ValidationLevelSelfAndCrossReferences == 0 || desc.Dropped() { + continue + } + vea.setPrefix(desc) + desc.ValidateCrossReferences(&vea, vdg) + } + if level <= ValidationLevelSelfAndCrossReferences { + return &vea + } + // Perform pre-txn-commit checks. + for _, desc := range descriptors { + if level&ValidationLevelAllPreTxnCommit == 0 || desc.Dropped() { + continue + } + vea.setPrefix(desc) + desc.ValidateTxnCommit(&vea, vdg) + } + return &vea +} + +// ValidationLevel defines up to which degree to perform validation in Validate. +type ValidationLevel uint32 + +const ( + // ValidationLevelSelfOnly means only validate internal descriptor consistency. 
+ ValidationLevelSelfOnly ValidationLevel = 1<<(iota+1) - 1 + // ValidationLevelSelfAndCrossReferences means do the above and also check + // cross-references. + ValidationLevelSelfAndCrossReferences + // ValidationLevelAllPreTxnCommit means do the above and also perform + // pre-txn-commit checks. + ValidationLevelAllPreTxnCommit +) + +// ValidateSelf is a convenience function for validate called at the +// ValidationLevelSelfOnly level and combining the resulting errors. +func ValidateSelf(descriptors ...Descriptor) error { + return Validate(context.TODO(), nil, ValidationLevelSelfOnly, descriptors...).CombinedError() +} + +// ValidateSelfAndCrossReferences is a convenience function for Validate called at the +// ValidationLevelSelfAndCrossReferences level and combining the resulting errors. +func ValidateSelfAndCrossReferences( + ctx context.Context, maybeBatchDescGetter DescGetter, descriptors ...Descriptor, +) error { + return Validate(ctx, maybeBatchDescGetter, ValidationLevelSelfAndCrossReferences, descriptors...).CombinedError() +} + +// ValidationErrorAccumulator is used by the validation methods on Descriptor +// to accumulate any encountered validation errors which are then processed by +// the Validate function. +// This interface is sealed to ensure that the validation methods only get +// called via the Validate function. +type ValidationErrorAccumulator interface { + + // Report is called by the validation methods to report a possible error. + // No-ops when err is nil. + Report(err error) + + // Seals this interface. + sealed() +} + +// ValidationErrors is the interface returned by Validate which contains +// all of the errors accumulated during validation. +type ValidationErrors interface { + + // Errors returns all of the errors accumulated during validation. + Errors() []error + + // CombinedError returns all of the above reduced to one error. + CombinedError() error + + // Seals this interface. 
+ sealed() +} + +type validationErrors struct { + errors []error +} + +var _ ValidationErrors = &validationErrors{} + +// sealed implements the ValidationErrors interface. +func (*validationErrors) sealed() {} + +// Errors implements the ValidationErrors interface. +func (ve *validationErrors) Errors() []error { + return ve.errors +} + +// CombinedError implements the ValidationErrors interface. +func (ve *validationErrors) CombinedError() error { + var combinedErr error + for i := len(ve.errors) - 1; i >= 0; i-- { + combinedErr = errors.CombineErrors(ve.errors[i], combinedErr) + } + return combinedErr +} + +type validationErrorAccumulator struct { + validationErrors + wrapPrefix string +} + +var _ ValidationErrorAccumulator = &validationErrorAccumulator{} + +// Report implements the ValidationErrorAccumulator interface. +func (vea *validationErrorAccumulator) Report(err error) { + if err == nil { + return + } + if vea.wrapPrefix != "" { + err = errors.Wrapf(err, "%s", vea.wrapPrefix) + } + vea.errors = append(vea.errors, err) +} + +func (vea *validationErrorAccumulator) setPrefix(desc Descriptor) { + vea.wrapPrefix = fmt.Sprintf("%s %q (%d)", desc.TypeName(), desc.GetName(), desc.GetID()) +} + +// ValidationDescGetter is used by the validation methods on Descriptor. +// This interface is sealed to ensure those methods only get called via +// the Validate function. +type ValidationDescGetter interface { + + // GetDatabaseDescriptor returns the corresponding DatabaseDescriptor or an error instead. + GetDatabaseDescriptor(id descpb.ID) (DatabaseDescriptor, error) + + // GetSchemaDescriptor returns the corresponding SchemaDescriptor or an error instead. + GetSchemaDescriptor(id descpb.ID) (SchemaDescriptor, error) + + // GetTableDescriptor returns the corresponding TableDescriptor or an error instead. + GetTableDescriptor(id descpb.ID) (TableDescriptor, error) + + // GetTypeDescriptor returns the corresponding TypeDescriptor or an error instead. 
+ GetTypeDescriptor(id descpb.ID) (TypeDescriptor, error) + + // Seals this interface. + sealed() +} + +type validationDescGetterImpl MapDescGetter + +var _ ValidationDescGetter = validationDescGetterImpl{} + +// sealed implements the ValidationDescGetter interface. +func (validationDescGetterImpl) sealed() {} + +// GetDatabaseDescriptor implements the ValidationDescGetter interface. +func (vdg validationDescGetterImpl) GetDatabaseDescriptor( + id descpb.ID, +) (DatabaseDescriptor, error) { + desc, found := vdg[id] + if !found || desc == nil { + return nil, WrapDatabaseDescRefErr(id, ErrDescriptorNotFound) + } + return AsDatabaseDescriptor(desc) +} + +// GetSchemaDescriptor implements the ValidationDescGetter interface. +func (vdg validationDescGetterImpl) GetSchemaDescriptor(id descpb.ID) (SchemaDescriptor, error) { + desc, found := vdg[id] + if !found || desc == nil { + return nil, WrapSchemaDescRefErr(id, ErrDescriptorNotFound) + } + return AsSchemaDescriptor(desc) +} + +// GetTableDescriptor implements the ValidationDescGetter interface. +func (vdg validationDescGetterImpl) GetTableDescriptor(id descpb.ID) (TableDescriptor, error) { + desc, found := vdg[id] + if !found || desc == nil { + return nil, WrapTableDescRefErr(id, ErrDescriptorNotFound) + } + return AsTableDescriptor(desc) +} + +// GetTypeDescriptor implements the ValidationDescGetter interface. +func (vdg validationDescGetterImpl) GetTypeDescriptor(id descpb.ID) (TypeDescriptor, error) { + desc, found := vdg[id] + if !found || desc == nil { + return nil, WrapTypeDescRefErr(id, ErrDescriptorNotFound) + } + return AsTypeDescriptor(desc) +} + +// collectorState is used by collectDescriptorsForValidation +type collectorState struct { + descs validationDescGetterImpl + referencedBy DescriptorIDSet +} + +// addDirectReferences adds all immediate neighbors of desc to the state. 
+func (cs *collectorState) addDirectReferences(desc Descriptor) { + cs.descs[desc.GetID()] = desc + desc.GetReferencedDescIDs().ForEach(cs.referencedBy.Add) +} + +// getMissingDescs fetches the descs which have corresponding IDs in the state +// but which are otherwise missing. +func (cs *collectorState) getMissingDescs( + ctx context.Context, maybeBatchDescGetter DescGetter, +) (resps []Descriptor, err error) { + reqs := make([]descpb.ID, 0, cs.referencedBy.Len()) + for _, id := range cs.referencedBy.Ordered() { + if _, exists := cs.descs[id]; !exists { + reqs = append(reqs, id) + } + } + if len(reqs) == 0 { + return nil, nil + } + if bdg, ok := maybeBatchDescGetter.(BatchDescGetter); ok { + resps, err = bdg.GetDescs(ctx, reqs) + if err != nil { + return nil, err + } + } else { + resps = make([]Descriptor, len(reqs)) + for i, id := range reqs { + resps[i], err = maybeBatchDescGetter.GetDesc(ctx, id) + if err != nil { + return nil, err + } + } + } + for _, desc := range resps { + if desc != nil { + cs.descs[desc.GetID()] = desc + } + } + return resps, nil +} + +// collectDescriptorsForValidation is used by Validate to provide it with all +// possible descriptors required for validation. 
+func collectDescriptorsForValidation( + ctx context.Context, maybeBatchDescGetter DescGetter, descriptors []Descriptor, +) (ValidationDescGetter, error) { + cs := collectorState{ + descs: make(map[descpb.ID]Descriptor, len(descriptors)), + referencedBy: MakeDescriptorIDSet(), + } + for _, desc := range descriptors { + cs.addDirectReferences(desc) + } + newDescs, err := cs.getMissingDescs(ctx, maybeBatchDescGetter) + if err != nil { + return nil, err + } + for _, newDesc := range newDescs { + if newDesc == nil { + continue + } + switch newDesc.(type) { + case DatabaseDescriptor, TypeDescriptor: + cs.addDirectReferences(newDesc) + } + } + _, err = cs.getMissingDescs(ctx, maybeBatchDescGetter) + if err != nil { + return nil, err + } + return cs.descs, nil +} diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index d830818a6cca..1f4ab47a8a26 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -34,7 +34,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -1323,13 +1322,6 @@ func (ex *connExecutor) Ctx() context.Context { if _, ok := ex.machine.CurState().(stateInternalError); ok { ctx = ex.ctxHolder.ctx() } - return ex.DescriptorValidationContext(ctx) -} - -func (ex *connExecutor) DescriptorValidationContext(ctx context.Context) context.Context { - if ex.server.cfg.TestingKnobs.TestingDescriptorValidation { - return context.WithValue(ctx, tabledesc.PerformTestingDescriptorValidation, true) - } return ctx } diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index 1798a8cceaa9..eeb8ff752eca 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -3933,7 
+3933,7 @@ CREATE TABLE crdb_internal.invalid_objects ( if descriptor == nil { return nil } - err := descriptor.Validate(ctx, fn) + err := catalog.ValidateSelfAndCrossReferences(ctx, fn, descriptor) if err == nil { return nil } @@ -3960,7 +3960,7 @@ CREATE TABLE crdb_internal.invalid_objects ( if descriptor == nil { return nil } - err = descriptor.Validate(ctx, fn) + err := catalog.ValidateSelfAndCrossReferences(ctx, fn, descriptor) if err == nil { return nil } diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index 227ab73b0036..a68e37e2cc17 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -434,14 +434,14 @@ DELETE FROM system.descriptor WHERE id=54; require.Equal(t, 53, id) require.Equal(t, "", dbName) require.Equal(t, "", schemaName) - require.Equal(t, "desc 53: parentID 52 does not exist", errStr) + require.Equal(t, `relation "test" (53): referenced database ID 52: descriptor not found`, errStr) require.True(t, rows.Next()) require.NoError(t, rows.Scan(&id, &dbName, &schemaName, &objName, &errStr)) require.Equal(t, 55, id) require.Equal(t, "defaultdb", dbName) require.Equal(t, "public", schemaName) - require.Equal(t, "desc 55: invalid foreign key: missing table=54: descriptor not found", errStr) + require.Equal(t, `relation "tbl" (55): invalid foreign key: missing table=54: referenced table ID 54: descriptor not found`, errStr) require.False(t, rows.Next()) } diff --git a/pkg/sql/create_index.go b/pkg/sql/create_index.go index 3f768c981a96..4da50d7ac072 100644 --- a/pkg/sql/create_index.go +++ b/pkg/sql/create_index.go @@ -112,7 +112,7 @@ func (p *planner) setupFamilyAndConstraintForShard( if err != nil { return err } - info, err := tableDesc.GetConstraintInfo(ctx, nil) + info, err := tableDesc.GetConstraintInfo() if err != nil { return err } diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go index 6ee409d08750..700f6be46248 100644 --- a/pkg/sql/create_sequence.go +++ 
b/pkg/sql/create_sequence.go @@ -152,8 +152,7 @@ func doCreateSequence( return err } - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := desc.Validate(params.ctx, dg); err != nil { + if err := validateDescriptor(params.ctx, params.p, desc); err != nil { return err } @@ -232,7 +231,7 @@ func NewSequenceTableDesc( // immediately. desc.State = descpb.DescriptorState_PUBLIC - if err := desc.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(&desc); err != nil { return nil, err } return &desc, nil diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index daee10e4b136..3807447899dc 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -446,8 +446,7 @@ func (n *createTableNode) startExec(params runParams) error { } } - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := desc.Validate(params.ctx, dg); err != nil { + if err := validateDescriptor(params.ctx, params.p, desc); err != nil { return err } @@ -793,7 +792,7 @@ func ResolveUniqueWithoutIndexConstraint( } // Verify we are not writing a constraint over the same name. - constraintInfo, err := tbl.GetConstraintInfo(ctx, nil) + constraintInfo, err := tbl.GetConstraintInfo() if err != nil { return err } @@ -992,7 +991,7 @@ func ResolveFK( // or else we can hit other checks that break things with // undesired error codes, e.g. #42858. // It may be removable after #37255 is complete. 
- constraintInfo, err := tbl.GetConstraintInfo(ctx, nil) + constraintInfo, err := tbl.GetConstraintInfo() if err != nil { return err } @@ -2321,8 +2320,8 @@ func NewTableDesc( return nil, errors.Newf("unknown locality level: %v", n.Locality.LocalityLevel) } - dg := catalogkv.NewOneLevelUncachedDescGetter(txn, evalCtx.Codec) - if err := desc.ValidateTableLocalityConfig(ctx, dg); err != nil { + bdg := catalogkv.NewOneLevelUncachedDescGetter(txn, evalCtx.Codec) + if err := catalog.ValidateSelfAndCrossReferences(ctx, bdg, &desc); err != nil { return nil, err } } diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index c53961073b0e..b6dfde35aa4c 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -72,7 +72,6 @@ func (n *createViewNode) startExec(params runParams) error { } viewName := n.viewName.Object() - persistence := n.persistence log.VEventf(params.ctx, 2, "dependencies for view %s:\n%s", viewName, n.planDeps.String()) // Check that the view does not contain references to other databases. @@ -92,24 +91,28 @@ func (n *createViewNode) startExec(params runParams) error { // First check the backrefs and see if any of them are temporary. // If so, promote this view to temporary. backRefMutables := make(map[descpb.ID]*tabledesc.Mutable, len(n.planDeps)) + hasTempBackref := false for id, updated := range n.planDeps { backRefMutable := params.p.Descriptors().GetUncommittedTableByID(id) if backRefMutable == nil { backRefMutable = tabledesc.NewExistingMutable(*updated.desc.TableDesc()) } - if !persistence.IsTemporary() && backRefMutable.Temporary { - // This notice is sent from pg, let's imitate. 
- params.p.BufferClientNotice( - params.ctx, - pgnotice.Newf(`view "%s" will be a temporary view`, viewName), - ) - persistence = tree.PersistenceTemporary + if !n.persistence.IsTemporary() && backRefMutable.Temporary { + hasTempBackref = true } backRefMutables[id] = backRefMutable } + if hasTempBackref { + n.persistence = tree.PersistenceTemporary + // This notice is sent from pg, let's imitate. + params.p.BufferClientNotice( + params.ctx, + pgnotice.Newf(`view "%s" will be a temporary view`, viewName), + ) + } var replacingDesc *tabledesc.Mutable - tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.GetID(), persistence, n.viewName, + tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.GetID(), n.persistence, n.viewName, tree.ResolveRequireViewDesc, n.ifNotExists) if err != nil { switch { @@ -258,8 +261,7 @@ func (n *createViewNode) startExec(params runParams) error { return err } - dg := catalogkv.NewOneLevelUncachedDescGetter(params.p.txn, params.ExecCfg().Codec) - if err := newDesc.Validate(params.ctx, dg); err != nil { + if err := validateDescriptor(params.ctx, params.p, newDesc); err != nil { return err } diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 3fdbfe8cfe8d..e1fbae4992ef 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -174,40 +174,22 @@ func (p *planner) createDescriptorWithID( if !ok { log.Fatalf(ctx, "unexpected type %T when creating descriptor", descriptor) } + isTable := false - switch desc := mutDesc.(type) { - case *typedesc.Mutable: - dg := catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec) - if err := desc.Validate(ctx, dg); err != nil { - return err - } - if err := p.Descriptors().AddUncommittedDescriptor(mutDesc); err != nil { - return err - } + addUncommitted := false + switch mutDesc.(type) { + case *dbdesc.Mutable, *schemadesc.Mutable, *typedesc.Mutable: + addUncommitted = true case *tabledesc.Mutable: + addUncommitted = true isTable = true - if err := 
desc.ValidateSelf(ctx); err != nil { - return err - } - if err := p.Descriptors().AddUncommittedDescriptor(mutDesc); err != nil { - return err - } - case *dbdesc.Mutable: - if err := desc.ValidateSelf(ctx); err != nil { - return err - } - if err := p.Descriptors().AddUncommittedDescriptor(mutDesc); err != nil { - return err - } - case *schemadesc.Mutable: - if err := desc.ValidateSelf(ctx); err != nil { - return err - } + default: + log.Fatalf(ctx, "unexpected type %T when creating descriptor", mutDesc) + } + if addUncommitted { if err := p.Descriptors().AddUncommittedDescriptor(mutDesc); err != nil { return err } - default: - log.Fatalf(ctx, "unexpected type %T when creating descriptor", mutDesc) } if err := p.txn.Run(ctx, b); err != nil { diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index 4c391806d265..a3210d10b2cf 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -85,7 +86,7 @@ func (mt mutationTest) makeMutationsActive(ctx context.Context) { } mt.tableDesc.Mutations = nil mt.tableDesc.Version++ - if err := mt.tableDesc.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(mt.tableDesc); err != nil { mt.Fatal(err) } if err := mt.kvDB.Put( @@ -145,7 +146,7 @@ func (mt mutationTest) writeMutation(ctx context.Context, m descpb.DescriptorMut } mt.tableDesc.Mutations = append(mt.tableDesc.Mutations, m) mt.tableDesc.Version++ - if err := mt.tableDesc.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(mt.tableDesc); err != nil { mt.Fatal(err) } if err := mt.kvDB.Put( @@ 
-455,21 +456,21 @@ CREATE INDEX allidx ON t.test (k, v); // Check that a mutation can only be inserted with an explicit mutation state, and direction. tableDesc = mTest.tableDesc tableDesc.Mutations = []descpb.DescriptorMutation{{}} - if err := tableDesc.ValidateSelf(ctx); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, and no column/index descriptor") { + if err := catalog.ValidateSelf(tableDesc); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, and no column/index descriptor") { t.Fatal(err) } tableDesc.Mutations = []descpb.DescriptorMutation{{Descriptor_: &descpb.DescriptorMutation_Column{Column: &tableDesc.Columns[len(tableDesc.Columns)-1]}}} tableDesc.Columns = tableDesc.Columns[:len(tableDesc.Columns)-1] - if err := tableDesc.ValidateSelf(ctx); !testutils.IsError(err, `mutation in state UNKNOWN, direction NONE, col "i", id 3`) { + if err := catalog.ValidateSelf(tableDesc); !testutils.IsError(err, `mutation in state UNKNOWN, direction NONE, col "i", id 3`) { t.Fatal(err) } tableDesc.Mutations[0].State = descpb.DescriptorMutation_DELETE_ONLY - if err := tableDesc.ValidateSelf(ctx); !testutils.IsError(err, `mutation in state DELETE_ONLY, direction NONE, col "i", id 3`) { + if err := catalog.ValidateSelf(tableDesc); !testutils.IsError(err, `mutation in state DELETE_ONLY, direction NONE, col "i", id 3`) { t.Fatal(err) } tableDesc.Mutations[0].State = descpb.DescriptorMutation_UNKNOWN tableDesc.Mutations[0].Direction = descpb.DescriptorMutation_DROP - if err := tableDesc.ValidateSelf(ctx); !testutils.IsError(err, `mutation in state UNKNOWN, direction DROP, col "i", id 3`) { + if err := catalog.ValidateSelf(tableDesc); !testutils.IsError(err, `mutation in state UNKNOWN, direction DROP, col "i", id 3`) { t.Fatal(err) } } @@ -645,7 +646,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); index := tableDesc.PublicNonPrimaryIndexes()[len(tableDesc.PublicNonPrimaryIndexes())-1] tableDesc.Mutations = 
[]descpb.DescriptorMutation{{Descriptor_: &descpb.DescriptorMutation_Index{Index: index.IndexDesc()}}} tableDesc.RemovePublicNonPrimaryIndex(index.Ordinal()) - if err := tableDesc.ValidateSelf(ctx); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, index foo, id 2") { + if err := catalog.ValidateSelf(tableDesc); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, index foo, id 2") { t.Fatal(err) } } diff --git a/pkg/sql/doctor/BUILD.bazel b/pkg/sql/doctor/BUILD.bazel index ec6e9eb14111..91472fbff242 100644 --- a/pkg/sql/doctor/BUILD.bazel +++ b/pkg/sql/doctor/BUILD.bazel @@ -12,7 +12,6 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/typedesc", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/protoutil", diff --git a/pkg/sql/doctor/doctor.go b/pkg/sql/doctor/doctor.go index 277f7f561bb3..b918e01b6b5a 100644 --- a/pkg/sql/doctor/doctor.go +++ b/pkg/sql/doctor/doctor.go @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -124,6 +123,7 @@ func ExamineDescriptors( if err != nil { return false, err } + nMap := newNamespaceMap(namespaceTable) var problemsFound bool @@ -140,61 +140,9 @@ func ExamineDescriptors( continue } - _, parentExists := descGetter[desc.GetParentID()] - parentSchema, parentSchemaExists := descGetter[desc.GetParentSchemaID()] - var skipParentIDCheck bool - skipParentSchemaCheck := desc.Dropped() - switch d := desc.(type) { - case catalog.TableDescriptor: - if err := d.Validate(ctx, descGetter); err != nil { - problemsFound = true - fmt.Fprint(stdout, reportMsg(desc, "%s", err)) - } - // Table has 
been already validated. - skipParentIDCheck = true - case catalog.TypeDescriptor: - typ := typedesc.NewImmutable(*d.TypeDesc()) - if err := typ.Validate(ctx, descGetter); err != nil { - problemsFound = true - fmt.Fprint(stdout, reportMsg(desc, "%s", err)) - } - case catalog.SchemaDescriptor: - // parent schema id is always 0. - skipParentSchemaCheck = true - } - - // TODO(postamar): The following descriptor checks on parent id, parent - // schema id and parent schema parent id should instead be performed by the - // descriptor validation logic. - // For doctor to still be useful this will require rewriting it such that it - // return multiple errors instead of only the first it encounters. - invalidParentID := !parentExists && - desc.GetParentID() != descpb.InvalidID - - if !skipParentIDCheck && invalidParentID { + for _, err := range catalog.Validate(ctx, descGetter, catalog.ValidationLevelSelfAndCrossReferences, desc).Errors() { problemsFound = true - fmt.Fprint(stdout, reportMsg(desc, "invalid parent id %d", desc.GetParentID())) - } - - invalidParentSchemaID := !parentSchemaExists && - desc.GetParentSchemaID() != descpb.InvalidID && - desc.GetParentSchemaID() != keys.PublicSchemaID - - invalidParentSchemaParentID := !invalidParentID && - desc.GetParentSchemaID() != descpb.InvalidID && - desc.GetParentSchemaID() != keys.PublicSchemaID && - parentSchemaExists && - parentSchema.GetParentID() != desc.GetParentID() - - if !skipParentSchemaCheck { - if invalidParentSchemaID { - problemsFound = true - fmt.Fprint(stdout, reportMsg(desc, "invalid parent schema id %d", desc.GetParentSchemaID())) - } - if invalidParentSchemaParentID { - problemsFound = true - fmt.Fprint(stdout, reportMsg(desc, "invalid parent id of parent schema, expected %d, found %d", desc.GetParentID(), parentSchema.GetParentID())) - } + fmt.Fprint(stdout, reportMsg(desc, "%s", err)) } // Process namespace entries pointing to this descriptor. 
@@ -339,18 +287,13 @@ func ExamineJobs( } func reportMsg(desc catalog.Descriptor, format string, args ...interface{}) string { - var header string - switch desc.(type) { - case catalog.TypeDescriptor: - header = " Type" - case catalog.TableDescriptor: - header = " Table" - case catalog.SchemaDescriptor: - header = " Schema" - case catalog.DatabaseDescriptor: - header = "Database" + msg := fmt.Sprintf(format, args...) + // Add descriptor-identifying prefix if it isn't there already. + // The prefix has the same format as the validation error wrapper. + msgPrefix := fmt.Sprintf("%s %q (%d): ", desc.TypeName(), desc.GetName(), desc.GetID()) + if msg[:len(msgPrefix)] == msgPrefix { + msgPrefix = "" } - return fmt.Sprintf("%s %3d: ParentID %3d, ParentSchemaID %2d, Name '%s': ", - header, desc.GetID(), desc.GetParentID(), desc.GetParentSchemaID(), desc.GetName()) + - fmt.Sprintf(format, args...) + "\n" + return fmt.Sprintf(" ParentID %3d, ParentSchemaID %2d: %s%s\n", + desc.GetParentID(), desc.GetParentSchemaID(), msgPrefix, msg) } diff --git a/pkg/sql/doctor/doctor_test.go b/pkg/sql/doctor/doctor_test.go index 4ac4fdff2868..9c33aa45b50d 100644 --- a/pkg/sql/doctor/doctor_test.go +++ b/pkg/sql/doctor/doctor_test.go @@ -60,8 +60,28 @@ var validTableDesc = &descpb.Descriptor{ }, } -func toBytes(t *testing.T, pb protoutil.Message) []byte { - res, err := protoutil.Marshal(pb) +func toBytes(t *testing.T, desc *descpb.Descriptor) []byte { + if desc.GetDatabase() != nil { + if desc.GetDatabase().Privileges == nil { + descpb.MaybeFixPrivileges(desc.GetDatabase().GetID(), &desc.GetDatabase().Privileges) + } + } else if desc.GetSchema() != nil { + if desc.GetSchema().Privileges == nil { + descpb.MaybeFixPrivileges(desc.GetSchema().GetID(), &desc.GetSchema().Privileges) + } + } else if tbl := descpb.TableFromDescriptor(desc, hlc.Timestamp{}); tbl != nil { + if tbl.Privileges == nil { + descpb.MaybeFixPrivileges(tbl.GetID(), &tbl.Privileges) + } + if tbl.FormatVersion == 0 { + 
tbl.FormatVersion = descpb.InterleavedFormatVersion + } + } else if typ := descpb.TypeFromDescriptor(desc, hlc.Timestamp{}); typ != nil { + if typ.Privileges == nil { + descpb.MaybeFixPrivileges(typ.GetID(), &typ.Privileges) + } + } + res, err := protoutil.Marshal(desc) require.NoError(t, err) return res } @@ -104,7 +124,7 @@ func TestExamineDescriptors(t *testing.T) { }, }, expected: `Examining 1 descriptors and 0 namespace entries... - Table 2: ParentID 0, ParentSchemaID 29, Name '': different id in descriptor table: 1 + ParentID 0, ParentSchemaID 29: relation "" (2): different id in descriptor table: 1 `, }, { @@ -117,7 +137,8 @@ func TestExamineDescriptors(t *testing.T) { }, }, expected: `Examining 1 descriptors and 0 namespace entries... - Table 1: ParentID 0, ParentSchemaID 29, Name 'foo': invalid parent ID 0 + ParentID 0, ParentSchemaID 29: relation "foo" (1): invalid parent ID 0 + ParentID 0, ParentSchemaID 29: relation "foo" (1): table must contain at least 1 column `, }, { @@ -133,7 +154,8 @@ func TestExamineDescriptors(t *testing.T) { {NameInfo: descpb.NameInfo{ParentSchemaID: 29, Name: "foo"}, ID: 1}, }, expected: `Examining 1 descriptors and 1 namespace entries... - Table 1: ParentID 0, ParentSchemaID 29, Name 'foo': invalid parent ID 0 + ParentID 0, ParentSchemaID 29: relation "foo" (1): invalid parent ID 0 + ParentID 0, ParentSchemaID 29: relation "foo" (1): table must contain at least 1 column `, }, { @@ -146,7 +168,7 @@ func TestExamineDescriptors(t *testing.T) { }, }, expected: `Examining 1 descriptors and 0 namespace entries... -Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but no namespace entry found + ParentID 0, ParentSchemaID 0: database "db" (1): not being dropped but no namespace entry found `, }, { @@ -164,8 +186,8 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but {NameInfo: descpb.NameInfo{Name: "db"}, ID: 2}, }, expected: `Examining 2 descriptors and 2 namespace entries... 
- Table 1: ParentID 2, ParentSchemaID 29, Name 't': namespace entry {ParentID:0 ParentSchemaID:29 Name:t} not found in draining names - Table 1: ParentID 2, ParentSchemaID 29, Name 't': could not find name in namespace table + ParentID 2, ParentSchemaID 29: relation "t" (1): namespace entry {ParentID:0 ParentSchemaID:29 Name:t} not found in draining names + ParentID 2, ParentSchemaID 29: relation "t" (1): could not find name in namespace table `, }, { @@ -181,7 +203,7 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but {NameInfo: descpb.NameInfo{ParentID: 2, Name: "schema"}, ID: 1}, }, expected: `Examining 1 descriptors and 1 namespace entries... - Schema 1: ParentID 2, ParentSchemaID 0, Name 'schema': invalid parent id 2 + ParentID 2, ParentSchemaID 0: schema "schema" (1): referenced database ID 2: descriptor not found `, }, { @@ -197,7 +219,8 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but {NameInfo: descpb.NameInfo{Name: "type"}, ID: 1}, }, expected: `Examining 1 descriptors and 1 namespace entries... 
- Type 1: ParentID 0, ParentSchemaID 0, Name 'type': invalid parentID 0 + ParentID 0, ParentSchemaID 0: type "type" (1): invalid parentID 0 + ParentID 0, ParentSchemaID 0: type "type" (1): invalid parent schema ID 0 `, }, { @@ -205,16 +228,23 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but { ID: 1, DescBytes: toBytes(t, &descpb.Descriptor{Union: &descpb.Descriptor_Type{ - Type: &descpb.TypeDescriptor{Name: "type", ID: 1, ParentSchemaID: 2}, + Type: &descpb.TypeDescriptor{Name: "type", ID: 1, ParentID: 3, ParentSchemaID: 2}, + }}), + }, + { + ID: 3, + DescBytes: toBytes(t, &descpb.Descriptor{Union: &descpb.Descriptor_Database{ + Database: &descpb.DatabaseDescriptor{Name: "db", ID: 3}, }}), }, }, namespaceTable: doctor.NamespaceTable{ - {NameInfo: descpb.NameInfo{ParentSchemaID: 2, Name: "type"}, ID: 1}, + {NameInfo: descpb.NameInfo{ParentID: 3, ParentSchemaID: 2, Name: "type"}, ID: 1}, + {NameInfo: descpb.NameInfo{Name: "db"}, ID: 3}, }, - expected: `Examining 1 descriptors and 1 namespace entries... - Type 1: ParentID 0, ParentSchemaID 2, Name 'type': invalid parentID 0 - Type 1: ParentID 0, ParentSchemaID 2, Name 'type': invalid parent schema id 2 + expected: `Examining 2 descriptors and 2 namespace entries... 
+ ParentID 3, ParentSchemaID 2: type "type" (1): referenced schema ID 2: descriptor not found + ParentID 3, ParentSchemaID 2: type "type" (1): arrayTypeID 0 does not exist for "ENUM": referenced type ID 0: descriptor not found `, }, { @@ -229,17 +259,25 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': not being dropped but { ID: 3, DescBytes: toBytes(t, &descpb.Descriptor{Union: &descpb.Descriptor_Schema{ - Schema: &descpb.SchemaDescriptor{Name: "schema", ID: 3, ParentID: 0}, + Schema: &descpb.SchemaDescriptor{Name: "schema", ID: 3, ParentID: 4}, + }}), + }, + { + ID: 4, + DescBytes: toBytes(t, &descpb.Descriptor{Union: &descpb.Descriptor_Database{ + Database: &descpb.DatabaseDescriptor{Name: "db2", ID: 4}, }}), }, }, namespaceTable: doctor.NamespaceTable{ {NameInfo: descpb.NameInfo{ParentID: 2, ParentSchemaID: 3, Name: "t"}, ID: 1}, {NameInfo: descpb.NameInfo{Name: "db"}, ID: 2}, - {NameInfo: descpb.NameInfo{ParentID: 0, Name: "schema"}, ID: 3}, + {NameInfo: descpb.NameInfo{ParentID: 4, Name: "schema"}, ID: 3}, + {NameInfo: descpb.NameInfo{Name: "db2"}, ID: 4}, }, - expected: `Examining 3 descriptors and 3 namespace entries... - Table 1: ParentID 2, ParentSchemaID 3, Name 't': invalid parent id of parent schema, expected 2, found 0 + expected: `Examining 4 descriptors and 4 namespace entries... + ParentID 2, ParentSchemaID 3: relation "t" (1): parent schema 3 is in different database 4 + ParentID 4, ParentSchemaID 0: schema "schema" (3): not present in parent database [4] schemas mapping `, }, { @@ -319,7 +357,7 @@ Row(s) [{ParentID:0 ParentSchemaID:0 Name:null}]: NULL value found {NameInfo: descpb.NameInfo{Name: "db2"}, ID: 1}, }, expected: `Examining 1 descriptors and 3 namespace entries... 
-Database 1: ParentID 0, ParentSchemaID 0, Name 'db': extra draining names found [{ParentID:0 ParentSchemaID:0 Name:db3}] + ParentID 0, ParentSchemaID 0: database "db" (1): extra draining names found [{ParentID:0 ParentSchemaID:0 Name:db3}] `, }, { @@ -337,7 +375,7 @@ Database 1: ParentID 0, ParentSchemaID 0, Name 'db': extra draining names f {NameInfo: descpb.NameInfo{Name: "db"}, ID: 2}, }, expected: `Examining 2 descriptors and 2 namespace entries... - Table 1: ParentID 2, ParentSchemaID 29, Name 't': dropped but namespace entry(s) found: [{2 29 t}] + ParentID 2, ParentSchemaID 29: relation "t" (1): dropped but namespace entry(s) found: [{2 29 t}] `, }, } diff --git a/pkg/sql/drop_index.go b/pkg/sql/drop_index.go index f08674288678..a19a9d385674 100644 --- a/pkg/sql/drop_index.go +++ b/pkg/sql/drop_index.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -506,11 +505,10 @@ func (p *planner) dropIndexByName( return err } - if err := tableDesc.Validate( - ctx, catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec), - ); err != nil { + if err := validateDescriptor(ctx, p, tableDesc); err != nil { return err } + mutationID := tableDesc.ClusterVersion.NextMutationID if err := p.writeSchemaChange(ctx, tableDesc, mutationID, jobDesc); err != nil { return err diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index c2677a030621..2041b214ad47 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -929,11 +929,6 @@ type ExecutorTestingKnobs struct { // a given table id. 
RunAfterSCJobsCacheLookup func(*jobs.Job) - // TestingDescriptorValidation dictates if stronger descriptor validation - // should be performed (typically turned on during tests only to guard against - // wild descriptors which are corrupted due to bugs). - TestingDescriptorValidation bool - // TestingSaveFlows, if set, will be called with the given stmt. The resulting // function will be called with the physical plan of that statement's main // query (i.e. no subqueries). The physical plan is only safe for use for the diff --git a/pkg/sql/logictest/testdata/logic_test/alter_primary_key b/pkg/sql/logictest/testdata/logic_test/alter_primary_key index 156f25db0893..0f3f0f7909ad 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_primary_key +++ b/pkg/sql/logictest/testdata/logic_test/alter_primary_key @@ -650,7 +650,7 @@ BEGIN statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) -statement error pq: unimplemented: cannot perform other schema changes in the same transaction as a primary key change +statement error pq: relation "t" \([0-9]+\): unimplemented: cannot perform other schema changes in the same transaction as a primary key change CREATE INDEX ON t (y) statement ok @@ -666,7 +666,7 @@ BEGIN statement ok ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (y) -statement error pq: unimplemented: cannot perform other schema changes in the same transaction as a primary key change +statement error pq: relation "t" \([0-9]+\): unimplemented: cannot perform other schema changes in the same transaction as a primary key change ALTER TABLE t ADD COLUMN z INT statement ok @@ -799,7 +799,7 @@ statement ok DROP TABLE IF EXISTS t; CREATE TABLE t (x INT PRIMARY KEY, y INT NOT NULL, FAMILY (x), FAMILY (y)) -statement error pq: unimplemented: primary key of table t dropped without subsequent addition of new primary key +statement error pq: relation "t" \([0-9]+\): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction 
ALTER TABLE t DROP CONSTRAINT "primary" statement error pq: multiple primary keys for table "t" are not allowed @@ -960,7 +960,7 @@ ALTER TABLE t1 DROP CONSTRAINT "primary" statement ok INSERT INTO t2 VALUES (1) -statement error pq: unimplemented: primary key of table t1 dropped without subsequent addition of new primary key +statement error pq: relation "t1" \([0-9]+\): unimplemented: primary key dropped without subsequent addition of new primary key in same transaction COMMIT query I diff --git a/pkg/sql/logictest/testdata/logic_test/system_columns b/pkg/sql/logictest/testdata/logic_test/system_columns index 8757f15f6309..ea805f297084 100644 --- a/pkg/sql/logictest/testdata/logic_test/system_columns +++ b/pkg/sql/logictest/testdata/logic_test/system_columns @@ -138,7 +138,7 @@ statement error pq: column \"crdb_internal_mvcc_timestamp\" does not exist INSERT INTO t VALUES (1, 2, 3) RETURNING crdb_internal_mvcc_timestamp # Ensure that we can't create columns that conflict with system column names. 
-statement error pq: column name "crdb_internal_mvcc_timestamp" conflicts with a system column name +statement error pq: relation "bad" \([0-9]+\): column name "crdb_internal_mvcc_timestamp" conflicts with a system column name CREATE TABLE bad (crdb_internal_mvcc_timestamp int) statement error pq: column name "crdb_internal_mvcc_timestamp" conflicts with a system column name @@ -196,5 +196,5 @@ SELECT tableoid, x FROM tab3@i WHERE x = 1 ---- 58 1 -statement error pq: column name "tableoid" conflicts with a system column name +statement error pq: relation "bad" \([0-9]+\): column name "tableoid" conflicts with a system column name CREATE TABLE bad (tableoid int) diff --git a/pkg/sql/namespace_test.go b/pkg/sql/namespace_test.go index 311106e223df..f2c0faf3c6a7 100644 --- a/pkg/sql/namespace_test.go +++ b/pkg/sql/namespace_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -140,7 +141,7 @@ func TestNamespaceTableSemantics(t *testing.T) { keys.PublicSchemaID, "rel", hlc.Timestamp{}, - &descpb.PrivilegeDescriptor{}, + descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), tree.PersistencePermanent, ) if err := desc.AllocateIDs(ctx); err != nil { diff --git a/pkg/sql/pgwire/testdata/pgtest/notice b/pkg/sql/pgwire/testdata/pgtest/notice index 593395bfd527..30d967ea4267 100644 --- a/pkg/sql/pgwire/testdata/pgtest/notice +++ b/pkg/sql/pgwire/testdata/pgtest/notice @@ -55,7 +55,7 @@ Query {"String": "DROP INDEX t_x_idx"} until crdb_only CommandComplete ---- -{"Severity":"NOTICE","SeverityUnlocalized":"","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the 
table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":521,"Routine":"dropIndexByName","UnknownFields":null} +{"Severity":"NOTICE","SeverityUnlocalized":"","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":519,"Routine":"dropIndexByName","UnknownFields":null} {"Type":"CommandComplete","CommandTag":"DROP INDEX"} until noncrdb_only diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 13d5aab26ad2..35d1950f0b98 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" @@ -789,3 +790,10 @@ func (p *planner) CompactEngineSpan( _, err = client.CompactEngineSpan(ctx, req) return err } + +// validateDescriptor is a convenience function for validating +// descriptors in the context of a planner. 
+func validateDescriptor(ctx context.Context, p *planner, descriptor catalog.Descriptor) error { + bdg := catalogkv.NewOneLevelUncachedDescGetter(p.Txn(), p.ExecCfg().Codec) + return catalog.ValidateSelfAndCrossReferences(ctx, bdg, descriptor) +} diff --git a/pkg/sql/rename_column.go b/pkg/sql/rename_column.go index fe56fce09d78..6a9fb54069b6 100644 --- a/pkg/sql/rename_column.go +++ b/pkg/sql/rename_column.go @@ -13,7 +13,6 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -80,9 +79,7 @@ func (n *renameColumnNode) startExec(params runParams) error { return nil } - if err := tableDesc.Validate( - ctx, catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec), - ); err != nil { + if err := validateDescriptor(ctx, p, tableDesc); err != nil { return err } diff --git a/pkg/sql/rename_index.go b/pkg/sql/rename_index.go index e4d1de5993cd..911ade726d63 100644 --- a/pkg/sql/rename_index.go +++ b/pkg/sql/rename_index.go @@ -13,7 +13,6 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -106,9 +105,7 @@ func (n *renameIndexNode) startExec(params runParams) error { return err } - if err := tableDesc.Validate( - ctx, catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec), - ); err != nil { + if err := validateDescriptor(ctx, p, tableDesc); err != nil { return err } diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 896ebfd8f8f7..fc791572c27e 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -218,9 +218,7 @@ func (n *renameTableNode) startExec(params runParams) error { 
tableDesc.SetName(newTn.Table()) tableDesc.ParentID = targetDbDesc.GetID() - if err := tableDesc.Validate( - ctx, catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec), - ); err != nil { + if err := validateDescriptor(ctx, p, tableDesc); err != nil { return err } diff --git a/pkg/sql/repair.go b/pkg/sql/repair.go index 3f9327e636e4..a12eab24071e 100644 --- a/pkg/sql/repair.go +++ b/pkg/sql/repair.go @@ -144,6 +144,11 @@ func (p *planner) UnsafeUpsertDescriptor( default: return errors.AssertionFailedf("unknown descriptor type %T for id %d", existing, id) } + + if force { + p.Descriptors().SkipValidationOnWrite() + } + { b := p.txn.NewBatch() if err := p.Descriptors().WriteDescToBatch( @@ -380,32 +385,24 @@ func (p *planner) UnsafeUpsertNamespaceEntry( if val.Value != nil { existingID = descpb.ID(val.ValueInt()) } + flags := p.CommonLookupFlags(true /* required */) + flags.IncludeDropped = true + flags.IncludeOffline = true validateDescriptor := func() error { - desc, err := p.Descriptors().GetMutableDescriptorByID(ctx, descID, p.txn) + desc, err := p.Descriptors().GetImmutableDescriptorByID(ctx, p.Txn(), descID, flags) if err != nil && descID != keys.PublicSchemaID { return errors.Wrapf(err, "failed to retrieve descriptor %d", descID) } + invalid := false switch desc.(type) { case nil: return nil - case *tabledesc.Mutable, *typedesc.Mutable: - if parentID == 0 || parentSchemaID == 0 { - return pgerror.Newf(pgcode.InvalidCatalogName, - "invalid prefix (%d, %d) for object %d", - parentID, parentSchemaID, descID) - } - case *schemadesc.Mutable: - if parentID == 0 || parentSchemaID != 0 { - return pgerror.Newf(pgcode.InvalidCatalogName, - "invalid prefix (%d, %d) for schema %d", - parentID, parentSchemaID, descID) - } - case *dbdesc.Mutable: - if parentID != 0 || parentSchemaID != 0 { - return pgerror.Newf(pgcode.InvalidCatalogName, - "invalid prefix (%d, %d) for database %d", - parentID, parentSchemaID, descID) - } + case catalog.TableDescriptor, 
catalog.TypeDescriptor: + invalid = parentID == descpb.InvalidID || parentSchemaID == descpb.InvalidID + case catalog.SchemaDescriptor: + invalid = parentID == descpb.InvalidID || parentSchemaID != descpb.InvalidID + case catalog.DatabaseDescriptor: + invalid = parentID != descpb.InvalidID || parentSchemaID != descpb.InvalidID default: // The public schema does not have a descriptor. if descID == keys.PublicSchemaID { @@ -414,15 +411,19 @@ func (p *planner) UnsafeUpsertNamespaceEntry( return errors.AssertionFailedf( "unexpected descriptor type %T for descriptor %d", desc, descID) } + + if invalid { + return pgerror.Newf(pgcode.InvalidCatalogName, + "invalid prefix (%d, %d) for %s %d", + parentID, parentSchemaID, desc.TypeName(), descID) + } return nil } validateParentDescriptor := func() error { - if parentID == 0 { + if parentID == descpb.InvalidID { return nil } - parent, err := p.Descriptors().GetMutableDescriptorByID( - ctx, parentID, p.txn, - ) + parent, err := p.Descriptors().GetImmutableDescriptorByID(ctx, p.Txn(), parentID, flags) if err != nil { return errors.Wrapf(err, "failed to look up parent %d", parentID) } @@ -433,12 +434,10 @@ func (p *planner) UnsafeUpsertNamespaceEntry( return nil } validateParentSchemaDescriptor := func() error { - if parentSchemaID == 0 || parentSchemaID == keys.PublicSchemaID { + if parentSchemaID == descpb.InvalidID || parentSchemaID == keys.PublicSchemaID { return nil } - schema, err := p.Descriptors().GetMutableDescriptorByID( - ctx, parentSchemaID, p.txn, - ) + schema, err := p.Descriptors().GetImmutableDescriptorByID(ctx, p.Txn(), parentSchemaID, flags) if err != nil { return err } @@ -520,7 +519,10 @@ func (p *planner) UnsafeDeleteNamespaceEntry( parentID, parentSchemaID, name, existingID, descID) } } - desc, err := p.Descriptors().GetMutableDescriptorByID(ctx, descID, p.txn) + flags := p.CommonLookupFlags(true /* required */) + flags.IncludeDropped = true + flags.IncludeOffline = true + desc, err := 
p.Descriptors().GetImmutableDescriptorByID(ctx, p.txn, descID, flags) var forceNoticeString string // for the event if err != nil && !errors.Is(err, catalog.ErrDescriptorNotFound) { if force { @@ -585,6 +587,9 @@ func (p *planner) UnsafeDeleteDescriptor(ctx context.Context, descID int64, forc if err := p.Descriptors().AddUncommittedDescriptor(mut); err != nil { return errors.WithAssertionFailure(err) } + if force { + p.Descriptors().SkipValidationOnWrite() + } } descKey := catalogkeys.MakeDescMetadataKey(p.execCfg.Codec, id) if err := p.txn.Del(ctx, descKey); err != nil { diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index e1b2562030fb..0554c38731bb 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -1277,15 +1277,6 @@ func (sc *SchemaChanger) done(ctx context.Context) error { localityConfigToSwapTo, ) } - - // Validate the new locality before updating the table descriptor. - dg := catalogkv.NewOneLevelUncachedDescGetter(txn, sc.execCfg.Codec) - if err := scTable.ValidateTableLocalityConfig( - ctx, - dg, - ); err != nil { - return err - } } // If any old index had an interleaved parent, remove the diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 1e9ed536397c..805f82252e1b 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -2511,7 +2511,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // Test that trying different schema changes results an error. 
_, err := sqlDB.Exec(`ALTER TABLE t.test ADD COLUMN z INT`) - expected := "pq: unimplemented: cannot perform a schema change operation while a primary key change is in progress" + expected := `pq: relation "test" \(53\): unimplemented: cannot perform a schema change operation while a primary key change is in progress` if !testutils.IsError(err, expected) { t.Fatalf("expected to find error %s but found %+v", expected, err) } diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index 82b989495876..f52df6f713f0 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go @@ -423,7 +423,17 @@ func createConstraintCheckOperations( asOf hlc.Timestamp, ) (results []checkOperation, err error) { dg := catalogkv.NewOneLevelUncachedDescGetter(p.txn, p.ExecCfg().Codec) - constraints, err := tableDesc.GetConstraintInfo(ctx, dg) + constraints, err := tableDesc.GetConstraintInfoWithLookup(func(id descpb.ID) (catalog.TableDescriptor, error) { + desc, err := dg.GetDesc(ctx, id) + if err != nil { + return nil, err + } + table, ok := desc.(catalog.TableDescriptor) + if !ok { + return nil, catalog.WrapTableDescRefErr(id, catalog.ErrDescriptorNotFound) + } + return table, nil + }) if err != nil { return nil, err } diff --git a/pkg/sql/table.go b/pkg/sql/table.go index eb7992319d8f..0807993dc905 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -281,7 +282,7 @@ func (p *planner) writeTableDescToBatch( } } - if err := tableDesc.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(tableDesc); err != nil { return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, 
tableDesc) } diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index c879ea3096e2..2e527f35e474 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -393,7 +393,7 @@ func TestPrimaryKeyUnspecified(t *testing.T) { } desc.SetPrimaryIndex(descpb.IndexDescriptor{}) - err = desc.ValidateSelf(ctx) + err = catalog.ValidateSelf(desc) if !testutils.IsError(err, tabledesc.ErrMissingPrimaryKey.Error()) { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/sql/tests/repair_test.go b/pkg/sql/tests/repair_test.go index 823f23283865..fd9e8bac2eb3 100644 --- a/pkg/sql/tests/repair_test.go +++ b/pkg/sql/tests/repair_test.go @@ -95,7 +95,7 @@ func TestDescriptorRepairOrphanedDescriptors(t *testing.T) { _, err := db.Exec( "SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1", descID) - require.Regexp(t, "internal error: desc 53: parentID 52 does not exist", err) + require.Regexp(t, `pq: relation "foo" \(53\): referenced database ID 52: descriptor not found`, err) // In this case, we're treating the injected descriptor as having no data // so we can clean it up by just deleting the erroneous descriptor and @@ -146,7 +146,7 @@ func TestDescriptorRepairOrphanedDescriptors(t *testing.T) { _, err := db.Exec( "SELECT count(*) FROM \"\".crdb_internal.tables WHERE table_id = $1", descID) - require.Regexp(t, "internal error: desc 53: parentID 52 does not exist", err) + require.Regexp(t, `pq: relation "foo" \(53\): referenced database ID 52: descriptor not found`, err) // In this case, we're going to inject a parent database require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error { @@ -332,7 +332,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) upsertInvalidateDuplicateColumnDescriptor, }, op: `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52);`, - expErrRE: `failed to retrieve descriptor 52: duplicate column name: "i"`, + expErrRE: `relation "foo" \(52\): duplicate column name: 
"i"`, }, { // Upsert a descriptor which is invalid, then try to upsert a namespace @@ -344,7 +344,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) expEventLogEntries: []eventLogPattern{ { typ: "unsafe_upsert_namespace_entry", - info: `"Force":true,"FailedValidation":true,"ValidationErrors":"failed to retrieve descriptor 52: duplicate column name: \\"i\\""`, + info: `"Force":true,"FailedValidation":true,"ValidationErrors":".*duplicate column name: \\"i\\""`, }, }, }, @@ -356,7 +356,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`, }, op: `SELECT crdb_internal.unsafe_delete_descriptor(52);`, - expErrRE: `duplicate column name: "i"`, + expErrRE: `pq: crdb_internal.unsafe_delete_descriptor\(\): relation "foo" \(52\): duplicate column name: "i"`, }, { // Upsert a descriptor which is invalid, upsert a namespace entry for it, @@ -369,7 +369,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) expEventLogEntries: []eventLogPattern{ { typ: "unsafe_delete_descriptor", - info: `"Force":true,"ForceNotice":"[^"]*duplicate column name: \\"i\\""`, + info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`, }, }, }, @@ -381,7 +381,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`, }, op: updateInvalidateDuplicateColumnDescriptorNoForce, - expErrRE: `duplicate column name: "i"`, + expErrRE: `pq: crdb_internal.unsafe_upsert_descriptor\(\): relation "foo" \(52\): duplicate column name: "i"`, }, { // Upsert a descriptor which is invalid, upsert a namespace entry for it, @@ -394,7 +394,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) expEventLogEntries: []eventLogPattern{ { typ: "unsafe_upsert_descriptor", - info: `"Force":true,"ForceNotice":"[^"]*duplicate 
column name: \\"i\\""`, + info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`, }, }, after: []string{ @@ -410,7 +410,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) `SELECT crdb_internal.unsafe_upsert_namespace_entry(50, 29, 'foo', 52, true);`, }, op: `SELECT crdb_internal.unsafe_delete_namespace_entry(50, 29, 'foo', 52);`, - expErrRE: `duplicate column name: "i"`, + expErrRE: `pq: crdb_internal.unsafe_delete_namespace_entry\(\): failed to retrieve descriptor 52: relation "foo" \(52\): duplicate column name: "i"`, }, { // Upsert a descriptor which is invalid, upsert a namespace entry for it, @@ -423,7 +423,7 @@ SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", 0, 'foo', id) expEventLogEntries: []eventLogPattern{ { typ: "unsafe_delete_namespace_entry", - info: `"Force":true,"ForceNotice":"[^"]*duplicate column name: \\"i\\""`, + info: `"Force":true,"ForceNotice":".*duplicate column name: \\"i\\""`, }, }, }, diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index 47519c93cba7..25ee0b7fb2db 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -158,7 +158,6 @@ func TestInitialKeysAndSplits(t *testing.T) { func TestSystemTableLiterals(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - ctx := context.Background() type testcase struct { id descpb.ID schema string @@ -204,7 +203,7 @@ func TestSystemTableLiterals(t *testing.T) { if err != nil { t.Fatalf("test: %+v, err: %v", test, err) } - require.NoError(t, gen.ValidateSelf(ctx)) + require.NoError(t, catalog.ValidateSelf(gen)) if !test.pkg.TableDesc().Equal(gen.TableDesc()) { diff := strings.Join(pretty.Diff(test.pkg.TableDesc(), gen.TableDesc()), "\n") diff --git a/pkg/sql/virtual_schema.go b/pkg/sql/virtual_schema.go index 6a8179c119c4..0a2fa7bb8071 100644 --- a/pkg/sql/virtual_schema.go +++ b/pkg/sql/virtual_schema.go @@ -645,7 +645,7 @@ func 
NewVirtualSchemaHolder( } } td := tabledesc.NewImmutable(tableDesc) - if err := td.ValidateSelf(ctx); err != nil { + if err := catalog.ValidateSelf(td); err != nil { return nil, errors.NewAssertionErrorWithWrappedErrf(err, "failed to validate virtual table %s: programmer error", errors.Safe(td.GetName())) } @@ -736,7 +736,7 @@ func (vs *VirtualSchemaHolder) getVirtualTableEntry(tn *tree.TableName) (*virtua func (vs *VirtualSchemaHolder) getVirtualTableEntryByID(id descpb.ID) (*virtualDefEntry, error) { entry, ok := vs.defsByID[id] if !ok { - return nil, catalog.ErrDescriptorNotFound + return nil, catalog.WrapTableDescRefErr(id, catalog.ErrDescriptorNotFound) } return entry, nil } From 4d9034ca948479faaf0f784a6263f07a6d13d5cd Mon Sep 17 00:00:00 2001 From: Marius Posta Date: Thu, 25 Feb 2021 10:46:37 -0500 Subject: [PATCH 2/2] doctor: add validation runtime error recovery Previously, doctor could crash when validating corrupted descriptors. The validation code is usually cautious enough to not dereference nils or the like but this isn't enforced in any way. This commit protects the validation calls in doctor with panic recovery, allowing doctor to continue and validate the remaining descriptors. Release justification: non-production code change. 
Release note: None --- pkg/sql/doctor/doctor.go | 19 ++++++++++++++++- pkg/sql/doctor/doctor_test.go | 40 +++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/pkg/sql/doctor/doctor.go b/pkg/sql/doctor/doctor.go index b918e01b6b5a..c24902a903fd 100644 --- a/pkg/sql/doctor/doctor.go +++ b/pkg/sql/doctor/doctor.go @@ -140,7 +140,7 @@ func ExamineDescriptors( continue } - for _, err := range catalog.Validate(ctx, descGetter, catalog.ValidationLevelSelfAndCrossReferences, desc).Errors() { + for _, err := range validateSafely(ctx, descGetter, desc) { problemsFound = true fmt.Fprint(stdout, reportMsg(desc, "%s", err)) } @@ -237,6 +237,23 @@ func ExamineDescriptors( return !problemsFound, err } +func validateSafely( + ctx context.Context, descGetter catalog.MapDescGetter, desc catalog.Descriptor, +) (errs []error) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = errors.Newf("%v", r) + } + err = errors.WithAssertionFailure(errors.Wrap(err, "validation")) + errs = append(errs, err) + } + }() + errs = append(errs, catalog.Validate(ctx, descGetter, catalog.ValidationLevelSelfAndCrossReferences, desc).Errors()...) + return errs +} + // ExamineJobs runs a suite of consistency checks over the system.jobs table. func ExamineJobs( ctx context.Context, diff --git a/pkg/sql/doctor/doctor_test.go b/pkg/sql/doctor/doctor_test.go index 9c33aa45b50d..8101482ec981 100644 --- a/pkg/sql/doctor/doctor_test.go +++ b/pkg/sql/doctor/doctor_test.go @@ -245,6 +245,46 @@ func TestExamineDescriptors(t *testing.T) { expected: `Examining 2 descriptors and 2 namespace entries... 
ParentID 3, ParentSchemaID 2: type "type" (1): referenced schema ID 2: descriptor not found ParentID 3, ParentSchemaID 2: type "type" (1): arrayTypeID 0 does not exist for "ENUM": referenced type ID 0: descriptor not found +`, + }, + { + descTable: doctor.DescriptorTable{ + { + ID: 51, + DescBytes: toBytes(t, &descpb.Descriptor{Union: &descpb.Descriptor_Database{ + Database: &descpb.DatabaseDescriptor{Name: "db", ID: 51}, + }}), + }, + { + ID: 52, + DescBytes: func() []byte { + // Skip `toBytes` to produce a descriptor with unset privileges field. + // The purpose of this is to produce a nil dereference during validation + // in order to test that doctor recovers from this. + // + // Note that it might be the case that validation ought to check that + // this field is not nil in the first place, in which case this test case + // will need to craft a corrupt descriptor serialization in a more + // creative way. Ideally validation code should never cause runtime errors + // but there's no way to guarantee that short of formally verifying it. We + // therefore have to consider the possibility of runtime errors (sadly) and + // doctor should absolutely make every possible effort to continue executing + // in the face of these, considering its main use case! + desc := &descpb.Descriptor{Union: &descpb.Descriptor_Type{ + Type: &descpb.TypeDescriptor{Name: "type", ID: 52, ParentID: 51, ParentSchemaID: keys.PublicSchemaID}, + }} + res, err := protoutil.Marshal(desc) + require.NoError(t, err) + return res + }(), + }, + }, + namespaceTable: doctor.NamespaceTable{ + {NameInfo: descpb.NameInfo{Name: "db"}, ID: 51}, + {NameInfo: descpb.NameInfo{ParentID: 51, ParentSchemaID: keys.PublicSchemaID, Name: "type"}, ID: 52}, + }, + expected: `Examining 2 descriptors and 2 namespace entries... + ParentID 51, ParentSchemaID 29: type "type" (52): validation: runtime error: invalid memory address or nil pointer dereference `, }, {