From 0f46ff6a0b85b22601b7d7eba4d9f59019e55146 Mon Sep 17 00:00:00 2001 From: Marius Posta Date: Thu, 18 Aug 2022 15:29:52 -0400 Subject: [PATCH 1/2] catalog: spin off and adopt back-reference validation level Previously, forward- and backward-reference-checking was done in the same validation phase, identified by the catalog.ValidationLevelCrossReferences constant and a similarly named method on catalog.Descriptor. This validation phase was exercised both when reading and when writing descriptors. The problem with this was that a corrupt back-reference would make a table unusable until descriptor surgery was performed. While this might be warranted in the context of schema changes, that's not the case when it comes to queries, which don't really care about these back-references. For this reason, we want to bypass back-reference validation when reading descriptors for the purpose of serving queries. These reads are characterized by the flag AvoidLeased being unset. To do this, this commit splits the cross-reference validation level into two. The back-reference level now exists to check the integrity of back-references directly, as well as to check that forward references also have matching back-references in the referenced descriptors. Fixes #85263. 
Release justification: low-risk, high benefit change Release note: None --- pkg/ccl/backupccl/restore_job.go | 65 ++++---- pkg/sql/catalog/dbdesc/database_desc.go | 17 ++- pkg/sql/catalog/dbdesc/database_test.go | 2 +- pkg/sql/catalog/descriptor.go | 7 +- pkg/sql/catalog/errors.go | 1 - pkg/sql/catalog/funcdesc/BUILD.bazel | 1 + pkg/sql/catalog/funcdesc/func_desc.go | 30 +++- pkg/sql/catalog/funcdesc/func_desc_test.go | 3 +- pkg/sql/catalog/internal/validate/validate.go | 27 +++- pkg/sql/catalog/nstree/catalog.go | 2 +- pkg/sql/catalog/schemadesc/schema_desc.go | 59 +++---- .../catalog/schemadesc/schema_desc_test.go | 2 +- .../schemadesc/synthetic_schema_desc.go | 6 +- pkg/sql/catalog/tabledesc/ttl.go | 1 + pkg/sql/catalog/tabledesc/validate.go | 144 ++++++++++++------ pkg/sql/catalog/tabledesc/validate_test.go | 2 +- .../typedesc/table_implicit_record_type.go | 10 +- pkg/sql/catalog/typedesc/type_desc.go | 42 +++-- pkg/sql/catalog/typedesc/type_desc_test.go | 2 +- pkg/sql/catalog/validate.go | 47 ++++-- .../testdata/logic_test/row_level_ttl | 10 +- .../testdata/logic_test/schema_repair | 114 +++++++++++++- pkg/sql/planner.go | 2 +- .../alter_table_alter_primary_key.go | 20 +-- .../scdeps/sctestdeps/test_deps.go | 8 +- pkg/sql/testdata/telemetry/error | 4 +- pkg/sql/tests/repair_test.go | 4 + 27 files changed, 449 insertions(+), 183 deletions(-) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index c9ad3567361b..3239cbf05e31 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -2337,7 +2337,12 @@ func (r *restoreResumer) dropDescriptors( // Delete any schema descriptors that this restore created. Also collect the // descriptors so we can update their parent databases later. 
- dbsWithDeletedSchemas := make(map[descpb.ID][]catalog.Descriptor) + type dbWithDeletedSchemas struct { + db *dbdesc.Mutable + schemas []catalog.Descriptor + } + + dbsWithDeletedSchemas := make(map[descpb.ID]dbWithDeletedSchemas) for _, schemaDesc := range details.SchemaDescs { // We need to ignore descriptors we just added since we haven't committed the txn that deletes these. isSchemaEmpty, err := isSchemaEmpty(ctx, txn, schemaDesc.GetID(), allDescs, ignoredChildDescIDs) @@ -2354,49 +2359,53 @@ func (r *restoreResumer) dropDescriptors( if err != nil { return err } + entry, hasEntry := dbsWithDeletedSchemas[schemaDesc.GetParentID()] + if !hasEntry { + mutParent, err := descsCol.GetMutableDescriptorByID(ctx, txn, schemaDesc.GetParentID()) + if err != nil { + return err + } + entry.db = mutParent.(*dbdesc.Mutable) + } - // Mark schema as dropped and add uncommitted version to pass pre-txn - // descriptor validation. + // Delete schema entries in descriptor and namespace system tables. + b.Del(catalogkeys.EncodeNameKey(codec, mutSchema)) + b.Del(catalogkeys.MakeDescMetadataKey(codec, mutSchema.GetID())) + descsCol.NotifyOfDeletedDescriptor(mutSchema.GetID()) + // Add dropped descriptor as uncommitted to satisfy descriptor validation. mutSchema.SetDropped() mutSchema.MaybeIncrementVersion() if err := descsCol.AddUncommittedDescriptor(ctx, mutSchema); err != nil { return err } - b.Del(catalogkeys.EncodeNameKey(codec, mutSchema)) - b.Del(catalogkeys.MakeDescMetadataKey(codec, mutSchema.GetID())) - descsCol.NotifyOfDeletedDescriptor(mutSchema.GetID()) - dbsWithDeletedSchemas[mutSchema.GetParentID()] = append(dbsWithDeletedSchemas[mutSchema.GetParentID()], mutSchema) + // Remove the back-reference to the deleted schema in the parent database. 
+ if schemaInfo, ok := entry.db.Schemas[schemaDesc.GetName()]; !ok { + log.Warningf(ctx, "unexpected missing schema entry for %s from db %d; skipping deletion", + schemaDesc.GetName(), entry.db.GetID()) + } else if schemaInfo.ID != schemaDesc.GetID() { + log.Warningf(ctx, "unexpected schema entry %d for %s from db %d, expecting %d; skipping deletion", + schemaInfo.ID, schemaDesc.GetName(), entry.db.GetID(), schemaDesc.GetID()) + } else { + delete(entry.db.Schemas, schemaDesc.GetName()) + } + + entry.schemas = append(entry.schemas, mutSchema) + dbsWithDeletedSchemas[entry.db.GetID()] = entry } // For each database that had a child schema deleted (regardless of whether // the db was created in the restore job), if it wasn't deleted just now, - // delete the now-deleted child schema from its schema map. + // write the updated descriptor with the now-deleted child schemas from its + // schema map. // // This cleanup must be done prior to dropping the database descriptors in the // loop below so that we do not accidentally `b.Put` the descriptor with the // modified schema slice after we have issued a `b.Del` to drop it. 
- for dbID, schemas := range dbsWithDeletedSchemas { - log.Infof(ctx, "deleting %d schema entries from database %d", len(schemas), dbID) - desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, dbID) - if err != nil { - return err - } - db := desc.(*dbdesc.Mutable) - for _, sc := range schemas { - if schemaInfo, ok := db.Schemas[sc.GetName()]; !ok { - log.Warningf(ctx, "unexpected missing schema entry for %s from db %d; skipping deletion", - sc.GetName(), dbID) - } else if schemaInfo.ID != sc.GetID() { - log.Warningf(ctx, "unexpected schema entry %d for %s from db %d, expecting %d; skipping deletion", - schemaInfo.ID, sc.GetName(), dbID, sc.GetID()) - } else { - delete(db.Schemas, sc.GetName()) - } - } - + for dbID, entry := range dbsWithDeletedSchemas { + log.Infof(ctx, "deleting %d schema entries from database %d", len(entry.schemas), dbID) if err := descsCol.WriteDescToBatch( - ctx, false /* kvTrace */, db, b, + ctx, false /* kvTrace */, entry.db, b, ); err != nil { return err } diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go index 1e8164666924..b6cf6f09284f 100644 --- a/pkg/sql/catalog/dbdesc/database_desc.go +++ b/pkg/sql/catalog/dbdesc/database_desc.go @@ -278,8 +278,8 @@ func (desc *immutable) GetReferencedDescIDs() (catalog.DescriptorIDSet, error) { return ids, nil } -// ValidateCrossReferences implements the catalog.Descriptor interface. -func (desc *immutable) ValidateCrossReferences( +// ValidateForwardReferences implements the catalog.Descriptor interface. +func (desc *immutable) ValidateForwardReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Check multi-region enum type. @@ -302,13 +302,11 @@ func (desc *immutable) ValidateCrossReferences( } } -// ValidateTxnCommit implements the catalog.Descriptor interface. -func (desc *immutable) ValidateTxnCommit( +// ValidateBackReferences implements the catalog.Descriptor interface. 
+func (desc *immutable) ValidateBackReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Check schema references. - // This could be done in ValidateCrossReferences but it can be quite expensive - // so we do it here instead. for schemaName, schemaInfo := range desc.Schemas { report := func(err error) { vea.Report(errors.Wrapf(err, "schema mapping entry %q (%d)", @@ -332,6 +330,13 @@ func (desc *immutable) ValidateTxnCommit( } } +// ValidateTxnCommit implements the catalog.Descriptor interface. +func (desc *immutable) ValidateTxnCommit( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // No-op. +} + // MaybeIncrementVersion implements the MutableDescriptor interface. func (desc *Mutable) MaybeIncrementVersion() { // Already incremented, no-op. diff --git a/pkg/sql/catalog/dbdesc/database_test.go b/pkg/sql/catalog/dbdesc/database_test.go index 25bf9c0dbdd0..48c230c37be7 100644 --- a/pkg/sql/catalog/dbdesc/database_test.go +++ b/pkg/sql/catalog/dbdesc/database_test.go @@ -279,7 +279,7 @@ func TestValidateCrossDatabaseReferences(t *testing.T) { return nil }) expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) - results := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelAllPreTxnCommit, desc) + results := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, validate.Write, desc) if err := results.CombinedError(); err == nil { if test.err != "" { t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, test.desc) diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index b4152e643ac9..de4c982b84f6 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -199,8 +199,11 @@ type Descriptor interface { // ValidateSelf checks the internal consistency of the descriptor. 
ValidateSelf(vea ValidationErrorAccumulator) - // ValidateCrossReferences performs cross-reference checks. - ValidateCrossReferences(vea ValidationErrorAccumulator, vdg ValidationDescGetter) + // ValidateForwardReferences performs forward-reference checks. + ValidateForwardReferences(vea ValidationErrorAccumulator, vdg ValidationDescGetter) + + // ValidateBackReferences performs back-reference checks. + ValidateBackReferences(vea ValidationErrorAccumulator, vdg ValidationDescGetter) // ValidateTxnCommit performs pre-commit checks. ValidateTxnCommit(vea ValidationErrorAccumulator, vdg ValidationDescGetter) diff --git a/pkg/sql/catalog/errors.go b/pkg/sql/catalog/errors.go index 1152ecf319e8..15515cadc384 100644 --- a/pkg/sql/catalog/errors.go +++ b/pkg/sql/catalog/errors.go @@ -165,7 +165,6 @@ func AsFunctionDescriptor(desc Descriptor) (FunctionDescriptor, error) { // WrapDescRefErr wraps an error pertaining to a descriptor id. func WrapDescRefErr(id descpb.ID, err error) error { return errors.Wrapf(err, "referenced descriptor ID %d", errors.Safe(id)) - } // WrapDatabaseDescRefErr wraps an error pertaining to a database descriptor id. 
diff --git a/pkg/sql/catalog/funcdesc/BUILD.bazel b/pkg/sql/catalog/funcdesc/BUILD.bazel index dc465c382f59..1a90ff48590f 100644 --- a/pkg/sql/catalog/funcdesc/BUILD.bazel +++ b/pkg/sql/catalog/funcdesc/BUILD.bazel @@ -45,6 +45,7 @@ go_test( "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/internal/validate", "//pkg/sql/catalog/nstree", "//pkg/sql/catalog/schemadesc", "//pkg/sql/catalog/tabledesc", diff --git a/pkg/sql/catalog/funcdesc/func_desc.go b/pkg/sql/catalog/funcdesc/func_desc.go index dd54520b199d..1ae724e30d18 100644 --- a/pkg/sql/catalog/funcdesc/func_desc.go +++ b/pkg/sql/catalog/funcdesc/func_desc.go @@ -221,8 +221,8 @@ func (desc *immutable) ValidateSelf(vea catalog.ValidationErrorAccumulator) { } } -// ValidateCrossReferences implements the catalog.Descriptor interface. -func (desc *immutable) ValidateCrossReferences( +// ValidateForwardReferences implements the catalog.Descriptor interface. +func (desc *immutable) ValidateForwardReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Check that parent DB exists. @@ -240,14 +240,33 @@ func (desc *immutable) ValidateCrossReferences( } else if scDesc.Dropped() { vea.Report(errors.AssertionFailedf("parent schema %q (%d) is dropped", scDesc.GetName(), scDesc.GetID())) } - vea.Report(desc.validateFuncExistsInSchema(scDesc)) for _, depID := range desc.DependsOn { - vea.Report(catalog.ValidateOutboundTableRef(desc.ID, depID, vdg)) + vea.Report(catalog.ValidateOutboundTableRef(depID, vdg)) } for _, typeID := range desc.DependsOnTypes { - vea.Report(catalog.ValidateOutboundTypeRef(desc.ID, typeID, vdg)) + vea.Report(catalog.ValidateOutboundTypeRef(typeID, vdg)) + } +} + +// ValidateBackReferences implements the catalog.Descriptor interface. +func (desc *immutable) ValidateBackReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // Check that function exists in parent schema. 
+ if sc, err := vdg.GetSchemaDescriptor(desc.GetParentSchemaID()); err == nil { + vea.Report(desc.validateFuncExistsInSchema(sc)) + } + + for _, depID := range desc.DependsOn { + tbl, _ := vdg.GetTableDescriptor(depID) + vea.Report(catalog.ValidateOutboundTableRefBackReference(desc.GetID(), tbl)) + } + + for _, typeID := range desc.DependsOnTypes { + typ, _ := vdg.GetTypeDescriptor(typeID) + vea.Report(catalog.ValidateOutboundTypeRefBackReference(desc.GetID(), typ)) } // Currently, we don't support cross function references yet. @@ -255,7 +274,6 @@ func (desc *immutable) ValidateCrossReferences( for _, by := range desc.DependedOnBy { vea.Report(desc.validateInboundTableRef(by, vdg)) } - } func (desc *immutable) validateFuncExistsInSchema(scDesc catalog.SchemaDescriptor) error { diff --git a/pkg/sql/catalog/funcdesc/func_desc_test.go b/pkg/sql/catalog/funcdesc/func_desc_test.go index 2412347bf344..6260442f24f6 100644 --- a/pkg/sql/catalog/funcdesc/func_desc_test.go +++ b/pkg/sql/catalog/funcdesc/func_desc_test.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/validate" "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -459,7 +460,7 @@ func TestValidateFuncDesc(t *testing.T) { for i, test := range testData { desc := funcdesc.NewBuilder(&test.desc).BuildImmutable() expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) - ve := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelCrossReferences, desc) + ve := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, validate.Write, desc) 
if err := ve.CombinedError(); err == nil { t.Errorf("#%d expected err: %s, but found nil: %v", i, expectedErr, test.desc) } else if expectedErr != err.Error() { diff --git a/pkg/sql/catalog/internal/validate/validate.go b/pkg/sql/catalog/internal/validate/validate.go index 0bd2e6403f4f..73231e725f61 100644 --- a/pkg/sql/catalog/internal/validate/validate.go +++ b/pkg/sql/catalog/internal/validate/validate.go @@ -25,11 +25,11 @@ import ( const ( // ImmutableRead is the default validation level when reading an immutable // descriptor. - ImmutableRead = catalog.ValidationLevelCrossReferences + ImmutableRead = catalog.ValidationLevelForwardReferences // MutableRead is the default validation level when reading a mutable // descriptor. - MutableRead = catalog.ValidationLevelCrossReferences + MutableRead = catalog.ValidationLevelBackReferences // Write is the default validation level when writing a descriptor. Write = catalog.ValidationLevelAllPreTxnCommit @@ -81,13 +81,24 @@ func Validate( vea.reportDescGetterError(collectingReferencedDescriptors, descGetterErr) return vea.errors } - // Descriptor cross-reference checks. + // Descriptor forward-reference checks. if !vea.validateDescriptorsAtLevel( - catalog.ValidationLevelCrossReferences, + catalog.ValidationLevelForwardReferences, descriptors, func(desc catalog.Descriptor) { if !desc.Dropped() { - desc.ValidateCrossReferences(&vea, vdg) + desc.ValidateForwardReferences(&vea, vdg) + } + }) { + return vea.errors + } + // Descriptor backward-reference checks. 
+ if !vea.validateDescriptorsAtLevel( + catalog.ValidationLevelBackReferences, + descriptors, + func(desc catalog.Descriptor) { + if !desc.Dropped() { + desc.ValidateBackReferences(&vea, vdg) } }) { return vea.errors @@ -239,8 +250,10 @@ func (vea *validationErrorAccumulator) decorate(err error) error { switch vea.currentLevel { case catalog.ValidationLevelSelfOnly: tkSuffix = "self" - case catalog.ValidationLevelCrossReferences: - tkSuffix = "cross_references" + case catalog.ValidationLevelForwardReferences: + tkSuffix = "forward_references" + case catalog.ValidationLevelBackReferences: + tkSuffix = "backward_references" case catalog.ValidationLevelNamespace: tkSuffix = "namespace" case catalog.ValidationLevelAllPreTxnCommit: diff --git a/pkg/sql/catalog/nstree/catalog.go b/pkg/sql/catalog/nstree/catalog.go index 959a9322c9da..1a155dba15eb 100644 --- a/pkg/sql/catalog/nstree/catalog.go +++ b/pkg/sql/catalog/nstree/catalog.go @@ -199,7 +199,7 @@ func (c Catalog) ValidateWithRecover( ve = append(ve, err) } }() - return c.Validate(ctx, version, catalog.NoValidationTelemetry, catalog.ValidationLevelAllPreTxnCommit, desc) + return c.Validate(ctx, version, catalog.NoValidationTelemetry, validate.Write, desc) } // ByteSize returns memory usage of the underlying map in bytes. diff --git a/pkg/sql/catalog/schemadesc/schema_desc.go b/pkg/sql/catalog/schemadesc/schema_desc.go index 9475e808ff10..29050f46c8ba 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc.go +++ b/pkg/sql/catalog/schemadesc/schema_desc.go @@ -218,8 +218,8 @@ func (desc *immutable) GetReferencedDescIDs() (catalog.DescriptorIDSet, error) { return ret, nil } -// ValidateCrossReferences implements the catalog.Descriptor interface. -func (desc *immutable) ValidateCrossReferences( +// ValidateForwardReferences implements the catalog.Descriptor interface. 
+func (desc *immutable) ValidateForwardReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Check schema parent reference. @@ -232,8 +232,38 @@ func (desc *immutable) ValidateCrossReferences( vea.Report(errors.AssertionFailedf("parent database %q (%d) is dropped", db.GetName(), db.GetID())) } +} + +// ValidateBackReferences implements the catalog.Descriptor interface. +func (desc *immutable) ValidateBackReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // Check that parent database has correct entry in schemas mapping. + if db, err := vdg.GetDatabaseDescriptor(desc.GetParentID()); err == nil { + isInDBSchemas := false + _ = db.ForEachSchema(func(id descpb.ID, name string) error { + if id == desc.GetID() { + if name != desc.GetName() { + vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but under name %q", + desc.GetParentID(), errors.Safe(name))) + return nil + } + isInDBSchemas = true + return nil + } + if name == desc.GetName() { + vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but name maps to other schema [%d]", + desc.GetParentID(), id)) + } + return nil + }) + if !isInDBSchemas { + vea.Report(errors.AssertionFailedf("not present in parent database [%d] schemas mapping", + desc.GetParentID())) + } + } - // Check that all functions exist + // Check that all functions exist. for _, function := range desc.Functions { for _, overload := range function.Overloads { _, err := vdg.GetFunctionDescriptor(overload.ID) @@ -243,29 +273,6 @@ func (desc *immutable) ValidateCrossReferences( } } } - - // Check that parent has correct entry in schemas mapping. 
- isInDBSchemas := false - _ = db.ForEachSchema(func(id descpb.ID, name string) error { - if id == desc.GetID() { - if name != desc.GetName() { - vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but under name %q", - desc.GetParentID(), errors.Safe(name))) - return nil - } - isInDBSchemas = true - return nil - } - if name == desc.GetName() { - vea.Report(errors.AssertionFailedf("present in parent database [%d] schemas mapping but name maps to other schema [%d]", - desc.GetParentID(), id)) - } - return nil - }) - if !isInDBSchemas { - vea.Report(errors.AssertionFailedf("not present in parent database [%d] schemas mapping", - desc.GetParentID())) - } } // ValidateTxnCommit implements the catalog.Descriptor interface. diff --git a/pkg/sql/catalog/schemadesc/schema_desc_test.go b/pkg/sql/catalog/schemadesc/schema_desc_test.go index b41d926a60fc..d41afa0d9b6c 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc_test.go +++ b/pkg/sql/catalog/schemadesc/schema_desc_test.go @@ -232,7 +232,7 @@ func TestValidateCrossSchemaReferences(t *testing.T) { test.dbDesc.Privileges = privilege cb.UpsertDescriptorEntry(dbdesc.NewBuilder(&test.dbDesc).BuildImmutable()) expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) - const validateCrossReferencesOnly = catalog.ValidationLevelCrossReferences &^ (catalog.ValidationLevelCrossReferences >> 1) + const validateCrossReferencesOnly = catalog.ValidationLevelBackReferences &^ catalog.ValidationLevelSelfOnly results := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, validateCrossReferencesOnly, desc) if err := results.CombinedError(); err == nil { if test.err != "" { diff --git a/pkg/sql/catalog/schemadesc/synthetic_schema_desc.go b/pkg/sql/catalog/schemadesc/synthetic_schema_desc.go index 4f32bac2a41e..b28a8d231a8f 100644 --- a/pkg/sql/catalog/schemadesc/synthetic_schema_desc.go +++ 
b/pkg/sql/catalog/schemadesc/synthetic_schema_desc.go @@ -94,7 +94,11 @@ func (p synthetic) GetReferencedDescIDs() (catalog.DescriptorIDSet, error) { } func (p synthetic) ValidateSelf(_ catalog.ValidationErrorAccumulator) { } -func (p synthetic) ValidateCrossReferences( +func (p synthetic) ValidateForwardReferences( + _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { +} +func (p synthetic) ValidateBackReferences( _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, ) { } diff --git a/pkg/sql/catalog/tabledesc/ttl.go b/pkg/sql/catalog/tabledesc/ttl.go index f4678125dfcc..6d4c07914e9f 100644 --- a/pkg/sql/catalog/tabledesc/ttl.go +++ b/pkg/sql/catalog/tabledesc/ttl.go @@ -139,6 +139,7 @@ func ValidateTTLExpirationColumn(desc catalog.TableDescriptor) error { ) } } + return nil } diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index 05b545522040..5995f5c937a5 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -123,9 +123,8 @@ func (desc *wrapper) GetReferencedDescIDs() (catalog.DescriptorIDSet, error) { return ids, nil } -// ValidateCrossReferences validates that each reference to another table is -// resolvable and that the necessary back references exist. -func (desc *wrapper) ValidateCrossReferences( +// ValidateForwardReferences implements the catalog.Descriptor interface. +func (desc *wrapper) ValidateForwardReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Check that parent DB exists. 
@@ -188,7 +187,7 @@ func (desc *wrapper) ValidateCrossReferences( } switch depDesc.DescriptorType() { case catalog.Table: - vea.Report(desc.validateOutboundTableRef(id, vdg)) + vea.Report(catalog.ValidateOutboundTableRef(id, vdg)) case catalog.Function: vea.Report(desc.validateOutboundFuncRef(id, vdg)) default: @@ -197,30 +196,21 @@ func (desc *wrapper) ValidateCrossReferences( } } - for _, by := range desc.DependedOnBy { - depDesc, err := vdg.GetDescriptor(by.ID) - if err != nil { - vea.Report(errors.NewAssertionErrorWithWrappedErrf(err, "invalid depended-on-by relation back reference")) - continue - } - switch depDesc.DescriptorType() { - case catalog.Table: - vea.Report(desc.validateInboundTableRef(by, vdg)) - case catalog.Function: - vea.Report(desc.validateInboundFunctionRef(by, vdg)) - default: - vea.Report(errors.AssertionFailedf("table is depended on by unexpected %s %s (%d)", - depDesc.DescriptorType(), depDesc.GetName(), depDesc.GetID())) - } - } - - // For row-level TTL, no FKs are allowed. + // Row-level TTL is not compatible with foreign keys. + // This check should be in ValidateSelf but interferes with AllocateIDs. if desc.HasRowLevelTTL() { - if len(desc.OutboundFKs) > 0 || len(desc.InboundFKs) > 0 { + if len(desc.OutboundFKs) > 0 { + vea.Report(unimplemented.NewWithIssuef( + 76407, + `foreign keys from table with TTL %q are not permitted`, + desc.GetName(), + )) + } + if len(desc.InboundFKs) > 0 { vea.Report(unimplemented.NewWithIssuef( 76407, - `foreign keys to/from table with TTL "%s" are not permitted`, - desc.Name, + `foreign keys to table with TTL %q are not permitted`, + desc.GetName(), )) } } @@ -229,9 +219,6 @@ func (desc *wrapper) ValidateCrossReferences( for i := range desc.OutboundFKs { vea.Report(desc.validateOutboundFK(&desc.OutboundFKs[i], vdg)) } - for i := range desc.InboundFKs { - vea.Report(desc.validateInboundFK(&desc.InboundFKs[i], vdg)) - } // Check partitioning is correctly set. 
// We only check these for active indexes, as inactive indexes may be in the @@ -250,10 +237,63 @@ func (desc *wrapper) ValidateCrossReferences( } } -func (desc *wrapper) validateOutboundTableRef( - id descpb.ID, vdg catalog.ValidationDescGetter, -) error { - return catalog.ValidateOutboundTableRef(desc.GetID(), id, vdg) +// ValidateBackReferences implements the catalog.Descriptor interface. +func (desc *wrapper) ValidateBackReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + // Check that outbound foreign keys have matching back-references. + for i := range desc.OutboundFKs { + vea.Report(desc.validateOutboundFKBackReference(&desc.OutboundFKs[i], vdg)) + } + + // Check foreign key back-references. + for i := range desc.InboundFKs { + vea.Report(desc.validateInboundFK(&desc.InboundFKs[i], vdg)) + } + + // For views, check dependent relations. + if desc.IsView() { + for _, id := range desc.DependsOnTypes { + typ, _ := vdg.GetTypeDescriptor(id) + vea.Report(desc.validateOutboundTypeRefBackReference(typ)) + } + } + + for _, id := range desc.DependsOn { + ref, _ := vdg.GetDescriptor(id) + if ref == nil { + // Don't follow up on backward references for invalid or irrelevant + // forward references. + continue + } + switch refDesc := ref.(type) { + case catalog.TableDescriptor: + vea.Report(catalog.ValidateOutboundTableRefBackReference(desc.GetID(), refDesc)) + case catalog.FunctionDescriptor: + vea.Report(desc.validateOutboundFuncRefBackReference(refDesc)) + } + } + + // Check relation back-references to relations and functions. 
+ for _, by := range desc.DependedOnBy { + depDesc, err := vdg.GetDescriptor(by.ID) + if err != nil { + vea.Report(errors.NewAssertionErrorWithWrappedErrf(err, "invalid depended-on-by relation back reference")) + continue + } + switch depDesc.DescriptorType() { + case catalog.Table: + // If this is a table, it may be referenced by a view, otherwise if this + // is a sequence, then it may be also be referenced by a table. + vea.Report(desc.validateInboundTableRef(by, vdg)) + case catalog.Function: + // This relation may be referenced by a function. + vea.Report(desc.validateInboundFunctionRef(by, vdg)) + default: + vea.Report(errors.AssertionFailedf("table is depended on by unexpected %s %s (%d)", + depDesc.DescriptorType(), depDesc.GetName(), depDesc.GetID())) + } + } } func (desc *wrapper) validateOutboundTypeRef(id descpb.ID, vdg catalog.ValidationDescGetter) error { @@ -265,25 +305,30 @@ func (desc *wrapper) validateOutboundTypeRef(id descpb.ID, vdg catalog.Validatio return errors.AssertionFailedf("depends-on type %q (%d) is dropped", typ.GetName(), typ.GetID()) } + return nil +} + +func (desc *wrapper) validateOutboundTypeRefBackReference(ref catalog.TypeDescriptor) error { // TODO(postamar): maintain back-references in type, and validate these. - // use catalog.ValidateOutboundTypeRef function. 
return nil } func (desc *wrapper) validateOutboundFuncRef(id descpb.ID, vdg catalog.ValidationDescGetter) error { - refFunc, err := vdg.GetFunctionDescriptor(id) + _, err := vdg.GetFunctionDescriptor(id) if err != nil { return errors.NewAssertionErrorWithWrappedErrf(err, "invalid depends-on function back reference") } + return nil +} - for _, dep := range refFunc.GetDependedOnBy() { - if dep.ID == id { +func (desc *wrapper) validateOutboundFuncRefBackReference(ref catalog.FunctionDescriptor) error { + for _, dep := range ref.GetDependedOnBy() { + if dep.ID == desc.GetID() { return nil } } - return errors.AssertionFailedf("depends-on function %q (%d) has no corresponding depended-on-by back reference", - refFunc.GetName(), refFunc.GetID()) + ref.GetName(), ref.GetID()) } func (desc *wrapper) validateInboundFunctionRef( @@ -377,6 +422,19 @@ func (desc *wrapper) validateOutboundFK( return errors.AssertionFailedf("referenced table %q (%d) is dropped", referencedTable.GetName(), referencedTable.GetID()) } + return nil +} + +func (desc *wrapper) validateOutboundFKBackReference( + fk *descpb.ForeignKeyConstraint, vdg catalog.ValidationDescGetter, +) error { + referencedTable, _ := vdg.GetTableDescriptor(fk.ReferencedTableID) + if referencedTable == nil || referencedTable.Dropped() { + // Don't follow up on backward references for invalid or irrelevant forward + // references. + return nil + } + found := false _ = referencedTable.ForeachInboundFK(func(backref *descpb.ForeignKeyConstraint) error { if !found && backref.OriginTableID == desc.ID && backref.Name == fk.Name { @@ -746,14 +804,8 @@ func (desc *wrapper) ValidateSelf(vea catalog.ValidationErrorAccumulator) { // because it can only be called on an initialized table descriptor. // ValidateRowLevelTTL is also used before the table descriptor is fully // initialized to validate the storage parameters. 
- if err := ValidateTTLExpirationExpr(desc); err != nil { - vea.Report(err) - return - } - if err := ValidateTTLExpirationColumn(desc); err != nil { - vea.Report(err) - return - } + vea.Report(ValidateTTLExpirationExpr(desc)) + vea.Report(ValidateTTLExpirationColumn(desc)) // Validate that there are no column with both a foreign key ON UPDATE and an // ON UPDATE expression. This check is made to ensure that we know which ON diff --git a/pkg/sql/catalog/tabledesc/validate_test.go b/pkg/sql/catalog/tabledesc/validate_test.go index cab4fbe9d1aa..dcc38756403a 100644 --- a/pkg/sql/catalog/tabledesc/validate_test.go +++ b/pkg/sql/catalog/tabledesc/validate_test.go @@ -2718,7 +2718,7 @@ func TestValidateCrossTableReferences(t *testing.T) { desc := NewBuilder(&test.desc).BuildImmutable() cb.UpsertDescriptorEntry(funcdesc.NewBuilder(&descpb.FunctionDescriptor{ID: 100, Name: "f"}).BuildImmutable()) expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) - const validateCrossReferencesOnly = catalog.ValidationLevelCrossReferences &^ (catalog.ValidationLevelCrossReferences >> 1) + const validateCrossReferencesOnly = catalog.ValidationLevelBackReferences &^ catalog.ValidationLevelSelfOnly results := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, validateCrossReferencesOnly, desc) if err := results.CombinedError(); err == nil { if test.err != "" { diff --git a/pkg/sql/catalog/typedesc/table_implicit_record_type.go b/pkg/sql/catalog/typedesc/table_implicit_record_type.go index bab12606f093..55fe563ef83f 100644 --- a/pkg/sql/catalog/typedesc/table_implicit_record_type.go +++ b/pkg/sql/catalog/typedesc/table_implicit_record_type.go @@ -204,8 +204,14 @@ func (v TableImplicitRecordType) GetReferencedDescIDs() (catalog.DescriptorIDSet func (v TableImplicitRecordType) ValidateSelf(_ catalog.ValidationErrorAccumulator) { } -// ValidateCrossReferences implements the Descriptor interface. 
-func (v TableImplicitRecordType) ValidateCrossReferences( +// ValidateForwardReferences implements the Descriptor interface. +func (v TableImplicitRecordType) ValidateForwardReferences( + _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, +) { +} + +// ValidateBackReferences implements the Descriptor interface. +func (v TableImplicitRecordType) ValidateBackReferences( _ catalog.ValidationErrorAccumulator, _ catalog.ValidationDescGetter, ) { } diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go index 610e4c53a1b9..791424903b8b 100644 --- a/pkg/sql/catalog/typedesc/type_desc.go +++ b/pkg/sql/catalog/typedesc/type_desc.go @@ -606,8 +606,8 @@ func (desc *immutable) GetReferencedDescIDs() (catalog.DescriptorIDSet, error) { return ids, nil } -// ValidateCrossReferences performs cross reference checks on the type descriptor. -func (desc *immutable) ValidateCrossReferences( +// ValidateForwardReferences implements the catalog.Descriptor interface. +func (desc *immutable) ValidateForwardReferences( vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, ) { // Validate the parentID. @@ -638,27 +638,37 @@ func (desc *immutable) ValidateCrossReferences( desc.validateMultiRegion(dbDesc, vea) } - // Validate that the referenced types exist. + // Validate that the forward-referenced types exist. + if desc.GetKind() == descpb.TypeDescriptor_ALIAS && desc.GetAlias().UserDefined() { + aliasedID, err := UserDefinedTypeOIDToID(desc.GetAlias().Oid()) + if err != nil { + vea.Report(err) + } + if typ, err := vdg.GetTypeDescriptor(aliasedID); err != nil { + vea.Report(errors.Wrapf(err, "aliased type %d does not exist", aliasedID)) + } else if typ.Dropped() { + vea.Report(errors.AssertionFailedf("aliased type %q (%d) is dropped", typ.GetName(), typ.GetID())) + } + } +} + +// ValidateBackReferences implements the catalog.Descriptor interface. 
+func (desc *immutable) ValidateBackReferences( + vea catalog.ValidationErrorAccumulator, vdg catalog.ValidationDescGetter, +) { + + // Validate that the backward-referenced types exist. switch desc.GetKind() { case descpb.TypeDescriptor_ENUM, descpb.TypeDescriptor_MULTIREGION_ENUM: - // Ensure that the referenced array type exists. + // Ensure that the array type exists. + // This is considered to be a backward reference, not a forward reference, + // as the element type doesn't need the array type to exist, but the + // converse is not true. if typ, err := vdg.GetTypeDescriptor(desc.GetArrayTypeID()); err != nil { vea.Report(errors.Wrapf(err, "arrayTypeID %d does not exist for %q", desc.GetArrayTypeID(), desc.GetKind())) } else if typ.Dropped() { vea.Report(errors.AssertionFailedf("array type %q (%d) is dropped", typ.GetName(), typ.GetID())) } - case descpb.TypeDescriptor_ALIAS: - if desc.GetAlias().UserDefined() { - aliasedID, err := UserDefinedTypeOIDToID(desc.GetAlias().Oid()) - if err != nil { - vea.Report(err) - } - if typ, err := vdg.GetTypeDescriptor(aliasedID); err != nil { - vea.Report(errors.Wrapf(err, "aliased type %d does not exist", aliasedID)) - } else if typ.Dropped() { - vea.Report(errors.AssertionFailedf("aliased type %q (%d) is dropped", typ.GetName(), typ.GetID())) - } - } } // Validate that all of the referencing descriptors exist. 
diff --git a/pkg/sql/catalog/typedesc/type_desc_test.go b/pkg/sql/catalog/typedesc/type_desc_test.go index 4bf946d4608d..268967e4acab 100644 --- a/pkg/sql/catalog/typedesc/type_desc_test.go +++ b/pkg/sql/catalog/typedesc/type_desc_test.go @@ -792,7 +792,7 @@ func TestValidateTypeDesc(t *testing.T) { for i, test := range testData { desc := typedesc.NewBuilder(&test.desc).BuildImmutable() expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) - ve := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelCrossReferences, desc) + ve := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelBackReferences, desc) if err := ve.CombinedError(); err == nil { t.Errorf("#%d expected err: %s but found nil: %v", i, expectedErr, test.desc) } else if expectedErr != err.Error() { diff --git a/pkg/sql/catalog/validate.go b/pkg/sql/catalog/validate.go index 5b4dc491b553..81e9caa543b3 100644 --- a/pkg/sql/catalog/validate.go +++ b/pkg/sql/catalog/validate.go @@ -27,9 +27,12 @@ const ( NoValidation ValidationLevel = 0 // ValidationLevelSelfOnly means only validate internal descriptor consistency. ValidationLevelSelfOnly ValidationLevel = 1<<(iota+1) - 1 - // ValidationLevelCrossReferences means do the above and also check - // cross-references. - ValidationLevelCrossReferences + // ValidationLevelForwardReferences means do the above and also check + // forward references. + ValidationLevelForwardReferences + // ValidationLevelBackReferences means do the above and also check + // backward references. + ValidationLevelBackReferences // ValidationLevelNamespace means do the above and also check namespace // table records. ValidationLevelNamespace @@ -120,7 +123,7 @@ type ValidationDescGetter interface { // ValidateOutboundTableRef validates outbound reference to relation descriptor // depID from descriptor selfID. 
-func ValidateOutboundTableRef(selfID descpb.ID, depID descpb.ID, vdg ValidationDescGetter) error { +func ValidateOutboundTableRef(depID descpb.ID, vdg ValidationDescGetter) error { referencedTable, err := vdg.GetTableDescriptor(depID) if err != nil { return errors.NewAssertionErrorWithWrappedErrf(err, "invalid depends-on relation reference") @@ -129,21 +132,29 @@ func ValidateOutboundTableRef(selfID descpb.ID, depID descpb.ID, vdg ValidationD return errors.AssertionFailedf("depends-on relation %q (%d) is dropped", referencedTable.GetName(), referencedTable.GetID()) } - for _, by := range referencedTable.TableDesc().DependedOnBy { + return nil +} + +// ValidateOutboundTableRefBackReference validates that the outbound reference +// to a table descriptor has a matching back-reference. +func ValidateOutboundTableRefBackReference(selfID descpb.ID, ref TableDescriptor) error { + if ref == nil || ref.Dropped() { + // Don't follow up on backward references for invalid or irrelevant forward + // references. + return nil + } + for _, by := range ref.TableDesc().DependedOnBy { if by.ID == selfID { return nil } } return errors.AssertionFailedf("depends-on relation %q (%d) has no corresponding depended-on-by back reference", - referencedTable.GetName(), referencedTable.GetID()) + ref.GetName(), ref.GetID()) } -// ValidateOutboundTypeRef validates outbound reference to type descriptor -// depTypeID from descriptor selfID. -func ValidateOutboundTypeRef( - selfID descpb.ID, depTypeID descpb.ID, vdg ValidationDescGetter, -) error { - typ, err := vdg.GetTypeDescriptor(depTypeID) +// ValidateOutboundTypeRef validates outbound reference to type descriptor. 
+func ValidateOutboundTypeRef(typeID descpb.ID, vdg ValidationDescGetter) error { + typ, err := vdg.GetTypeDescriptor(typeID) if err != nil { return errors.NewAssertionErrorWithWrappedErrf(err, "invalid depends-on type reference") } @@ -151,13 +162,23 @@ func ValidateOutboundTypeRef( return errors.AssertionFailedf("depends-on type %q (%d) is dropped", typ.GetName(), typ.GetID()) } + return nil +} + +// ValidateOutboundTypeRefBackReference validates that the outbound reference +// to a type descriptor has a matching back-reference. +func ValidateOutboundTypeRefBackReference(selfID descpb.ID, typ TypeDescriptor) error { + if typ == nil || typ.Dropped() { + // Don't follow up on backward references for invalid or irrelevant forward + // references. + return nil + } for ord := 0; ord < typ.NumReferencingDescriptors(); ord++ { if typ.GetReferencingDescriptorID(ord) == selfID { return nil } } - return errors.AssertionFailedf("depends-on type %q (%d) has no corresponding referencing-descriptor back references", typ.GetName(), typ.GetID()) } diff --git a/pkg/sql/logictest/testdata/logic_test/row_level_ttl b/pkg/sql/logictest/testdata/logic_test/row_level_ttl index a14388b28d04..fce5bd83ac0a 100644 --- a/pkg/sql/logictest/testdata/logic_test/row_level_ttl +++ b/pkg/sql/logictest/testdata/logic_test/row_level_ttl @@ -791,25 +791,25 @@ ALTER TABLE tbl SET (ttl_expire_after = '10 minutes') statement ok CREATE TABLE ref_table (id INT PRIMARY KEY, ref INT) -statement error foreign keys to/from table with TTL "ttl_table" are not permitted +statement error foreign keys from table with TTL "ttl_table" are not permitted CREATE TABLE ttl_table (id INT PRIMARY KEY, ref INT REFERENCES ref_table(id)) WITH (ttl_expire_after = '10 mins') statement ok CREATE TABLE ttl_table (id INT PRIMARY KEY, ref INT) WITH (ttl_expire_after = '10 mins') -statement error foreign keys to/from table with TTL "ttl_table" are not permitted +statement error foreign keys to table with TTL "ttl_table" are not 
permitted CREATE TABLE new_ref_table (id INT PRIMARY KEY, ref INT REFERENCES ttl_table(id)) -statement error foreign keys to/from table with TTL "ttl_table" are not permitted +statement error foreign keys to table with TTL "ttl_table" are not permitted ALTER TABLE ref_table ADD CONSTRAINT fk FOREIGN KEY (ref) REFERENCES ttl_table (id) -statement error foreign keys to/from table with TTL "ttl_table" are not permitted +statement error foreign keys from table with TTL "ttl_table" are not permitted ALTER TABLE ttl_table ADD CONSTRAINT fk FOREIGN KEY (ref) REFERENCES ttl_table (id) statement ok CREATE TABLE ttl_become_table (id INT PRIMARY KEY, ref INT REFERENCES ref_table (id)) -statement error foreign keys to/from table with TTL "ttl_become_table" are not permitted +statement error foreign keys from table with TTL "ttl_become_table" are not permitted ALTER TABLE ttl_become_table SET (ttl_expire_after = '10 minutes') # Check non-ascending PKs are not permitted. diff --git a/pkg/sql/logictest/testdata/logic_test/schema_repair b/pkg/sql/logictest/testdata/logic_test/schema_repair index b07eac45bbf2..2f9ea8013805 100644 --- a/pkg/sql/logictest/testdata/logic_test/schema_repair +++ b/pkg/sql/logictest/testdata/logic_test/schema_repair @@ -1,4 +1,4 @@ -subtest lost-table-data +subtest lost_table_data statement ok CREATE TABLE corruptdesc (v INT8) @@ -117,7 +117,7 @@ statement ok SELECT * FROM corruptdesc; # Test the crdb_internal.force_delete_table_data function -subtest force-delete-data +subtest force_delete_data statement ok CREATE TABLE forcedeletemydata (v int) @@ -187,3 +187,113 @@ select * from crdb_internal.unsafe_upsert_descriptor($t_id, crdb_internal.json_t query I SELECT * FROM forcedeletemydata ORDER BY v ASC ---- + +# Test that corrupt back-references should not prevent objects from being queried. 
+subtest queryable_despite_corrupt_back_refs + +statement ok +CREATE TABLE corrupt_backref_fk (k INT PRIMARY KEY, v STRING); +INSERT INTO corrupt_backref_fk (k, v) VALUES (1, 'a'); +CREATE TABLE corrupt_fk (k INT NOT NULL, FOREIGN KEY (k) REFERENCES corrupt_backref_fk (k)); + +query BB +SELECT + crdb_internal.unsafe_delete_descriptor(id), + crdb_internal.unsafe_delete_namespace_entry("parentID", "parentSchemaID", name, id) +FROM + system.namespace +WHERE + name = 'corrupt_fk' +---- +true true + +query IT +SELECT * FROM corrupt_backref_fk +---- +1 a + +statement error invalid foreign key backreference +DROP TABLE corrupt_backref_fk + +statement ok +CREATE TABLE corrupt_backref_view (k INT PRIMARY KEY, v STRING); +INSERT INTO corrupt_backref_view (k, v) VALUES (1, 'a'); +CREATE VIEW corrupt_view AS SELECT k, v FROM corrupt_backref_view + +query BB +SELECT + crdb_internal.unsafe_delete_descriptor(id), + crdb_internal.unsafe_delete_namespace_entry("parentID", "parentSchemaID", name, id) +FROM + system.namespace +WHERE + name = 'corrupt_view' +---- +true true + +query IT +SELECT * FROM corrupt_backref_view +---- +1 a + +statement error pgcode XX000 invalid depended-on-by relation back reference +DROP TABLE corrupt_backref_view + +statement ok +CREATE TYPE corrupt_backref_typ AS ENUM ('a', 'b'); +CREATE TABLE corrupt_typ (k INT PRIMARY KEY, v corrupt_backref_typ); + +query BB +SELECT + crdb_internal.unsafe_delete_descriptor(id), + crdb_internal.unsafe_delete_namespace_entry("parentID", "parentSchemaID", name, id) +FROM + system.namespace +WHERE + name = 'corrupt_typ' +---- +true true + +query T +SELECT 'a'::corrupt_backref_typ +---- +a + +statement error pgcode XXUUU referenced descriptor not found +ALTER TYPE corrupt_backref_typ DROP VALUE 'b' + +# This is required to pass the validation tests when the logic test completes. 
+subtest cleanup + +query TB +SELECT + name, + crdb_internal.unsafe_delete_descriptor(id, true) +FROM + system.namespace +WHERE + name LIKE '%corrupt_backref_%' +ORDER BY + name +---- +_corrupt_backref_typ true +corrupt_backref_fk true +corrupt_backref_typ true +corrupt_backref_view true + +query TBB +SELECT + name, + crdb_internal.force_delete_table_data(id), + crdb_internal.unsafe_delete_namespace_entry("parentID", "parentSchemaID", name, id) +FROM + system.namespace +WHERE + name LIKE '%corrupt_backref_%' +ORDER BY + name +---- +_corrupt_backref_typ true true +corrupt_backref_fk true true +corrupt_backref_typ true true +corrupt_backref_view true true diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 36e791c46061..1350fc1c1e28 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -883,7 +883,7 @@ func validateDescriptor(ctx context.Context, p *planner, descriptor catalog.Desc ctx, p.Txn(), catalog.NoValidationTelemetry, - catalog.ValidationLevelCrossReferences, + catalog.ValidationLevelBackReferences, descriptor, ) } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go index 6d2d6c5eb6f8..c7461196a715 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go @@ -370,8 +370,7 @@ func fallBackIfRegionalByRowTable(b BuildCtx, t alterPrimaryKeySpec, tableID cat // error if the table is a (row-level-ttl table && (it has a descending // key column || it has any inbound/outbound FK constraint)). 
func fallBackIfDescColInRowLevelTTLTables(b BuildCtx, tableID catid.DescID, t alterPrimaryKeySpec) { - _, _, rowLevelTTLElem := scpb.FindRowLevelTTL(b.QueryByID(tableID)) - if rowLevelTTLElem == nil { + if _, _, rowLevelTTLElem := scpb.FindRowLevelTTL(b.QueryByID(tableID)); rowLevelTTLElem == nil { return } @@ -384,18 +383,15 @@ func fallBackIfDescColInRowLevelTTLTables(b BuildCtx, tableID catid.DescID, t al } _, _, ns := scpb.FindNamespace(b.QueryByID(tableID)) - hasFKConstraintError := scerrors.NotImplementedErrorf(t.n, - `foreign keys to/from table with TTL %q are not permitted`, ns.Name) // Panic if there is any inbound/outbound FK constraints. - _, _, inboundFKElem := scpb.FindForeignKeyConstraint(b.BackReferences(tableID)) - if inboundFKElem != nil { - panic(hasFKConstraintError) + if _, _, inboundFKElem := scpb.FindForeignKeyConstraint(b.BackReferences(tableID)); inboundFKElem != nil { + panic(scerrors.NotImplementedErrorf(t.n, + `foreign keys to table with TTL %q are not permitted`, ns.Name)) + } + if _, _, outboundFKElem := scpb.FindForeignKeyConstraint(b.QueryByID(tableID)); outboundFKElem != nil { + panic(scerrors.NotImplementedErrorf(t.n, + `foreign keys from table with TTL %q are not permitted`, ns.Name)) } - scpb.ForEachForeignKeyConstraint(b.QueryByID(tableID), func( - current scpb.Status, target scpb.TargetStatus, e *scpb.ForeignKeyConstraint, - ) { - panic(hasFKConstraintError) - }) } func mustRetrievePrimaryIndexElement(b BuildCtx, tableID catid.DescID) (res *scpb.PrimaryIndex) { diff --git a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go index 4adb6122152d..b0f0b9255d0e 100644 --- a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go +++ b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go @@ -787,7 +787,13 @@ func (b *testCatalogChangeBatcher) ValidateAndRun(ctx context.Context) error { for _, deletedID := range b.zoneConfigsToDelete.Ordered() { b.s.LogSideEffectf("deleting zone 
config for #%d", deletedID) } - ve := b.s.uncommitted.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelAllPreTxnCommit, b.descs...) + ve := b.s.uncommitted.Validate( + ctx, + clusterversion.TestingClusterVersion, + catalog.NoValidationTelemetry, + catalog.ValidationLevelAllPreTxnCommit, + b.descs..., + ) return ve.CombinedError() } diff --git a/pkg/sql/testdata/telemetry/error b/pkg/sql/testdata/telemetry/error index 1884c66fa529..bbc50567cd51 100644 --- a/pkg/sql/testdata/telemetry/error +++ b/pkg/sql/testdata/telemetry/error @@ -41,11 +41,11 @@ SELECT crdb_internal.unsafe_upsert_descriptor(id, crdb_internal.json_to_pb('desc # Table descriptor validation failure on read. feature-usage -SELECT * FROM tbl; +DROP TABLE tbl CASCADE; ---- error: pq: internal error: relation "tbl" (...): missing fk back reference "tbl_customer_fkey" to "tbl" from "fktbl" errorcodes.XX000 -sql.schema.validation_errors.read.cross_references.relation +sql.schema.validation_errors.read.backward_references.relation exec CREATE TYPE greeting AS ENUM('hello', 'hi'); diff --git a/pkg/sql/tests/repair_test.go b/pkg/sql/tests/repair_test.go index f4d46199ee72..1febbe46b3b9 100644 --- a/pkg/sql/tests/repair_test.go +++ b/pkg/sql/tests/repair_test.go @@ -902,10 +902,14 @@ func TestCorruptDescriptorRepair(t *testing.T) { // back-reference. 
tdb.Exec(t, `CREATE DATABASE testdb`) tdb.Exec(t, `CREATE TABLE testdb.parent (k INT PRIMARY KEY, v STRING)`) + tdb.Exec(t, `INSERT INTO testdb.parent (k, v) VALUES (1, 'a')`) tdb.Exec(t, `CREATE TABLE testdb.child (k INT NOT NULL, FOREIGN KEY (k) REFERENCES testdb.parent (k))`) tdb.Exec(t, `SELECT crdb_internal.unsafe_delete_descriptor(id) FROM system.namespace WHERE name = 'child'`) tdb.Exec(t, `SELECT crdb_internal.unsafe_delete_namespace_entry("parentID", "parentSchemaID", name, id) FROM system.namespace WHERE name = 'child'`) + // Querying the table should succeed in spite of the dangling back-reference. + tdb.CheckQueryResults(t, `SELECT * FROM testdb.parent`, [][]string{{"1", "a"}}) + // Dropping the table should fail, because the table descriptor will fail // the validation checks when being read from storage. tdb.ExpectErr(t, "invalid foreign key backreference", `DROP TABLE testdb.parent`) From 1a66a9bb4583a9bda5344f783dc490cb28cbfcae Mon Sep 17 00:00:00 2001 From: Marius Posta Date: Fri, 26 Aug 2022 15:47:39 -0400 Subject: [PATCH 2/2] backupccl: clean up data-driven test harness Previously, it swallowed all kinds of errors, making debugging painful. Release justification: test-only change Release note: None --- pkg/ccl/backupccl/datadriven_test.go | 223 ++++++--------------------- 1 file changed, 51 insertions(+), 172 deletions(-) diff --git a/pkg/ccl/backupccl/datadriven_test.go b/pkg/ccl/backupccl/datadriven_test.go index 9b37fb44471b..a8f4ca2193f5 100644 --- a/pkg/ccl/backupccl/datadriven_test.go +++ b/pkg/ccl/backupccl/datadriven_test.go @@ -388,6 +388,49 @@ func TestDataDriven(t *testing.T) { ds := newDatadrivenTestState() defer ds.cleanup(ctx) datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { + + execWithTagAndPausePoint := func(jobType jobspb.Type) string { + const user = "root" + sqlDB := ds.getSQLDB(t, lastCreatedServer, user) + // First, run the schema change. 
+ + _, err := sqlDB.Exec(d.Input) + + var jobID jobspb.JobID + { + const qFmt = `SELECT job_id FROM [SHOW JOBS] WHERE job_type = '%s' ORDER BY created DESC LIMIT 1` + errJob := sqlDB.QueryRow(fmt.Sprintf(qFmt, jobType)).Scan(&jobID) + if !errors.Is(errJob, gosql.ErrNoRows) { + require.NoError(t, errJob) + } + require.NotZerof(t, jobID, "job not found for %q: %+v", d.Input, err) + } + + // Tag the job. + if d.HasArg("tag") { + var jobTag string + d.ScanArgs(t, "tag", &jobTag) + if _, exists := ds.jobTags[jobTag]; exists { + t.Fatalf("failed to `tag`, job with tag %s already exists", jobTag) + } + ds.jobTags[jobTag] = jobID + } + + // Check if we expect a pausepoint error. + if d.HasArg("expect-pausepoint") { + // Check if we are expecting a pausepoint error. + require.NotNilf(t, err, "expected pause point error") + require.Regexp(t, "pause point .* hit$", err.Error()) + jobutils.WaitForJobToPause(t, sqlutils.MakeSQLRunner(sqlDB), jobID) + ret := append(ds.noticeBuffer, "job paused at pausepoint") + return strings.Join(ret, "\n") + } + + // All other errors are bad. + require.NoError(t, err) + return "" + } + for v := range ds.vars { d.Input = strings.Replace(d.Input, v, ds.vars[v], -1) d.Expected = strings.Replace(d.Expected, v, ds.vars[v], -1) @@ -608,58 +651,12 @@ func TestDataDriven(t *testing.T) { return "" case "backup": - server := lastCreatedServer - user := "root" - jobType := "BACKUP" - - // First, run the backup. - _, err := ds.getSQLDB(t, server, user).Exec(d.Input) - - // Tag the job. - if d.HasArg("tag") { - tagJob(t, server, user, jobType, ds, d) - } - - // Check if we expect a pausepoint error. - if d.HasArg("expect-pausepoint") { - expectPausepoint(t, err, jobType, server, user, ds) - ret := append(ds.noticeBuffer, "job paused at pausepoint") - return strings.Join(ret, "\n") - } - - // All other errors are bad. 
- require.NoError(t, err) - return "" + return execWithTagAndPausePoint(jobspb.TypeBackup) case "import": - server := lastCreatedServer - user := "root" - jobType := "IMPORT" - - // First, run the backup. - _, err := ds.getSQLDB(t, server, user).Exec(d.Input) - - // Tag the job. - if d.HasArg("tag") { - tagJob(t, server, user, jobType, ds, d) - } - - // Check if we expect a pausepoint error. - if d.HasArg("expect-pausepoint") { - expectPausepoint(t, err, jobType, server, user, ds) - ret := append(ds.noticeBuffer, "job paused at pausepoint") - return strings.Join(ret, "\n") - } - - // All other errors are bad. - require.NoError(t, err) - return "" + return execWithTagAndPausePoint(jobspb.TypeImport) case "restore": - server := lastCreatedServer - user := "root" - jobType := "RESTORE" - if d.HasArg("aost") { var aost string d.ScanArgs(t, "aost", &aost) @@ -673,95 +670,17 @@ func TestDataDriven(t *testing.T) { d.Input = strings.Replace(d.Input, aost, fmt.Sprintf("'%s'", ts), 1) } - - // First, run the restore. - _, err := ds.getSQLDB(t, server, user).Exec(d.Input) - - // Tag the job. - if d.HasArg("tag") { - tagJob(t, server, user, jobType, ds, d) - } - - // Check if the job must be run aost. - if d.HasArg("aost") { - var aost string - d.ScanArgs(t, "aost", &aost) - } - - // Check if we expect a pausepoint error. - if d.HasArg("expect-pausepoint") { - expectPausepoint(t, err, jobType, server, user, ds) - ret := append(ds.noticeBuffer, "job paused at pausepoint") - return strings.Join(ret, "\n") - } - - // All other errors are bad. - require.NoError(t, err) - return "" + return execWithTagAndPausePoint(jobspb.TypeRestore) case "new-schema-change": - server := lastCreatedServer - user := "root" - jobType := "NEW SCHEMA CHANGE" - - // First, run the schema change. - _, err := ds.getSQLDB(t, server, user).Exec(d.Input) - - // Tag the job. - if d.HasArg("tag") { - tagJob(t, server, user, jobType, ds, d) - } - - // Check if the job must be run aost. 
- if d.HasArg("aost") { - var aost string - d.ScanArgs(t, "aost", &aost) - } - - // Check if we expect a pausepoint error. - if d.HasArg("expect-pausepoint") { - expectPausepoint(t, err, jobType, server, user, ds) - ret := append(ds.noticeBuffer, "job paused at pausepoint") - return strings.Join(ret, "\n") - } - - // All other errors are bad. - require.NoError(t, err) - return "" + return execWithTagAndPausePoint(jobspb.TypeNewSchemaChange) case "schema-change": - server := lastCreatedServer - user := "root" - jobType := "SCHEMA CHANGE" - - // First, run the schema change. - _, err := ds.getSQLDB(t, server, user).Exec(d.Input) - - // Tag the job. - if d.HasArg("tag") { - tagJob(t, server, user, jobType, ds, d) - } - - // Check if the job must be run aost. - if d.HasArg("aost") { - var aost string - d.ScanArgs(t, "aost", &aost) - } - - // Check if we expect a pausepoint error. - if d.HasArg("expect-pausepoint") { - expectPausepoint(t, err, jobType, server, user, ds) - ret := append(ds.noticeBuffer, "job paused at pausepoint") - return strings.Join(ret, "\n") - } - - // All other errors are bad. 
- require.NoError(t, err) - return "" + return execWithTagAndPausePoint(jobspb.TypeSchemaChange) case "job": server := lastCreatedServer - user := "root" + const user = "root" if d.HasArg("cancel") { var cancelJobTag string @@ -821,7 +740,7 @@ func TestDataDriven(t *testing.T) { case "save-cluster-ts": server := lastCreatedServer - user := "root" + const user = "root" var timestampTag string d.ScanArgs(t, "tag", ×tampTag) if _, ok := ds.clusterTimestamps[timestampTag]; ok { @@ -857,7 +776,7 @@ func TestDataDriven(t *testing.T) { case "corrupt-backup": server := lastCreatedServer - user := "root" + const user = "root" var uri string d.ScanArgs(t, "uri", &uri) parsedURI, err := url.Parse(strings.Replace(uri, "'", "", -1)) @@ -923,43 +842,3 @@ func handleKVRequest( t.Fatalf("Unknown kv request") } } - -// findMostRecentJobWithType returns the most recently created job of `job_type` -// jobType. -func findMostRecentJobWithType( - t *testing.T, ds datadrivenTestState, server, user string, jobType string, -) jobspb.JobID { - var jobID jobspb.JobID - require.NoError( - t, ds.getSQLDB(t, server, user).QueryRow( - fmt.Sprintf( - `SELECT job_id FROM [SHOW JOBS] WHERE job_type = '%s' ORDER BY created DESC LIMIT 1`, - jobType)).Scan(&jobID)) - return jobID -} - -// expectPausepoint waits for the job to hit a pausepoint and enter a paused -// state. -func expectPausepoint( - t *testing.T, err error, jobType, server, user string, ds datadrivenTestState, -) { - // Check if we are expecting a pausepoint error. - require.NotNilf(t, err, "expected pause point error") - - runner := sqlutils.MakeSQLRunner(ds.getSQLDB(t, server, user)) - jobutils.WaitForJobToPause(t, runner, - findMostRecentJobWithType(t, ds, server, user, jobType)) -} - -// tagJob stores the jobID of the most recent job of `jobType`. Users can use -// the tag to refer to the job in the future. 
-func tagJob( - t *testing.T, server, user, jobType string, ds datadrivenTestState, d *datadriven.TestData, -) { - var jobTag string - d.ScanArgs(t, "tag", &jobTag) - if _, exists := ds.jobTags[jobTag]; exists { - t.Fatalf("failed to `tag`, job with tag %s already exists", jobTag) - } - ds.jobTags[jobTag] = findMostRecentJobWithType(t, ds, server, user, jobType) -}