diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 82a4a1090f8a..fbc73486cfe3 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -2089,9 +2089,6 @@ func (r *restoreResumer) dropDescriptors( tableToDrop.DropTime = dropTime b.Del(catalogkeys.EncodeNameKey(codec, tableToDrop)) descsCol.AddDeletedDescriptor(tableToDrop.GetID()) - if err := descsCol.WriteDescToBatch(ctx, false /* kvTrace */, tableToDrop, b); err != nil { - return errors.Wrap(err, "writing dropping table to batch") - } } // Drop the type descriptors that this restore created. @@ -2111,9 +2108,6 @@ func (r *restoreResumer) dropDescriptors( b.Del(catalogkeys.EncodeNameKey(codec, typDesc)) mutType.SetDropped() - if err := descsCol.WriteDescToBatch(ctx, false /* kvTrace */, mutType, b); err != nil { - return errors.Wrap(err, "writing dropping type to batch") - } // Remove the system.descriptor entry. b.Del(catalogkeys.MakeDescMetadataKey(codec, typDesc.ID)) descsCol.AddDeletedDescriptor(mutType.GetID()) @@ -2271,6 +2265,16 @@ func (r *restoreResumer) dropDescriptors( deletedDBs[db.GetID()] = struct{}{} } + // Avoid telling the descriptor collection about the mutated descriptors + // until after all relevant relations have been retrieved to avoid a + // scenario whereby we make a descriptor invalid too early. 
+ const kvTrace = false + for _, t := range mutableTables { + if err := descsCol.WriteDescToBatch(ctx, kvTrace, t, b); err != nil { + return errors.Wrap(err, "writing dropping table to batch") + } + } + if err := txn.Run(ctx, b); err != nil { return errors.Wrap(err, "dropping tables created at the start of restore caused by fail/cancel") } diff --git a/pkg/ccl/benchccl/rttanalysisccl/testdata/benchmark_expectations b/pkg/ccl/benchccl/rttanalysisccl/testdata/benchmark_expectations index 4b509dda0c65..bf0b49d98909 100644 --- a/pkg/ccl/benchccl/rttanalysisccl/testdata/benchmark_expectations +++ b/pkg/ccl/benchccl/rttanalysisccl/testdata/benchmark_expectations @@ -1,19 +1,19 @@ exp,benchmark 17,AlterPrimaryRegion/alter_empty_database_alter_primary_region -23,AlterPrimaryRegion/alter_empty_database_set_initial_primary_region +21,AlterPrimaryRegion/alter_empty_database_set_initial_primary_region 17,AlterPrimaryRegion/alter_populated_database_alter_primary_region 24,AlterPrimaryRegion/alter_populated_database_set_initial_primary_region -17,AlterRegions/alter_empty_database_add_region +14,AlterRegions/alter_empty_database_add_region 17,AlterRegions/alter_empty_database_drop_region -17,AlterRegions/alter_populated_database_add_region +15,AlterRegions/alter_populated_database_add_region 17,AlterRegions/alter_populated_database_drop_region 17,AlterSurvivalGoals/alter_empty_database_from_region_to_zone -18,AlterSurvivalGoals/alter_empty_database_from_zone_to_region +16,AlterSurvivalGoals/alter_empty_database_from_zone_to_region 37,AlterSurvivalGoals/alter_populated_database_from_region_to_zone 38,AlterSurvivalGoals/alter_populated_database_from_zone_to_region 15,AlterTableLocality/alter_from_global_to_rbr 17,AlterTableLocality/alter_from_global_to_regional_by_table -14,AlterTableLocality/alter_from_rbr_to_global -14,AlterTableLocality/alter_from_rbr_to_regional_by_table +12,AlterTableLocality/alter_from_rbr_to_global +12,AlterTableLocality/alter_from_rbr_to_regional_by_table 
17,AlterTableLocality/alter_from_regional_by_table_to_global 15,AlterTableLocality/alter_from_regional_by_table_to_rbr diff --git a/pkg/sql/catalog/descs/descriptor.go b/pkg/sql/catalog/descs/descriptor.go index 02a103d8fc47..b803e3867ef4 100644 --- a/pkg/sql/catalog/descs/descriptor.go +++ b/pkg/sql/catalog/descs/descriptor.go @@ -172,6 +172,25 @@ func (tc *Collection) getDescriptorsByID( return nil, err } for j, desc := range kvDescs { + // Callers expect the descriptors to come back hydrated. + // In practice, array types here are not hydrated, and that's a bummer. + // Nobody presently is upset about it, but it's not a good thing. + // Ideally we'd have a clearer contract regarding hydration and the values + // stored in the various maps inside the collection. One might want to + // store only hydrated values in the various maps. This turns out to be + // somewhat tricky because we'd need to make sure to properly re-hydrate + // all the relevant descriptors when a type descriptor changes. Leased + // descriptors are at least as tricky, plus, there we have a cache that + // works relatively well. + // + // TODO(ajwerner): Sort out the hydration mess; define clearly what is + // hydrated where and test the API boundary accordingly. + if table, isTable := desc.(catalog.TableDescriptor); isTable { + desc, err = tc.hydrateTypesInTableDesc(ctx, txn, table) + if err != nil { + return nil, err + } + } descs[indexes[j]] = desc } return descs, nil diff --git a/pkg/sql/catalog/descs/hydrate.go b/pkg/sql/catalog/descs/hydrate.go index e0fa565a022a..efe5bb132c24 100644 --- a/pkg/sql/catalog/descs/hydrate.go +++ b/pkg/sql/catalog/descs/hydrate.go @@ -97,8 +97,13 @@ func (tc *Collection) hydrateTypesInTableDesc( return name, desc, nil }) - // Utilize the cache of hydrated tables if we have one. + // Utilize the cache of hydrated tables if we have one and this descriptor + // was leased. 
+ // TODO(ajwerner): Consider surfacing the mechanism used to retrieve the + // descriptor up to this layer. + if tc.hydratedTables != nil && + tc.uncommitted.descs.GetByID(desc.GetID()) == nil && + tc.synthetic.descs.GetByID(desc.GetID()) == nil { hydrated, err := tc.hydratedTables.GetHydratedTableDescriptor(ctx, t, getType) if err != nil { return nil, err diff --git a/pkg/sql/catalog/descs/validate.go b/pkg/sql/catalog/descs/validate.go index 44ca51298c94..f18cabda845a 100644 --- a/pkg/sql/catalog/descs/validate.go +++ b/pkg/sql/catalog/descs/validate.go @@ -124,10 +124,7 @@ func (c collectionBackedDereferencer) DereferenceDescriptors( func (c collectionBackedDereferencer) fastDescLookup( ctx context.Context, id descpb.ID, ) (catalog.Descriptor, error) { - if uc, status := c.tc.uncommitted.getImmutableByID(id); uc != nil { - if status == checkedOutAtLeastOnce { - return nil, nil - } + if uc, _ := c.tc.uncommitted.getImmutableByID(id); uc != nil { return uc, nil } return nil, nil diff --git a/pkg/sql/database.go b/pkg/sql/database.go index a8cc6b569aaa..84fdf737ed41 100644 --- a/pkg/sql/database.go +++ b/pkg/sql/database.go @@ -108,19 +108,28 @@ func (p *planner) forEachMutableTableInDatabase( return err } + // TODO(ajwerner): Rewrite this to not use the internalLookupCtx. lCtx := newInternalLookupCtx(all.OrderedDescriptors(), dbDesc) + var droppedRemoved []descpb.ID for _, tbID := range lCtx.tbIDs { desc := lCtx.tbDescs[tbID] if desc.Dropped() { continue } - mutable := tabledesc.NewBuilder(desc.TableDesc()).BuildExistingMutableTable() - schemaName, found, err := lCtx.GetSchemaName(ctx, desc.GetParentSchemaID(), desc.GetParentID(), p.ExecCfg().Settings.Version) + droppedRemoved = append(droppedRemoved, tbID) + } + descs, err := p.Descriptors().GetMutableDescriptorsByID(ctx, p.Txn(), droppedRemoved...) 
+ if err != nil { + return err + } + for _, d := range descs { + mutable := d.(*tabledesc.Mutable) + schemaName, found, err := lCtx.GetSchemaName(ctx, d.GetParentSchemaID(), d.GetParentID(), p.ExecCfg().Settings.Version) if err != nil { return err } if !found { - return errors.AssertionFailedf("schema id %d not found", desc.GetParentSchemaID()) + return errors.AssertionFailedf("schema id %d not found", d.GetParentSchemaID()) } if err := fn(ctx, schemaName, mutable); err != nil { return err diff --git a/pkg/sql/importer/import_job.go b/pkg/sql/importer/import_job.go index 016c108b19f6..2eaf13178f85 100644 --- a/pkg/sql/importer/import_job.go +++ b/pkg/sql/importer/import_job.go @@ -1515,6 +1515,7 @@ func (r *importResumer) dropTables( b := txn.NewBatch() tablesToGC := make([]descpb.ID, 0, len(details.Tables)) + toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables)) for _, tbl := range details.Tables { newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn) if err != nil { @@ -1536,9 +1537,13 @@ func (r *importResumer) dropTables( // IMPORT did not create this table, so we should not drop it. newTableDesc.SetPublic() } - if err := descsCol.WriteDescToBatch( - ctx, false /* kvTrace */, newTableDesc, b, - ); err != nil { + // Accumulate the changes before adding them to the batch to avoid + // making any table invalid before having read it. + toWrite = append(toWrite, newTableDesc) + } + for _, d := range toWrite { + const kvTrace = false + if err := descsCol.WriteDescToBatch(ctx, kvTrace, d, b); err != nil { return err } }