sql: ensure descriptors versions change only once, fix related bugs #79697

Merged (3 commits) on Apr 13, 2022
16 changes: 10 additions & 6 deletions pkg/ccl/backupccl/restore_job.go
@@ -2089,9 +2089,6 @@ func (r *restoreResumer) dropDescriptors(
tableToDrop.DropTime = dropTime
b.Del(catalogkeys.EncodeNameKey(codec, tableToDrop))
descsCol.AddDeletedDescriptor(tableToDrop.GetID())
if err := descsCol.WriteDescToBatch(ctx, false /* kvTrace */, tableToDrop, b); err != nil {
return errors.Wrap(err, "writing dropping table to batch")
}
}

// Drop the type descriptors that this restore created.
@@ -2111,9 +2108,6 @@ func (r *restoreResumer) dropDescriptors(

b.Del(catalogkeys.EncodeNameKey(codec, typDesc))
mutType.SetDropped()
if err := descsCol.WriteDescToBatch(ctx, false /* kvTrace */, mutType, b); err != nil {
return errors.Wrap(err, "writing dropping type to batch")
}
// Remove the system.descriptor entry.
b.Del(catalogkeys.MakeDescMetadataKey(codec, typDesc.ID))
descsCol.AddDeletedDescriptor(mutType.GetID())
@@ -2271,6 +2265,16 @@ func (r *restoreResumer) dropDescriptors(
deletedDBs[db.GetID()] = struct{}{}
}

// Avoid telling the descriptor collection about the mutated descriptors
// until after all relevant relations have been retrieved, so that we do
// not make a descriptor invalid too early.
const kvTrace = false
for _, t := range mutableTables {
if err := descsCol.WriteDescToBatch(ctx, kvTrace, t, b); err != nil {
return errors.Wrap(err, "writing dropping table to batch")
}
}

if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "dropping tables created at the start of restore caused by fail/cancel")
}
@@ -1,19 +1,19 @@
 exp,benchmark
 17,AlterPrimaryRegion/alter_empty_database_alter_primary_region
-23,AlterPrimaryRegion/alter_empty_database_set_initial_primary_region
+21,AlterPrimaryRegion/alter_empty_database_set_initial_primary_region
 17,AlterPrimaryRegion/alter_populated_database_alter_primary_region
 24,AlterPrimaryRegion/alter_populated_database_set_initial_primary_region
-17,AlterRegions/alter_empty_database_add_region
+14,AlterRegions/alter_empty_database_add_region
 17,AlterRegions/alter_empty_database_drop_region
-17,AlterRegions/alter_populated_database_add_region
+15,AlterRegions/alter_populated_database_add_region
 17,AlterRegions/alter_populated_database_drop_region
 17,AlterSurvivalGoals/alter_empty_database_from_region_to_zone
-18,AlterSurvivalGoals/alter_empty_database_from_zone_to_region
+16,AlterSurvivalGoals/alter_empty_database_from_zone_to_region
 37,AlterSurvivalGoals/alter_populated_database_from_region_to_zone
 38,AlterSurvivalGoals/alter_populated_database_from_zone_to_region
 15,AlterTableLocality/alter_from_global_to_rbr
 17,AlterTableLocality/alter_from_global_to_regional_by_table
-14,AlterTableLocality/alter_from_rbr_to_global
-14,AlterTableLocality/alter_from_rbr_to_regional_by_table
+12,AlterTableLocality/alter_from_rbr_to_global
+12,AlterTableLocality/alter_from_rbr_to_regional_by_table
 17,AlterTableLocality/alter_from_regional_by_table_to_global
 15,AlterTableLocality/alter_from_regional_by_table_to_rbr
15 changes: 12 additions & 3 deletions pkg/sql/alter_table_locality.go
@@ -121,7 +121,10 @@ func (n *alterTableSetLocalityNode) alterTableLocalityGlobalToRegionalByTable(

_, dbDesc, err := params.p.Descriptors().GetImmutableDatabaseByID(
params.ctx, params.p.txn, n.tableDesc.ParentID,
tree.DatabaseLookupFlags{Required: true})
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
@@ -185,7 +188,10 @@ func (n *alterTableSetLocalityNode) alterTableLocalityRegionalByTableToRegionalB

_, dbDesc, err := params.p.Descriptors().GetImmutableDatabaseByID(
params.ctx, params.p.txn, n.tableDesc.ParentID,
tree.DatabaseLookupFlags{Required: true})
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
@@ -656,7 +662,10 @@ func setNewLocalityConfig(
) error {
getMultiRegionTypeDesc := func() (*typedesc.Mutable, error) {
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
ctx, txn, desc.GetParentID(), tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return nil, err
}
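The AvoidLeased: true flag added throughout this PR makes the Collection resolve the database descriptor through the transaction (and its uncommitted descriptors) rather than through the lease manager, so a statement that is about to modify descriptors in the same transaction operates on the copy it will actually write instead of a separately leased, possibly older copy. A minimal sketch of such a lookup follows; the wrapper function and package name are invented for illustration, while the Collection call and flags match those used in this diff.

package leasesketch // illustrative only

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// lookupParentDB resolves tbl's parent database at the transaction's view of
// the catalog. AvoidLeased bypasses the lease manager; Required turns a
// missing descriptor into an error rather than a nil result.
func lookupParentDB(
	ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, tbl catalog.TableDescriptor,
) (catalog.DatabaseDescriptor, error) {
	_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
		ctx, txn, tbl.GetParentID(),
		tree.DatabaseLookupFlags{
			Required:    true,
			AvoidLeased: true,
		},
	)
	return dbDesc, err
}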
19 changes: 19 additions & 0 deletions pkg/sql/catalog/descs/descriptor.go
@@ -172,6 +172,25 @@ func (tc *Collection) getDescriptorsByID(
return nil, err
}
for j, desc := range kvDescs {
// Callers expect the descriptors to come back hydrated.
// In practice, array types here are not hydrated, and that's a bummer.
// Nobody presently is upset about it, but it's not a good thing.
// Ideally we'd have a clearer contract regarding hydration and the values
// stored in the various maps inside the collection. One might want to
// store only hydrated values in the various maps. This turns out to be
// somewhat tricky because we'd need to make sure to properly re-hydrate
// all the relevant descriptors when a type descriptor changes. Leased
// descriptors are at least as tricky, and there we already have a cache
// that works relatively well.
//
// TODO(ajwerner): Sort out the hydration mess; define clearly what is
// hydrated where and test the API boundary accordingly.
if table, isTable := desc.(catalog.TableDescriptor); isTable {
desc, err = tc.hydrateTypesInTableDesc(ctx, txn, table)
if err != nil {
return nil, err
}
}
descs[indexes[j]] = desc
}
return descs, nil
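The comment above concerns "hydration": as stored, a table descriptor refers to user-defined types (such as enums) only by ID, and callers of the Collection expect those references to already be resolved into full type metadata when the descriptor comes back. A toy model of the idea follows; every name in it is hypothetical and none of it is the CockroachDB API.

package hydrationsketch

import "fmt"

// storedColumn is a column as persisted: it refers to its type only by ID.
type storedColumn struct {
	name   string
	typeID int
}

// hydratedColumn carries the resolved type metadata inline, so readers never
// need a second catalog lookup.
type hydratedColumn struct {
	name     string
	enumVals []string
}

// hydrate resolves every type ID against the catalog. If a referenced type
// later changes (say an enum gains a value), previously hydrated copies go
// stale, which is why caching hydrated descriptors needs care.
func hydrate(cols []storedColumn, typeCatalog map[int][]string) ([]hydratedColumn, error) {
	out := make([]hydratedColumn, 0, len(cols))
	for _, c := range cols {
		vals, ok := typeCatalog[c.typeID]
		if !ok {
			return nil, fmt.Errorf("type %d not found", c.typeID)
		}
		out = append(out, hydratedColumn{name: c.name, enumVals: vals})
	}
	return out, nil
}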
9 changes: 7 additions & 2 deletions pkg/sql/catalog/descs/hydrate.go
@@ -97,8 +97,13 @@ func (tc *Collection) hydrateTypesInTableDesc(
return name, desc, nil
})

// Utilize the cache of hydrated tables if we have one.
if tc.hydratedTables != nil {
// Utilize the cache of hydrated tables if we have one and this descriptor
// was leased.
// TODO(ajwerner): Consider surfacing the mechanism used to retrieve the
// descriptor up to this layer.
if tc.hydratedTables != nil &&
tc.uncommitted.descs.GetByID(desc.GetID()) == nil &&
tc.synthetic.descs.GetByID(desc.GetID()) == nil {
hydrated, err := tc.hydratedTables.GetHydratedTableDescriptor(ctx, t, getType)
if err != nil {
return nil, err
7 changes: 7 additions & 0 deletions pkg/sql/catalog/descs/uncommitted_descriptors.go
@@ -164,6 +164,13 @@ func (ud *uncommittedDescriptors) add(
for _, n := range uNew.immutable.GetDrainingNames() {
ud.descNames.Add(n)
}
if prev, ok := ud.descs.GetByID(mut.GetID()).(*uncommittedDescriptor); ok {
if prev.mutable.OriginalVersion() != mut.OriginalVersion() {
return nil, errors.AssertionFailedf(
"cannot add a version of descriptor with a different original version" +
" than it was previously added with")
}
}
ud.descs.Upsert(uNew)
return uNew.immutable, err
}
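The new check backs the invariant in the PR title: every mutable copy of a descriptor added to a transaction's uncommitted set must have been read at the same stored version, otherwise the transaction could publish more than one version bump for that descriptor. A small sketch of the comparison being asserted; the standalone helper below is illustrative and not the real uncommittedDescriptors code, though OriginalVersion and AssertionFailedf are the real APIs.

package versionsketch // illustrative only

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/errors"
)

// checkSameOrigin mirrors the assertion above: a descriptor may be re-added
// to the uncommitted set only if it was originally read at the same version
// as the copy already registered under its ID.
func checkSameOrigin(prev, next catalog.MutableDescriptor) error {
	if prev != nil && prev.OriginalVersion() != next.OriginalVersion() {
		return errors.AssertionFailedf(
			"descriptor %d: previously added at original version %d, now %d",
			next.GetID(), prev.OriginalVersion(), next.OriginalVersion())
	}
	return nil
}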
5 changes: 1 addition & 4 deletions pkg/sql/catalog/descs/validate.go
@@ -124,10 +124,7 @@ func (c collectionBackedDereferencer) DereferenceDescriptors(
func (c collectionBackedDereferencer) fastDescLookup(
ctx context.Context, id descpb.ID,
) (catalog.Descriptor, error) {
if uc, status := c.tc.uncommitted.getImmutableByID(id); uc != nil {
if status == checkedOutAtLeastOnce {
return nil, nil
}
if uc, _ := c.tc.uncommitted.getImmutableByID(id); uc != nil {
return uc, nil
}
return nil, nil
5 changes: 4 additions & 1 deletion pkg/sql/create_table.go
@@ -445,7 +445,10 @@ func (n *createTableNode) startExec(params runParams) error {
params.ctx,
params.p.txn,
desc.ParentID,
tree.DatabaseLookupFlags{Required: true},
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
},
)
if err != nil {
return errors.Wrap(err, "error resolving database for multi-region")
15 changes: 12 additions & 3 deletions pkg/sql/database.go
@@ -108,19 +108,28 @@ func (p *planner) forEachMutableTableInDatabase(
return err
}

// TODO(ajwerner): Rewrite this to not use the internalLookupCtx.
lCtx := newInternalLookupCtx(all.OrderedDescriptors(), dbDesc)
var droppedRemoved []descpb.ID
for _, tbID := range lCtx.tbIDs {
desc := lCtx.tbDescs[tbID]
if desc.Dropped() {
continue
}
mutable := tabledesc.NewBuilder(desc.TableDesc()).BuildExistingMutableTable()
schemaName, found, err := lCtx.GetSchemaName(ctx, desc.GetParentSchemaID(), desc.GetParentID(), p.ExecCfg().Settings.Version)
droppedRemoved = append(droppedRemoved, tbID)
}
descs, err := p.Descriptors().GetMutableDescriptorsByID(ctx, p.Txn(), droppedRemoved...)
if err != nil {
return err
}
for _, d := range descs {
mutable := d.(*tabledesc.Mutable)
schemaName, found, err := lCtx.GetSchemaName(ctx, d.GetParentSchemaID(), d.GetParentID(), p.ExecCfg().Settings.Version)
if err != nil {
return err
}
if !found {
return errors.AssertionFailedf("schema id %d not found", desc.GetParentSchemaID())
return errors.AssertionFailedf("schema id %d not found", d.GetParentSchemaID())
}
if err := fn(ctx, schemaName, mutable); err != nil {
return err
8 changes: 6 additions & 2 deletions pkg/sql/database_region_change_finalizer.go
@@ -66,7 +66,8 @@ func newDatabaseRegionChangeFinalizer(
txn,
dbID,
tree.DatabaseLookupFlags{
Required: true,
Required: true,
AvoidLeased: true,
},
)
if err != nil {
@@ -176,7 +177,10 @@ func (r *databaseRegionChangeFinalizer) updateGlobalTablesZoneConfig(
ctx,
txn,
r.dbID,
tree.DatabaseLookupFlags{Required: true},
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
},
)
if err != nil {
return err
10 changes: 8 additions & 2 deletions pkg/sql/drop_type.go
@@ -197,7 +197,10 @@ func (p *planner) addBackRefsFromAllTypesInTable(
ctx context.Context, desc *tabledesc.Mutable,
) error {
_, dbDesc, err := p.Descriptors().GetImmutableDatabaseByID(
ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
@@ -224,7 +227,10 @@ func (p *planner) removeBackRefsFromAllTypesInTable(
ctx context.Context, desc *tabledesc.Mutable,
) error {
_, dbDesc, err := p.Descriptors().GetImmutableDatabaseByID(
ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
ctx, p.txn, desc.GetParentID(), tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
11 changes: 8 additions & 3 deletions pkg/sql/importer/import_job.go
@@ -1515,6 +1515,7 @@ func (r *importResumer) dropTables(

b := txn.NewBatch()
tablesToGC := make([]descpb.ID, 0, len(details.Tables))
toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables))
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
@@ -1536,9 +1537,13 @@
// IMPORT did not create this table, so we should not drop it.
newTableDesc.SetPublic()
}
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newTableDesc, b,
); err != nil {
// Accumulate the changes before adding them to the batch to avoid
// making any table invalid before having read it.
toWrite = append(toWrite, newTableDesc)
}
for _, d := range toWrite {
const kvTrace = false
if err := descsCol.WriteDescToBatch(ctx, kvTrace, d, b); err != nil {
return err
}
}
3 changes: 2 additions & 1 deletion pkg/sql/region_util.go
@@ -1113,7 +1113,8 @@ func (p *planner) ResetMultiRegionZoneConfigsForDatabase(ctx context.Context, id
p.txn,
descpb.ID(id),
tree.DatabaseLookupFlags{
Required: true,
Required: true,
AvoidLeased: true,
},
)
if err != nil {
6 changes: 4 additions & 2 deletions pkg/sql/resolver.go
@@ -797,7 +797,8 @@ func (p *planner) getQualifiedSchemaName(
) (*tree.ObjectNamePrefix, error) {
_, dbDesc, err := p.Descriptors().GetImmutableDatabaseByID(ctx, p.txn, desc.GetParentID(),
tree.DatabaseLookupFlags{
Required: true,
Required: true,
AvoidLeased: true,
})
if err != nil {
return nil, err
@@ -817,7 +818,8 @@ func (p *planner) getQualifiedTypeName(
) (*tree.TypeName, error) {
_, dbDesc, err := p.Descriptors().GetImmutableDatabaseByID(ctx, p.txn, desc.GetParentID(),
tree.DatabaseLookupFlags{
Required: true,
Required: true,
AvoidLeased: true,
})
if err != nil {
return nil, err
10 changes: 8 additions & 2 deletions pkg/sql/schema_changer.go
@@ -1060,7 +1060,10 @@ func (sc *SchemaChanger) RunStateMachineBeforeBackfill(ctx context.Context) erro
ctx,
txn,
tbl.GetParentID(),
tree.DatabaseLookupFlags{Required: true},
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
},
)
if err != nil {
return err
@@ -1315,7 +1318,10 @@ func (sc *SchemaChanger) done(ctx context.Context) error {
ctx,
txn,
scTable.GetParentID(),
tree.DatabaseLookupFlags{Required: true},
tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
},
)
if err != nil {
return err
17 changes: 14 additions & 3 deletions pkg/sql/type_change.go
@@ -768,7 +768,12 @@ func (t *typeSchemaChanger) canRemoveEnumValue(
descsCol *descs.Collection,
) error {
for _, ID := range typeDesc.ReferencingDescriptorIDs {
desc, err := descsCol.GetImmutableTableByID(ctx, txn, ID, tree.ObjectLookupFlags{})
desc, err := descsCol.GetImmutableTableByID(ctx, txn, ID, tree.ObjectLookupFlags{
CommonLookupFlags: tree.CommonLookupFlags{
AvoidLeased: true,
Required: true,
},
})
if err != nil {
return errors.Wrapf(err,
"could not validate enum value removal for %q", member.LogicalRepresentation)
@@ -927,7 +932,10 @@ func (t *typeSchemaChanger) canRemoveEnumValue(
// because the enum value may be used in a view expression, which is
// name resolved in the context of the type's database.
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{Required: true})
ctx, txn, typeDesc.ParentID, tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
const validationErr = "could not validate removal of enum value %q"
if err != nil {
return errors.Wrapf(err, validationErr, member.LogicalRepresentation)
@@ -1156,7 +1164,10 @@ func (t *typeSchemaChanger) canRemoveEnumValueFromArrayUsages(
query.WriteString(fmt.Sprintf(") WHERE unnest = %s", sqlPhysRep))

_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, arrayTypeDesc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
ctx, txn, arrayTypeDesc.GetParentID(), tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return errors.Wrapf(err, validationErr, member.LogicalRepresentation)
}