release-21.1: sql: stop mutating AST in AlterPrimaryKey #61792

Merged · 1 commit · Mar 10, 2021
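
This release-21.1 change stops AlterPrimaryKey from mutating the statement AST: the tree.AlterTableAlterPrimaryKey node is now passed by value, and setupShardedIndex returns the rewritten column list instead of splicing the shard column into the caller's tree.IndexElemList through a pointer. A minimal sketch of the two shapes, using hypothetical simplified types rather than the real tree definitions:

package main

import "fmt"

// IndexElem and IndexElemList are simplified stand-ins for the tree package
// types in the diff; this is an illustration, not the CockroachDB code.
type IndexElem struct{ Column string }
type IndexElemList []IndexElem

// Old shape (sketch): the shard column is spliced into the caller's list
// through a pointer, so the AST node that owns the slice gets rewritten.
func prependShardColInPlace(columns *IndexElemList, shard IndexElem) {
    *columns = append(IndexElemList{shard}, *columns...)
}

// New shape (sketch): build and return a fresh list; the input stays intact
// and the caller decides where the result goes.
func prependShardCol(columns IndexElemList, shard IndexElem) IndexElemList {
    return append(IndexElemList{shard}, columns...)
}

func main() {
    astCols := IndexElemList{{Column: "k"}}
    idxCols := prependShardCol(astCols, IndexElem{Column: "k_shard"})
    fmt.Println(astCols) // [{k}] -- the AST's list is untouched
    fmt.Println(idxCols) // [{k_shard} {k}]
}

With the second shape, whoever builds the index descriptor decides where the new list goes, and the AST node that produced it is left exactly as it was parsed.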
9 changes: 5 additions & 4 deletions pkg/sql/alter_primary_key.go
@@ -48,7 +48,7 @@ type alterPrimaryKeyLocalitySwap struct {
func (p *planner) AlterPrimaryKey(
ctx context.Context,
tableDesc *tabledesc.Mutable,
alterPKNode *tree.AlterTableAlterPrimaryKey,
alterPKNode tree.AlterTableAlterPrimaryKey,
alterPrimaryKeyLocalitySwap *alterPrimaryKeyLocalitySwap,
) error {
if alterPKNode.Interleave != nil {
@@ -153,7 +153,7 @@ func (p *planner) AlterPrimaryKey(
// primary index, which would mean nothing needs to be modified
// here.
{
requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, alterPKNode, alterPrimaryKeyLocalitySwap)
requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, &alterPKNode, alterPrimaryKeyLocalitySwap)
if err != nil {
return err
}
@@ -190,12 +190,12 @@ func (p *planner) AlterPrimaryKey(
// If the new index is requested to be sharded, set up the index descriptor
// to be sharded, and add the new shard column if it is missing.
if alterPKNode.Sharded != nil {
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
ctx,
p.EvalContext(),
&p.semaCtx,
p.SessionData().HashShardedIndexesEnabled,
&alterPKNode.Columns,
alterPKNode.Columns,
alterPKNode.Sharded.ShardBuckets,
tableDesc,
newPrimaryIndexDesc,
@@ -204,6 +204,7 @@ func (p *planner) AlterPrimaryKey(
if err != nil {
return err
}
alterPKNode.Columns = newColumns
if newColumn {
if err := p.setupFamilyAndConstraintForShard(
ctx,
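
For illustration, a sketch of why taking the node by value is enough here even though the function then does alterPKNode.Columns = newColumns: the field reassignment lands on the local copy, and the planner's AST keeps the columns the user wrote. The node type below is a hypothetical stand-in, not the real tree.AlterTableAlterPrimaryKey.

package main

import "fmt"

// alterPKNodeSketch is a hypothetical stand-in for tree.AlterTableAlterPrimaryKey.
type alterPKNodeSketch struct {
    Columns []string
}

// alterPrimaryKey receives the node by value, so overwriting Columns with a
// freshly built list only changes this copy.
func alterPrimaryKey(node alterPKNodeSketch) []string {
    newColumns := append([]string{"k_shard"}, node.Columns...)
    node.Columns = newColumns
    return node.Columns
}

func main() {
    stmt := alterPKNodeSketch{Columns: []string{"k"}}
    indexCols := alterPrimaryKey(stmt)
    fmt.Println(stmt.Columns) // [k] -- the statement AST keeps the user's columns
    fmt.Println(indexCols)    // [k_shard k]
}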
4 changes: 2 additions & 2 deletions pkg/sql/alter_table.go
@@ -220,7 +220,7 @@ func (n *alterTableNode) startExec(params runParams) error {
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
alterPK,
*alterPK,
nil, /* localityConfigSwap */
); err != nil {
return err
@@ -373,7 +373,7 @@ func (n *alterTableNode) startExec(params runParams) error {
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
t,
*t,
nil, /* localityConfigSwap */
); err != nil {
return err
2 changes: 1 addition & 1 deletion pkg/sql/alter_table_locality.go
@@ -430,7 +430,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityFromOrToRegionalByRow(
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
&tree.AlterTableAlterPrimaryKey{
tree.AlterTableAlterPrimaryKey{
Name: tree.Name(n.tableDesc.PrimaryIndex.Name),
Columns: cols,
},
35 changes: 20 additions & 15 deletions pkg/sql/create_index.go
@@ -145,7 +145,7 @@ func (p *planner) setupFamilyAndConstraintForShard(
// is hash sharded. Note that `tableDesc` will be modified when this method is called for
// a hash sharded index.
func MakeIndexDescriptor(
params runParams, n *tree.CreateIndex, tableDesc *tabledesc.Mutable,
params runParams, n tree.CreateIndex, tableDesc *tabledesc.Mutable,
) (*descpb.IndexDescriptor, error) {
// Ensure that the columns we want to index exist before trying to create the
// index.
@@ -200,7 +200,7 @@ func MakeIndexDescriptor(
}
telemetry.Inc(sqltelemetry.InvertedIndexCounter)
}

columns := n.Columns
if n.Sharded != nil {
if n.PartitionByIndex.ContainsPartitions() {
return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
@@ -211,19 +211,20 @@ func MakeIndexDescriptor(
if n.Interleave != nil {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
params.ctx,
params.EvalContext(),
&params.p.semaCtx,
params.SessionData().HashShardedIndexesEnabled,
&n.Columns,
n.Columns,
n.Sharded.ShardBuckets,
tableDesc,
&indexDesc,
false /* isNewTable */)
if err != nil {
return nil, err
}
columns = newColumns
if newColumn {
if err := params.p.setupFamilyAndConstraintForShard(params.ctx, tableDesc, shardCol,
indexDesc.Sharded.ColumnNames, indexDesc.Sharded.ShardBuckets); err != nil {
@@ -243,7 +244,7 @@ func MakeIndexDescriptor(
telemetry.Inc(sqltelemetry.PartialIndexCounter)
}

if err := indexDesc.FillColumns(n.Columns); err != nil {
if err := indexDesc.FillColumns(columns); err != nil {
return nil, err
}

@@ -285,46 +286,50 @@ func (n *createIndexNode) ReadingOwnWrites() {}
var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,
"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable")

// setupShardedIndex updates the index descriptor with the relevant new column.
// It also returns the column so it can be added to the table. It returns the
// new column set for the index. This set must be used regardless of whether or
// not there is a newly created column.
func setupShardedIndex(
ctx context.Context,
evalCtx *tree.EvalContext,
semaCtx *tree.SemaContext,
shardedIndexEnabled bool,
columns *tree.IndexElemList,
columns tree.IndexElemList,
bucketsExpr tree.Expr,
tableDesc *tabledesc.Mutable,
indexDesc *descpb.IndexDescriptor,
isNewTable bool,
) (shard *descpb.ColumnDescriptor, newColumn bool, err error) {
) (shard *descpb.ColumnDescriptor, newColumns tree.IndexElemList, newColumn bool, err error) {
if !shardedIndexEnabled {
return nil, false, hashShardedIndexesDisabledError
return nil, nil, false, hashShardedIndexesDisabledError
}

colNames := make([]string, 0, len(*columns))
for _, c := range *columns {
colNames := make([]string, 0, len(columns))
for _, c := range columns {
colNames = append(colNames, string(c.Column))
}
buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr)
if err != nil {
return nil, false, err
return nil, nil, false, err
}
shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc,
colNames, isNewTable)
if err != nil {
return nil, false, err
return nil, nil, false, err
}
shardIdxElem := tree.IndexElem{
Column: tree.Name(shardCol.Name),
Direction: tree.Ascending,
}
*columns = append(tree.IndexElemList{shardIdxElem}, *columns...)
newColumns = append(tree.IndexElemList{shardIdxElem}, columns...)
indexDesc.Sharded = descpb.ShardedDescriptor{
IsSharded: true,
Name: shardCol.Name,
ShardBuckets: buckets,
ColumnNames: colNames,
}
return shardCol, newColumn, nil
return shardCol, newColumns, newColumn, nil
}

// maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to
@@ -437,7 +442,7 @@ func (n *createIndexNode) startExec(params runParams) error {
}
}

indexDesc, err := MakeIndexDescriptor(params, n.n, n.tableDesc)
indexDesc, err := MakeIndexDescriptor(params, *n.n, n.tableDesc)
if err != nil {
return err
}
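
The new doc comment on setupShardedIndex above captures the contract: the returned column set must be used whether or not a shard column was actually created. A self-contained sketch of that caller pattern, with hypothetical stand-ins for the descriptor and AST types:

package main

import "fmt"

// Simplified, hypothetical stand-ins for the AST and descriptor types.
type IndexElem struct{ Column string }
type IndexElemList []IndexElem

// setupShardedIndexSketch mirrors the new return shape: the shard column, the
// rewritten column list, and whether a column was created all flow back to the
// caller instead of being written into the AST.
func setupShardedIndexSketch(columns IndexElemList) (shardCol IndexElem, newColumns IndexElemList, newColumn bool, err error) {
    shardCol = IndexElem{Column: "crdb_internal_k_shard"}
    newColumns = append(IndexElemList{shardCol}, columns...)
    return shardCol, newColumns, true, nil
}

// fillColumnsSketch stands in for indexDesc.FillColumns.
func fillColumnsSketch(columns IndexElemList) []string {
    names := make([]string, len(columns))
    for i, c := range columns {
        names[i] = c.Column
    }
    return names
}

func main() {
    astColumns := IndexElemList{{Column: "a"}, {Column: "b"}}

    // Caller pattern from the diff: default to the AST's list, overwrite the
    // local when sharding rewrites it, and always fill the index from that local.
    columns := astColumns
    if _, newColumns, _, err := setupShardedIndexSketch(columns); err == nil {
        columns = newColumns
    }

    fmt.Println(fillColumnsSketch(columns)) // [crdb_internal_k_shard a b]
    fmt.Println(astColumns)                 // [{a} {b}] -- AST untouched
}

The same default-then-overwrite pattern appears again in NewTableDesc below, where each index definition starts from d.Columns and swaps in the list returned by setupShardedIndexForNewTable.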
34 changes: 21 additions & 13 deletions pkg/sql/create_table.go
@@ -1877,36 +1877,38 @@ func NewTableDesc(
}
}

setupShardedIndexForNewTable := func(d *tree.IndexTableDef, idx *descpb.IndexDescriptor) error {
setupShardedIndexForNewTable := func(
d tree.IndexTableDef, idx *descpb.IndexDescriptor,
) (columns tree.IndexElemList, _ error) {
if n.PartitionByTable.ContainsPartitions() {
return pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
}
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
ctx,
evalCtx,
semaCtx,
sessionData.HashShardedIndexesEnabled,
&d.Columns,
d.Columns,
d.Sharded.ShardBuckets,
&desc,
idx,
true /* isNewTable */)
if err != nil {
return err
return nil, err
}
if newColumn {
buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets)
if err != nil {
return err
return nil, err
}
checkConstraint, err := makeShardCheckConstraintDef(&desc, int(buckets), shardCol)
if err != nil {
return err
return nil, err
}
n.Defs = append(n.Defs, checkConstraint)
columnDefaultExprs = append(columnDefaultExprs, nil)
}
return nil
return newColumns, nil
}

idxValidator := schemaexpr.MakeIndexPredicateValidator(ctx, n.Table, &desc, semaCtx)
@@ -1929,18 +1931,21 @@ func NewTableDesc(
if d.Inverted {
idx.Type = descpb.IndexDescriptor_INVERTED
}
columns := d.Columns
if d.Sharded != nil {
if d.Interleave != nil {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
if isRegionalByRow {
return nil, hashShardedIndexesOnRegionalByRowError()
}
if err := setupShardedIndexForNewTable(d, &idx); err != nil {
var err error
columns, err = setupShardedIndexForNewTable(*d, &idx)
if err != nil {
return nil, err
}
}
if err := idx.FillColumns(d.Columns); err != nil {
if err := idx.FillColumns(columns); err != nil {
return nil, err
}
if d.Inverted {
@@ -2024,18 +2029,21 @@ func NewTableDesc(
StoreColumnNames: d.Storing.ToStrings(),
Version: indexEncodingVersion,
}
columns := d.Columns
if d.Sharded != nil {
if n.Interleave != nil && d.PrimaryKey {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
if isRegionalByRow {
return nil, hashShardedIndexesOnRegionalByRowError()
}
if err := setupShardedIndexForNewTable(&d.IndexTableDef, &idx); err != nil {
var err error
columns, err = setupShardedIndexForNewTable(d.IndexTableDef, &idx)
if err != nil {
return nil, err
}
}
if err := idx.FillColumns(d.Columns); err != nil {
if err := idx.FillColumns(columns); err != nil {
return nil, err
}
// Specifying a partitioning on a PRIMARY KEY constraint should be disallowed by the
@@ -2096,7 +2104,7 @@ func NewTableDesc(
"interleave not supported in primary key constraint definition",
)
}
for _, c := range d.Columns {
for _, c := range columns {
primaryIndexColumnSet[string(c.Column)] = struct{}{}
}
}
Expand Down