Skip to content

Commit

Permalink
sql: stop mutating AST in AlterPrimaryKey
Browse files Browse the repository at this point in the history
The code in cockroachdb#61345 added a test that, under stress, nicely revealed a bug.
The bug is that we're mutating the AST in place and then, on retries, we hit
an error. The fix is to not mutate the AST.

I wish I had a more comprehensive testing strategy to ensure this couldn't
happen again. One option would be to clone the AST when passing it to the
various DDL plan node constructors; that's very defensive, but probably fine.
Another useful safeguard would be to assert, after planning, that the AST
did not change.

Touches cockroachdb#60824.

Release note: None
  • Loading branch information
ajwerner committed Mar 10, 2021
1 parent 8e04b62 commit 3a45eae
Show file tree
Hide file tree
Showing 5 changed files with 45 additions and 33 deletions.
11 changes: 7 additions & 4 deletions pkg/sql/alter_primary_key.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/errors"
)
Expand All @@ -48,7 +49,7 @@ type alterPrimaryKeyLocalitySwap struct {
func (p *planner) AlterPrimaryKey(
ctx context.Context,
tableDesc *tabledesc.Mutable,
alterPKNode *tree.AlterTableAlterPrimaryKey,
alterPKNode tree.AlterTableAlterPrimaryKey,
alterPrimaryKeyLocalitySwap *alterPrimaryKeyLocalitySwap,
) error {
if alterPKNode.Interleave != nil {
Expand Down Expand Up @@ -153,11 +154,12 @@ func (p *planner) AlterPrimaryKey(
// primary index, which would mean nothing needs to be modified
// here.
{
requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, alterPKNode, alterPrimaryKeyLocalitySwap)
requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, &alterPKNode, alterPrimaryKeyLocalitySwap)
if err != nil {
return err
}
if !requiresIndexChange {
log.Infof(ctx, "does not require index change %v %v", tableDesc, alterPKNode)
return nil
}
}
Expand Down Expand Up @@ -190,12 +192,12 @@ func (p *planner) AlterPrimaryKey(
// If the new index is requested to be sharded, set up the index descriptor
// to be sharded, and add the new shard column if it is missing.
if alterPKNode.Sharded != nil {
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
ctx,
p.EvalContext(),
&p.semaCtx,
p.SessionData().HashShardedIndexesEnabled,
&alterPKNode.Columns,
alterPKNode.Columns,
alterPKNode.Sharded.ShardBuckets,
tableDesc,
newPrimaryIndexDesc,
Expand All @@ -205,6 +207,7 @@ func (p *planner) AlterPrimaryKey(
return err
}
if newColumn {
alterPKNode.Columns = newColumns
if err := p.setupFamilyAndConstraintForShard(
ctx,
tableDesc,
Expand Down
4 changes: 2 additions & 2 deletions pkg/sql/alter_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ func (n *alterTableNode) startExec(params runParams) error {
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
alterPK,
*alterPK,
nil, /* localityConfigSwap */
); err != nil {
return err
Expand Down Expand Up @@ -373,7 +373,7 @@ func (n *alterTableNode) startExec(params runParams) error {
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
t,
*t,
nil, /* localityConfigSwap */
); err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/alter_table_locality.go
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityFromOrToRegionalByRow(
if err := params.p.AlterPrimaryKey(
params.ctx,
n.tableDesc,
&tree.AlterTableAlterPrimaryKey{
tree.AlterTableAlterPrimaryKey{
Name: tree.Name(n.tableDesc.PrimaryIndex.Name),
Columns: cols,
},
Expand Down
27 changes: 14 additions & 13 deletions pkg/sql/create_index.go
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ func (p *planner) setupFamilyAndConstraintForShard(
// is hash sharded. Note that `tableDesc` will be modified when this method is called for
// a hash sharded index.
func MakeIndexDescriptor(
params runParams, n *tree.CreateIndex, tableDesc *tabledesc.Mutable,
params runParams, n tree.CreateIndex, tableDesc *tabledesc.Mutable,
) (*descpb.IndexDescriptor, error) {
// Ensure that the columns we want to index exist before trying to create the
// index.
Expand Down Expand Up @@ -211,12 +211,12 @@ func MakeIndexDescriptor(
if n.Interleave != nil {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
params.ctx,
params.EvalContext(),
&params.p.semaCtx,
params.SessionData().HashShardedIndexesEnabled,
&n.Columns,
n.Columns,
n.Sharded.ShardBuckets,
tableDesc,
&indexDesc,
Expand All @@ -225,6 +225,7 @@ func MakeIndexDescriptor(
return nil, err
}
if newColumn {
n.Columns = newColumns
if err := params.p.setupFamilyAndConstraintForShard(params.ctx, tableDesc, shardCol,
indexDesc.Sharded.ColumnNames, indexDesc.Sharded.ShardBuckets); err != nil {
return nil, err
Expand Down Expand Up @@ -290,41 +291,41 @@ func setupShardedIndex(
evalCtx *tree.EvalContext,
semaCtx *tree.SemaContext,
shardedIndexEnabled bool,
columns *tree.IndexElemList,
columns tree.IndexElemList,
bucketsExpr tree.Expr,
tableDesc *tabledesc.Mutable,
indexDesc *descpb.IndexDescriptor,
isNewTable bool,
) (shard *descpb.ColumnDescriptor, newColumn bool, err error) {
) (shard *descpb.ColumnDescriptor, newColumns tree.IndexElemList, newColumn bool, err error) {
if !shardedIndexEnabled {
return nil, false, hashShardedIndexesDisabledError
return nil, nil, false, hashShardedIndexesDisabledError
}

colNames := make([]string, 0, len(*columns))
for _, c := range *columns {
colNames := make([]string, 0, len(columns))
for _, c := range columns {
colNames = append(colNames, string(c.Column))
}
buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr)
if err != nil {
return nil, false, err
return nil, nil, false, err
}
shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc,
colNames, isNewTable)
if err != nil {
return nil, false, err
return nil, nil, false, err
}
shardIdxElem := tree.IndexElem{
Column: tree.Name(shardCol.Name),
Direction: tree.Ascending,
}
*columns = append(tree.IndexElemList{shardIdxElem}, *columns...)
newColumns = append(tree.IndexElemList{shardIdxElem}, columns...)
indexDesc.Sharded = descpb.ShardedDescriptor{
IsSharded: true,
Name: shardCol.Name,
ShardBuckets: buckets,
ColumnNames: colNames,
}
return shardCol, newColumn, nil
return shardCol, newColumns, newColumn, nil
}

// maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to
Expand Down Expand Up @@ -437,7 +438,7 @@ func (n *createIndexNode) startExec(params runParams) error {
}
}

indexDesc, err := MakeIndexDescriptor(params, n.n, n.tableDesc)
indexDesc, err := MakeIndexDescriptor(params, *n.n, n.tableDesc)
if err != nil {
return err
}
Expand Down
34 changes: 21 additions & 13 deletions pkg/sql/create_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -1877,36 +1877,38 @@ func NewTableDesc(
}
}

setupShardedIndexForNewTable := func(d *tree.IndexTableDef, idx *descpb.IndexDescriptor) error {
setupShardedIndexForNewTable := func(
d tree.IndexTableDef, idx *descpb.IndexDescriptor,
) (columns tree.IndexElemList, _ error) {
if n.PartitionByTable.ContainsPartitions() {
return pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
}
shardCol, newColumn, err := setupShardedIndex(
shardCol, newColumns, newColumn, err := setupShardedIndex(
ctx,
evalCtx,
semaCtx,
sessionData.HashShardedIndexesEnabled,
&d.Columns,
d.Columns,
d.Sharded.ShardBuckets,
&desc,
idx,
true /* isNewTable */)
if err != nil {
return err
return nil, err
}
if newColumn {
buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets)
if err != nil {
return err
return nil, err
}
checkConstraint, err := makeShardCheckConstraintDef(&desc, int(buckets), shardCol)
if err != nil {
return err
return nil, err
}
n.Defs = append(n.Defs, checkConstraint)
columnDefaultExprs = append(columnDefaultExprs, nil)
}
return nil
return newColumns, nil
}

idxValidator := schemaexpr.MakeIndexPredicateValidator(ctx, n.Table, &desc, semaCtx)
Expand All @@ -1929,18 +1931,21 @@ func NewTableDesc(
if d.Inverted {
idx.Type = descpb.IndexDescriptor_INVERTED
}
columns := d.Columns
if d.Sharded != nil {
if d.Interleave != nil {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
if isRegionalByRow {
return nil, hashShardedIndexesOnRegionalByRowError()
}
if err := setupShardedIndexForNewTable(d, &idx); err != nil {
var err error
columns, err = setupShardedIndexForNewTable(*d, &idx)
if err != nil {
return nil, err
}
}
if err := idx.FillColumns(d.Columns); err != nil {
if err := idx.FillColumns(columns); err != nil {
return nil, err
}
if d.Inverted {
Expand Down Expand Up @@ -2024,18 +2029,21 @@ func NewTableDesc(
StoreColumnNames: d.Storing.ToStrings(),
Version: indexEncodingVersion,
}
columns := d.Columns
if d.Sharded != nil {
if n.Interleave != nil && d.PrimaryKey {
return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
}
if isRegionalByRow {
return nil, hashShardedIndexesOnRegionalByRowError()
}
if err := setupShardedIndexForNewTable(&d.IndexTableDef, &idx); err != nil {
var err error
columns, err = setupShardedIndexForNewTable(d.IndexTableDef, &idx)
if err != nil {
return nil, err
}
}
if err := idx.FillColumns(d.Columns); err != nil {
if err := idx.FillColumns(columns); err != nil {
return nil, err
}
// Specifying a partitioning on a PRIMARY KEY constraint should be disallowed by the
Expand Down Expand Up @@ -2096,7 +2104,7 @@ func NewTableDesc(
"interleave not supported in primary key constraint definition",
)
}
for _, c := range d.Columns {
for _, c := range columns {
primaryIndexColumnSet[string(c.Column)] = struct{}{}
}
}
Expand Down

0 comments on commit 3a45eae

Please sign in to comment.