61784: sql: stop mutating AST in AlterPrimaryKey r=fqazi a=ajwerner

The code in #61345 added a test that, under stress, nicely revealed a bug.
The bug is that we're mutating the AST in place and then, on retries, we hit
an error. The fix is to not mutate the AST.
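
To make the failure mode concrete, here is a minimal, self-contained
sketch (stand-in types, not the actual planner code) of why in-place
mutation breaks retries:

  package main

  import "fmt"

  // IndexElem and AlterPrimaryKey stand in for the tree.IndexElem and
  // tree.AlterTableAlterPrimaryKey AST nodes.
  type IndexElem struct{ Column string }

  type AlterPrimaryKey struct{ Columns []IndexElem }

  // planMutating mimics the bug: it prepends a shard column to the
  // caller's AST node, so the mutation is still there on a retry.
  func planMutating(n *AlterPrimaryKey) {
  	n.Columns = append([]IndexElem{{Column: "shard"}}, n.Columns...)
  }

  // planByValue mimics the fix: the node is passed by value, so only the
  // copy's slice header is reassigned and the caller's AST is untouched.
  func planByValue(n AlterPrimaryKey) {
  	n.Columns = append([]IndexElem{{Column: "shard"}}, n.Columns...)
  }

  func main() {
  	ast := AlterPrimaryKey{Columns: []IndexElem{{Column: "k"}}}
  	planMutating(&ast)
  	planMutating(&ast)       // retry: the shard column is now duplicated
  	fmt.Println(ast.Columns) // [{shard} {shard} {k}]

  	ast2 := AlterPrimaryKey{Columns: []IndexElem{{Column: "k"}}}
  	planByValue(ast2)
  	planByValue(ast2)         // retry sees the original AST
  	fmt.Println(ast2.Columns) // [{k}]
  }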

I wish I had a more comprehensive testing strategy to ensure this didn't
happen. On some level, we could clone the AST when passing it to various DDL
plan node constructors. That's very defensive but also probably fine. Another
thing that would be cool would be to just assert that after planning the AST
did not change.
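
For instance, something along these lines (a hypothetical helper, not
part of this change; it assumes the statement formats deterministically
via tree.AsStringWithFlags):

  import (
  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"

  	"github.com/cockroachdb/errors"
  )

  // assertASTUnchanged formats the statement before planning, runs the
  // planning closure, and fails loudly if the formatted form differs
  // afterwards.
  func assertASTUnchanged(stmt tree.Statement, plan func() error) error {
  	before := tree.AsStringWithFlags(stmt, tree.FmtParsable)
  	if err := plan(); err != nil {
  		return err
  	}
  	after := tree.AsStringWithFlags(stmt, tree.FmtParsable)
  	if before != after {
  		return errors.AssertionFailedf(
  			"planning mutated the AST: %q became %q", before, after)
  	}
  	return nil
  }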

Touches #60824.

Release note: None

61791: kv/kvserver: skip TestEagerReplication r=RaduBerinde a=adityamaru

Refs: #54646

Reason: flaky test

Generated by bin/skip-test.

Release justification: non-production code changes

Release note: None

61796: bazel: make `pkg/internal/rsg/yacc` test runnable in Bazel r=rickystewart a=rickystewart

This test was missing a data dependency on `sql.y`. It now declares `//pkg/sql/parser:sql.y` in its `data` and locates the file via the Bazel runfiles tree when built with Bazel.

Release note: None

61797: bazel: make `pkg/sql/parser` test runnable in Bazel r=rickystewart a=rickystewart

There was a typo in the `genrule` definition here: the command redirected the generator's output to `$@` instead of `$@.tmp`, so the `mv -f $@.tmp $@` that followed had nothing to move.

Release note: None

Co-authored-by: Andrew Werner <[email protected]>
Co-authored-by: Aditya Maru <[email protected]>
Co-authored-by: Ricky Stewart <[email protected]>
4 people committed Mar 10, 2021
5 parents cd96b86 + dd32aa8 + 2d28a86 + 3457f76 + 2ef08dd commit a885cb9
Showing 9 changed files with 71 additions and 40 deletions.
7 changes: 5 additions & 2 deletions pkg/internal/rsg/yacc/BUILD.bazel
@@ -16,7 +16,10 @@ go_test(
     name = "yacc_test",
     size = "small",
     srcs = ["parse_test.go"],
+    data = ["//pkg/sql/parser:sql.y"],
     embed = [":yacc"],
-    tags = ["broken_in_bazel"],
-    deps = ["//pkg/util/log"],
+    deps = [
+        "//pkg/build/bazel",
+        "//pkg/util/log",
+    ],
 )
15 changes: 14 additions & 1 deletion pkg/internal/rsg/yacc/parse_test.go
@@ -14,11 +14,24 @@ import (
"io/ioutil"
"testing"

"github.com/cockroachdb/cockroach/pkg/build/bazel"
// Needed for the -verbosity flag on circleci tests.
_ "github.com/cockroachdb/cockroach/pkg/util/log"
)

const sqlYPath = "../../../sql/parser/sql.y"
var sqlYPath string

func init() {
if bazel.BuiltWithBazel() {
runfile, err := bazel.Runfile("pkg/sql/parser/sql.y")
if err != nil {
panic(err)
}
sqlYPath = runfile
} else {
sqlYPath = "../../../sql/parser/sql.y"
}
}

func TestLex(t *testing.T) {
b, err := ioutil.ReadFile(sqlYPath)
2 changes: 2 additions & 0 deletions pkg/kv/kvserver/replicate_test.go
@@ -21,6 +21,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
@@ -29,6 +30,7 @@ import (

 func TestEagerReplication(t *testing.T) {
 	defer leaktest.AfterTest(t)()
+	skip.WithIssue(t, 54646, "flaky test")
 	defer log.Scope(t).Close(t)
 
 	ctx := context.Background()
9 changes: 5 additions & 4 deletions pkg/sql/alter_primary_key.go
@@ -48,7 +48,7 @@ type alterPrimaryKeyLocalitySwap struct {
 func (p *planner) AlterPrimaryKey(
 	ctx context.Context,
 	tableDesc *tabledesc.Mutable,
-	alterPKNode *tree.AlterTableAlterPrimaryKey,
+	alterPKNode tree.AlterTableAlterPrimaryKey,
 	alterPrimaryKeyLocalitySwap *alterPrimaryKeyLocalitySwap,
 ) error {
 	if alterPKNode.Interleave != nil {
@@ -153,7 +153,7 @@ func (p *planner) AlterPrimaryKey(
 	// primary index, which would mean nothing needs to be modified
 	// here.
 	{
-		requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, alterPKNode, alterPrimaryKeyLocalitySwap)
+		requiresIndexChange, err := p.shouldCreateIndexes(ctx, tableDesc, &alterPKNode, alterPrimaryKeyLocalitySwap)
 		if err != nil {
 			return err
 		}
@@ -190,12 +190,12 @@
 	// If the new index is requested to be sharded, set up the index descriptor
 	// to be sharded, and add the new shard column if it is missing.
 	if alterPKNode.Sharded != nil {
-		shardCol, newColumn, err := setupShardedIndex(
+		shardCol, newColumns, newColumn, err := setupShardedIndex(
 			ctx,
 			p.EvalContext(),
 			&p.semaCtx,
 			p.SessionData().HashShardedIndexesEnabled,
-			&alterPKNode.Columns,
+			alterPKNode.Columns,
 			alterPKNode.Sharded.ShardBuckets,
 			tableDesc,
 			newPrimaryIndexDesc,
@@ -204,6 +204,7 @@
 	if err != nil {
 		return err
 	}
+	alterPKNode.Columns = newColumns
 	if newColumn {
 		if err := p.setupFamilyAndConstraintForShard(
 			ctx,
4 changes: 2 additions & 2 deletions pkg/sql/alter_table.go
@@ -220,7 +220,7 @@ func (n *alterTableNode) startExec(params runParams) error {
 		if err := params.p.AlterPrimaryKey(
 			params.ctx,
 			n.tableDesc,
-			alterPK,
+			*alterPK,
 			nil, /* localityConfigSwap */
 		); err != nil {
 			return err
@@ -373,7 +373,7 @@ func (n *alterTableNode) startExec(params runParams) error {
 		if err := params.p.AlterPrimaryKey(
 			params.ctx,
 			n.tableDesc,
-			t,
+			*t,
 			nil, /* localityConfigSwap */
 		); err != nil {
 			return err
2 changes: 1 addition & 1 deletion pkg/sql/alter_table_locality.go
@@ -430,7 +430,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityFromOrToRegionalByRow(
 	if err := params.p.AlterPrimaryKey(
 		params.ctx,
 		n.tableDesc,
-		&tree.AlterTableAlterPrimaryKey{
+		tree.AlterTableAlterPrimaryKey{
 			Name:    tree.Name(n.tableDesc.PrimaryIndex.Name),
 			Columns: cols,
 		},
35 changes: 20 additions & 15 deletions pkg/sql/create_index.go
@@ -145,7 +145,7 @@ func (p *planner) setupFamilyAndConstraintForShard(
 // is hash sharded. Note that `tableDesc` will be modified when this method is called for
 // a hash sharded index.
 func MakeIndexDescriptor(
-	params runParams, n *tree.CreateIndex, tableDesc *tabledesc.Mutable,
+	params runParams, n tree.CreateIndex, tableDesc *tabledesc.Mutable,
 ) (*descpb.IndexDescriptor, error) {
 	// Ensure that the columns we want to index exist before trying to create the
 	// index.
@@ -200,7 +200,7 @@
 		}
 		telemetry.Inc(sqltelemetry.InvertedIndexCounter)
 	}
-
+	columns := n.Columns
 	if n.Sharded != nil {
 		if n.PartitionByIndex.ContainsPartitions() {
 			return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
@@ -211,19 +211,20 @@
 		if n.Interleave != nil {
 			return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
 		}
-		shardCol, newColumn, err := setupShardedIndex(
+		shardCol, newColumns, newColumn, err := setupShardedIndex(
 			params.ctx,
 			params.EvalContext(),
 			&params.p.semaCtx,
 			params.SessionData().HashShardedIndexesEnabled,
-			&n.Columns,
+			n.Columns,
 			n.Sharded.ShardBuckets,
 			tableDesc,
 			&indexDesc,
 			false /* isNewTable */)
 		if err != nil {
 			return nil, err
 		}
+		columns = newColumns
 		if newColumn {
 			if err := params.p.setupFamilyAndConstraintForShard(params.ctx, tableDesc, shardCol,
 				indexDesc.Sharded.ColumnNames, indexDesc.Sharded.ShardBuckets); err != nil {
@@ -243,7 +244,7 @@
 		telemetry.Inc(sqltelemetry.PartialIndexCounter)
 	}
 
-	if err := indexDesc.FillColumns(n.Columns); err != nil {
+	if err := indexDesc.FillColumns(columns); err != nil {
 		return nil, err
 	}
 
@@ -285,46 +286,50 @@ func (n *createIndexNode) ReadingOwnWrites() {}
 var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported,
 	"hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable")
 
+// setupShardedIndex updates the index descriptor with the relevant new column.
+// It also returns the column so it can be added to the table. It returns the
+// new column set for the index. This set must be used regardless of whether or
+// not there is a newly created column.
 func setupShardedIndex(
 	ctx context.Context,
 	evalCtx *tree.EvalContext,
 	semaCtx *tree.SemaContext,
 	shardedIndexEnabled bool,
-	columns *tree.IndexElemList,
+	columns tree.IndexElemList,
 	bucketsExpr tree.Expr,
 	tableDesc *tabledesc.Mutable,
 	indexDesc *descpb.IndexDescriptor,
 	isNewTable bool,
-) (shard *descpb.ColumnDescriptor, newColumn bool, err error) {
+) (shard *descpb.ColumnDescriptor, newColumns tree.IndexElemList, newColumn bool, err error) {
 	if !shardedIndexEnabled {
-		return nil, false, hashShardedIndexesDisabledError
+		return nil, nil, false, hashShardedIndexesDisabledError
 	}
 
-	colNames := make([]string, 0, len(*columns))
-	for _, c := range *columns {
+	colNames := make([]string, 0, len(columns))
+	for _, c := range columns {
 		colNames = append(colNames, string(c.Column))
 	}
 	buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr)
 	if err != nil {
-		return nil, false, err
+		return nil, nil, false, err
 	}
 	shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc,
 		colNames, isNewTable)
 	if err != nil {
-		return nil, false, err
+		return nil, nil, false, err
 	}
 	shardIdxElem := tree.IndexElem{
 		Column:    tree.Name(shardCol.Name),
 		Direction: tree.Ascending,
 	}
-	*columns = append(tree.IndexElemList{shardIdxElem}, *columns...)
+	newColumns = append(tree.IndexElemList{shardIdxElem}, columns...)
 	indexDesc.Sharded = descpb.ShardedDescriptor{
 		IsSharded:    true,
 		Name:         shardCol.Name,
 		ShardBuckets: buckets,
 		ColumnNames:  colNames,
 	}
-	return shardCol, newColumn, nil
+	return shardCol, newColumns, newColumn, nil
 }

// maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to
@@ -437,7 +442,7 @@ func (n *createIndexNode) startExec(params runParams) error {
 		}
 	}
 
-	indexDesc, err := MakeIndexDescriptor(params, n.n, n.tableDesc)
+	indexDesc, err := MakeIndexDescriptor(params, *n.n, n.tableDesc)
 	if err != nil {
 		return err
 	}
34 changes: 21 additions & 13 deletions pkg/sql/create_table.go
@@ -1877,36 +1877,38 @@ func NewTableDesc(
 		}
 	}
 
-	setupShardedIndexForNewTable := func(d *tree.IndexTableDef, idx *descpb.IndexDescriptor) error {
+	setupShardedIndexForNewTable := func(
+		d tree.IndexTableDef, idx *descpb.IndexDescriptor,
+	) (columns tree.IndexElemList, _ error) {
 		if n.PartitionByTable.ContainsPartitions() {
-			return pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
+			return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning")
 		}
-		shardCol, newColumn, err := setupShardedIndex(
+		shardCol, newColumns, newColumn, err := setupShardedIndex(
 			ctx,
 			evalCtx,
 			semaCtx,
 			sessionData.HashShardedIndexesEnabled,
-			&d.Columns,
+			d.Columns,
 			d.Sharded.ShardBuckets,
 			&desc,
 			idx,
 			true /* isNewTable */)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		if newColumn {
 			buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets)
 			if err != nil {
-				return err
+				return nil, err
 			}
 			checkConstraint, err := makeShardCheckConstraintDef(&desc, int(buckets), shardCol)
 			if err != nil {
-				return err
+				return nil, err
 			}
 			n.Defs = append(n.Defs, checkConstraint)
 			columnDefaultExprs = append(columnDefaultExprs, nil)
 		}
-		return nil
+		return newColumns, nil
 	}

idxValidator := schemaexpr.MakeIndexPredicateValidator(ctx, n.Table, &desc, semaCtx)
@@ -1929,18 +1931,21 @@
 		if d.Inverted {
 			idx.Type = descpb.IndexDescriptor_INVERTED
 		}
+		columns := d.Columns
 		if d.Sharded != nil {
 			if d.Interleave != nil {
 				return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
 			}
 			if isRegionalByRow {
 				return nil, hashShardedIndexesOnRegionalByRowError()
 			}
-			if err := setupShardedIndexForNewTable(d, &idx); err != nil {
+			var err error
+			columns, err = setupShardedIndexForNewTable(*d, &idx)
+			if err != nil {
 				return nil, err
 			}
 		}
-		if err := idx.FillColumns(d.Columns); err != nil {
+		if err := idx.FillColumns(columns); err != nil {
 			return nil, err
 		}
 		if d.Inverted {
@@ -2024,18 +2029,21 @@
 				StoreColumnNames: d.Storing.ToStrings(),
 				Version:          indexEncodingVersion,
 			}
+			columns := d.Columns
 			if d.Sharded != nil {
 				if n.Interleave != nil && d.PrimaryKey {
 					return nil, pgerror.New(pgcode.FeatureNotSupported, "interleaved indexes cannot also be hash sharded")
 				}
 				if isRegionalByRow {
 					return nil, hashShardedIndexesOnRegionalByRowError()
 				}
-				if err := setupShardedIndexForNewTable(&d.IndexTableDef, &idx); err != nil {
+				var err error
+				columns, err = setupShardedIndexForNewTable(d.IndexTableDef, &idx)
+				if err != nil {
 					return nil, err
 				}
 			}
-			if err := idx.FillColumns(d.Columns); err != nil {
+			if err := idx.FillColumns(columns); err != nil {
 				return nil, err
 			}
 			// Specifying a partitioning on a PRIMARY KEY constraint should be disallowed by the
@@ -2096,7 +2104,7 @@
"interleave not supported in primary key constraint definition",
)
}
for _, c := range d.Columns {
for _, c := range columns {
primaryIndexColumnSet[string(c.Column)] = struct{}{}
}
}
3 changes: 1 addition & 2 deletions pkg/sql/parser/BUILD.bazel
@@ -51,7 +51,6 @@ go_test(
     ],
     data = glob(["testdata/**"]),
     embed = [":parser"],
-    tags = ["broken_in_bazel"],
     deps = [
         "//pkg/sql/lex",
         "//pkg/sql/pgwire/pgerror",
@@ -108,7 +107,7 @@ genrule(
     ],
     outs = ["helpmap_test.go"],
     cmd = """
-$(location :help-gen-test) < $< >$@
+$(location :help-gen-test) < $< >$@.tmp
 mv -f $@.tmp $@
 """,
     tools = [