Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
61394: sql: add drop type to the prepared statement generator r=rafiss a=tharun208

Release justification: fixes for high-priority or high-severity bugs in existing functionality

Previously, the prepared-statement path did not support DROP TYPE
in the optimizer, so execution panicked because no plan flags were generated.

This patch fixes this by adding `dropType` to the optimizer.

Resolves cockroachdb#61226

Release note: none

Signed-off-by: Tharun <[email protected]>

<img width="454" alt="Screen Shot 2021-03-03 at 2 39 29 PM" src="https://user-images.githubusercontent.com/14926492/109781995-6c2a8d80-7c2e-11eb-902e-e596aa4f2bb3.png">


61437: sql: block interleaved tables with REGIONAL BY ROW r=ajstorm a=otan

Release justification: Low-risk change to new functionality.

Release note: None

61449: sql: fix UNIQUE column definition for REGIONAL BY ROW r=rytaft a=otan

This was buggy because the implicitly added crdb_region column was not yet
initialized when the UNIQUE definition was processed. Instead, defer
these index creations until after all column definitions have been
initialized.

Release justification: low-risk bug fix to new functionality

Release note: None

Co-authored-by: Tharun <[email protected]>
Co-authored-by: Oliver Tan <[email protected]>
  • Loading branch information
3 people committed Mar 4, 2021
4 parents a8d8f06 + 556ea20 + dc91331 + b55e41e commit 69ed26b
Show file tree
Hide file tree
Showing 8 changed files with 162 additions and 58 deletions.
10 changes: 10 additions & 0 deletions pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality
Original file line number Diff line number Diff line change
Expand Up @@ -2627,3 +2627,13 @@ CREATE TABLE hash_sharded_idx_table (

statement error cannot convert a table to REGIONAL BY ROW if table table contains hash sharded indexes
ALTER TABLE hash_sharded_idx_table SET LOCALITY REGIONAL BY ROW

statement ok
CREATE TABLE parent_table (pk INT PRIMARY KEY);
CREATE TABLE cannot_interleave_regional_by_row (
pk INT NOT NULL PRIMARY KEY
)
INTERLEAVE IN PARENT parent_table(pk)

statement error interleaved tables are not compatible with REGIONAL BY ROW tables
ALTER TABLE cannot_interleave_regional_by_row SET LOCALITY REGIONAL BY ROW
67 changes: 63 additions & 4 deletions pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,16 @@ CREATE TABLE regional_by_row_table (
)
LOCALITY REGIONAL BY ROW

statement ok
CREATE TABLE parent_table (pk INT PRIMARY KEY)

statement error interleaved tables are not compatible with REGIONAL BY ROW tables
CREATE TABLE regional_by_row_table (
pk INT NOT NULL PRIMARY KEY
)
INTERLEAVE IN PARENT parent_table(pk)
LOCALITY REGIONAL BY ROW

statement ok
CREATE TABLE regional_by_row_table_explicit_crdb_region_column (
pk int PRIMARY KEY,
Expand Down Expand Up @@ -432,6 +442,7 @@ query TTTTIT colnames
SHOW TABLES
----
schema_name table_name type owner estimated_row_count locality
public parent_table table root 0 REGIONAL BY TABLE IN PRIMARY REGION
public regional_by_row_table table root 0 REGIONAL BY ROW
public regional_by_row_table_explicit_crdb_region_column table root 0 REGIONAL BY ROW

Expand Down Expand Up @@ -465,6 +476,9 @@ CREATE INDEX bad_idx ON regional_by_row_table(a) USING HASH WITH BUCKET_COUNT =
statement error hash sharded indexes are not compatible with REGIONAL BY ROW tables
ALTER TABLE regional_by_row_table ALTER PRIMARY KEY USING COLUMNS(pk2) USING HASH WITH BUCKET_COUNT = 8

statement error interleaved tables are not compatible with REGIONAL BY ROW tables
CREATE INDEX bad_idx ON regional_by_row_table(pk) INTERLEAVE IN PARENT parent_table(pk)

# Insert some rows into the regional_by_row_table.
query TI
INSERT INTO regional_by_row_table (pk, pk2, a, b, j) VALUES
Expand Down Expand Up @@ -597,7 +611,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
OR message LIKE 'Scan%'
ORDER BY ordinality ASC
----
Scan /Table/69/1/"@"/1{-/#}, /Table/69/1/"\x80"/1{-/#}, /Table/69/1/"\xc0"/1{-/#}
Scan /Table/71/1/"@"/1{-/#}, /Table/71/1/"\x80"/1{-/#}, /Table/71/1/"\xc0"/1{-/#}
fetched: /regional_by_row_table/primary/'ap-southeast-2'/1/pk2/a/b/j -> /1/2/3/'{"a": "b"}'
output row: [1 1 2 3 '{"a": "b"}']

Expand Down Expand Up @@ -636,7 +650,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
OR message LIKE 'Scan%'
ORDER BY ordinality ASC
----
Scan /Table/69/1/"@"/1{-/#}
Scan /Table/71/1/"@"/1{-/#}
fetched: /regional_by_row_table/primary/'ap-southeast-2'/1/pk2/a/b/j -> /1/2/3/'{"a": "b"}'
output row: [1 1 2 3 '{"a": "b"}']

Expand All @@ -651,8 +665,8 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY
OR message LIKE 'Scan%'
ORDER BY ordinality ASC
----
Scan /Table/69/1/"@"/10{-/#}
Scan /Table/69/1/"\x80"/10{-/#}, /Table/69/1/"\xc0"/10{-/#}
Scan /Table/71/1/"@"/10{-/#}
Scan /Table/71/1/"\x80"/10{-/#}, /Table/71/1/"\xc0"/10{-/#}
fetched: /regional_by_row_table/primary/'ca-central-1'/10/pk2/a/b -> /10/11/12
output row: [10 10 11 12 NULL]

Expand Down Expand Up @@ -1795,6 +1809,51 @@ DATABASE add_regions_in_txn ALTER DATABASE add_regions_in_txn CONFIGURE ZONE US
voter_constraints = '[+region=ca-central-1]',
lease_preferences = '[[+region=ca-central-1]]'

statement ok
CREATE TABLE regional_by_row_unique_in_column (
a INT PRIMARY KEY,
b INT UNIQUE,
c INT,
FAMILY (a, b, c)
) LOCALITY REGIONAL BY ROW

query TT
SHOW CREATE TABLE regional_by_row_unique_in_column
----
regional_by_row_unique_in_column CREATE TABLE public.regional_by_row_unique_in_column (
a INT8 NOT NULL,
b INT8 NULL,
c INT8 NULL,
crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region,
CONSTRAINT "primary" PRIMARY KEY (a ASC),
UNIQUE INDEX regional_by_row_unique_in_column_b_key (b ASC),
FAMILY fam_0_a_b_c_crdb_region (a, b, c, crdb_region)
) LOCALITY REGIONAL BY ROW

statement ok
CREATE TABLE regional_by_row_fk (
d INT PRIMARY KEY,
e INT UNIQUE REFERENCES regional_by_row_unique_in_column(a),
f INT UNIQUE REFERENCES regional_by_row_unique_in_column(b),
FAMILY (d, e, f)
) LOCALITY REGIONAL BY ROW

query TT
SHOW CREATE TABLE regional_by_row_fk
----
regional_by_row_fk CREATE TABLE public.regional_by_row_fk (
d INT8 NOT NULL,
e INT8 NULL,
f INT8 NULL,
crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region,
CONSTRAINT "primary" PRIMARY KEY (d ASC),
CONSTRAINT fk_e_ref_regional_by_row_unique_in_column FOREIGN KEY (e) REFERENCES public.regional_by_row_unique_in_column(a),
CONSTRAINT fk_f_ref_regional_by_row_unique_in_column FOREIGN KEY (f) REFERENCES public.regional_by_row_unique_in_column(b),
UNIQUE INDEX regional_by_row_fk_e_key (e ASC),
UNIQUE INDEX regional_by_row_fk_f_key (f ASC),
FAMILY fam_0_d_e_f_crdb_region (d, e, f, crdb_region)
) LOCALITY REGIONAL BY ROW

statement ok
CREATE DATABASE drop_regions PRIMARY REGION "ca-central-1" REGIONS "us-east-1", "ap-southeast-2";
USE drop_regions;
Expand Down
4 changes: 4 additions & 0 deletions pkg/sql/alter_table_locality.go
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,10 @@ func (n *alterTableSetLocalityNode) alterTableLocalityToRegionalByRow(
primaryIndexColIdxStart = int(n.tableDesc.PrimaryIndex.Partitioning.NumImplicitColumns)
}

if n.tableDesc.IsInterleaved() {
return interleaveOnRegionalByRowError()
}

for _, idx := range n.tableDesc.NonDropIndexes() {
if idx.IsSharded() {
return pgerror.New(pgcode.FeatureNotSupported, "cannot convert a table to REGIONAL BY ROW if table table contains hash sharded indexes")
Expand Down
72 changes: 49 additions & 23 deletions pkg/sql/create_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -1195,6 +1195,10 @@ func addInterleave(
7854, "unsupported shorthand %s", interleave.DropBehavior)
}

if desc.IsLocalityRegionalByRow() {
return interleaveOnRegionalByRowError()
}

parentTable, err := resolver.ResolveExistingTableObject(
ctx, vt, &interleave.Parent, tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveRequireTableDesc),
)
Expand Down Expand Up @@ -1571,6 +1575,11 @@ func NewTableDesc(
)
}

// Check no interleaving is on the table.
if n.Interleave != nil {
return nil, interleaveOnRegionalByRowError()
}

// Check PARTITION BY is not set on anything partitionable, and also check
// for the existence of the column to partition by.
regionalByRowColExists := false
Expand Down Expand Up @@ -1689,6 +1698,16 @@ func NewTableDesc(
allowImplicitPartitioning := (sessionData != nil && sessionData.ImplicitColumnPartitioningEnabled) ||
(locality != nil && locality.LocalityLevel == tree.LocalityLevelRow)

// We defer index creation of implicit indexes in column definitions
// until after all columns have been initialized, in case there is
// an implicit index that will depend on a column that has not yet
// been initialized.
type implicitColumnDefIdx struct {
idx *descpb.IndexDescriptor
def *tree.ColumnTableDef
}
var implicitColumnDefIdxs []implicitColumnDefIdx

for i, def := range n.Defs {
if d, ok := def.(*tree.ColumnTableDef); ok {
// NewTableDesc is called sometimes with a nil SemaCtx (for example
Expand Down Expand Up @@ -1780,29 +1799,7 @@ func NewTableDesc(

if idx != nil {
idx.Version = indexEncodingVersion

// If it a non-primary index that is implicitly created, ensure partitioning
// for PARTITION ALL BY.
if desc.PartitionAllBy && !d.PrimaryKey.IsPrimaryKey {
var err error
*idx, err = CreatePartitioning(
ctx,
st,
evalCtx,
&desc,
*idx,
partitionAllBy,
nil, /* allowedNewColumnNames */
allowImplicitPartitioning,
)
if err != nil {
return nil, err
}
}

if err := desc.AddIndex(*idx, d.PrimaryKey.IsPrimaryKey); err != nil {
return nil, err
}
implicitColumnDefIdxs = append(implicitColumnDefIdxs, implicitColumnDefIdx{idx: idx, def: d})
}

if d.HasColumnFamily() {
Expand All @@ -1817,6 +1814,31 @@ func NewTableDesc(
}
}

for _, implicitColumnDefIdx := range implicitColumnDefIdxs {
// If it is a non-primary index that is implicitly created, ensure
// partitioning for PARTITION ALL BY.
if desc.PartitionAllBy && !implicitColumnDefIdx.def.PrimaryKey.IsPrimaryKey {
var err error
*implicitColumnDefIdx.idx, err = CreatePartitioning(
ctx,
st,
evalCtx,
&desc,
*implicitColumnDefIdx.idx,
partitionAllBy,
nil, /* allowedNewColumnNames */
allowImplicitPartitioning,
)
if err != nil {
return nil, err
}
}

if err := desc.AddIndex(*implicitColumnDefIdx.idx, implicitColumnDefIdx.def.PrimaryKey.IsPrimaryKey); err != nil {
return nil, err
}
}

// Now that we've constructed our columns, we pop into any of our computed
// columns so that we can dequalify any column references.
sourceInfo := colinfo.NewSourceInfoForSingleTable(
Expand Down Expand Up @@ -2799,6 +2821,10 @@ func hashShardedIndexesOnRegionalByRowError() error {
return pgerror.New(pgcode.FeatureNotSupported, "hash sharded indexes are not compatible with REGIONAL BY ROW tables")
}

// interleaveOnRegionalByRowError returns the FeatureNotSupported error
// reported whenever a statement would combine table interleaving with a
// REGIONAL BY ROW table (e.g. CREATE TABLE ... INTERLEAVE IN PARENT with
// LOCALITY REGIONAL BY ROW, or ALTER TABLE ... SET LOCALITY REGIONAL BY ROW
// on an interleaved table).
func interleaveOnRegionalByRowError() error {
return pgerror.New(pgcode.FeatureNotSupported, "interleaved tables are not compatible with REGIONAL BY ROW tables")
}

func checkClusterSupportsPartitionByAll(evalCtx *tree.EvalContext) error {
if !evalCtx.Settings.Version.IsActive(evalCtx.Context, clusterversion.MultiRegionFeatures) {
return pgerror.Newf(
Expand Down
51 changes: 21 additions & 30 deletions pkg/sql/drop_type.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,9 @@ import (
"github.com/cockroachdb/errors"
)

type typeToDrop struct {
desc *typedesc.Mutable
fqName string
}

type dropTypeNode struct {
n *tree.DropType
td map[descpb.ID]typeToDrop
n *tree.DropType
toDrop map[descpb.ID]*typedesc.Mutable
}

// Use to satisfy the linter.
Expand All @@ -51,8 +46,8 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err
}

node := &dropTypeNode{
n: n,
td: make(map[descpb.ID]typeToDrop),
n: n,
toDrop: make(map[descpb.ID]*typedesc.Mutable),
}
if n.DropBehavior == tree.DropCascade {
return nil, unimplemented.NewWithIssue(51480, "DROP TYPE CASCADE is not yet supported")
Expand All @@ -67,7 +62,7 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err
continue
}
// If we've already seen this type, then skip it.
if _, ok := node.td[typeDesc.ID]; ok {
if _, ok := node.toDrop[typeDesc.ID]; ok {
continue
}
switch typeDesc.Kind {
Expand Down Expand Up @@ -105,23 +100,9 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err
if err := p.canDropTypeDesc(ctx, mutArrayDesc, n.DropBehavior); err != nil {
return nil, err
}

// Record these descriptors for deletion.
node.td[typeDesc.ID] = typeToDrop{
desc: typeDesc,
fqName: tree.AsStringWithFQNames(name, p.Ann()),
}
arrayFQName, err := getTypeNameFromTypeDescriptor(
oneAtATimeSchemaResolver{ctx, p},
mutArrayDesc,
)
if err != nil {
return nil, err
}
node.td[mutArrayDesc.ID] = typeToDrop{
desc: mutArrayDesc,
fqName: arrayFQName.FQString(),
}
node.toDrop[typeDesc.ID] = typeDesc
node.toDrop[mutArrayDesc.ID] = mutArrayDesc
}
return node, nil
}
Expand All @@ -148,13 +129,23 @@ func (p *planner) canDropTypeDesc(
}

func (n *dropTypeNode) startExec(params runParams) error {
for _, toDrop := range n.td {
typ, fqName := toDrop.desc, toDrop.fqName
if err := params.p.dropTypeImpl(params.ctx, typ, tree.AsStringWithFQNames(n.n, params.Ann()), true /* queueJob */); err != nil {
for _, typeDesc := range n.toDrop {
typeFQName, err := getTypeNameFromTypeDescriptor(
oneAtATimeSchemaResolver{params.ctx, params.p},
typeDesc,
)
if err != nil {
return err
}
err = params.p.dropTypeImpl(params.ctx, typeDesc, "dropping type "+typeFQName.FQString(), true /* queueJob */)
if err != nil {
return err
}
event := &eventpb.DropType{
TypeName: typeFQName.FQString(),
}
// Log a Drop Type event.
if err := params.p.logEvent(params.ctx, typ.ID, &eventpb.DropType{TypeName: fqName}); err != nil {
if err := params.p.logEvent(params.ctx, typeDesc.ID, event); err != nil {
return err
}
}
Expand Down
2 changes: 2 additions & 0 deletions pkg/sql/explain_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ func TestStatementReuses(t *testing.T) {
`CREATE SEQUENCE s`,
`CREATE INDEX woo ON a(b)`,
`CREATE USER woo`,
`CREATE TYPE test as ENUM('a')`,
}

for _, s := range initStmts {
Expand All @@ -54,6 +55,7 @@ func TestStatementReuses(t *testing.T) {
`DROP SEQUENCE s`,
`DROP VIEW v`,
`DROP USER woo`,
`DROP TYPE test`,

// Ditto ALTER first, so that erroneous side effects bork what's
// below.
Expand Down
12 changes: 12 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/prepare
Original file line number Diff line number Diff line change
Expand Up @@ -1355,6 +1355,10 @@ CREATE TABLE greeting_table (x greeting NOT NULL, y INT, INDEX (x, y))
statement ok
PREPARE enum_query AS SELECT x, y FROM greeting_table WHERE y = 2

# Create prepared statement to drop type.
statement ok
PREPARE enum_drop AS DROP TYPE greeting

# Now alter the enum to have a new value.
statement ok
ALTER TYPE greeting ADD VALUE 'howdy'
Expand All @@ -1368,3 +1372,11 @@ query TI
EXECUTE enum_query
----
howdy 2

# drop table
statement ok
DROP TABLE greeting_table

# drop the type using prepared statement.
statement ok
EXECUTE enum_drop
2 changes: 1 addition & 1 deletion pkg/sql/plan_opt.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ func (p *planner) prepareUsingOptimizer(ctx context.Context) (planFlags, error)
*tree.CreateSequence,
*tree.CreateStats,
*tree.Deallocate, *tree.Discard, *tree.DropDatabase, *tree.DropIndex,
*tree.DropTable, *tree.DropView, *tree.DropSequence,
*tree.DropTable, *tree.DropView, *tree.DropSequence, *tree.DropType,
*tree.Execute,
*tree.Grant, *tree.GrantRole,
*tree.Prepare,
Expand Down

0 comments on commit 69ed26b

Please sign in to comment.