From dc91331a15d34dbe613869a31b8c276810e01111 Mon Sep 17 00:00:00 2001 From: Oliver Tan Date: Thu, 4 Mar 2021 13:31:14 +1100 Subject: [PATCH 1/3] sql: block interleaved tables with REGIONAL BY ROW Release justification: Low-risk change to new functionality. Release note: None --- .../testdata/logic_test/alter_table_locality | 10 +++++++++ .../testdata/logic_test/regional_by_row | 22 +++++++++++++++---- pkg/sql/alter_table_locality.go | 4 ++++ pkg/sql/create_table.go | 13 +++++++++++ 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality index 9460086d2c30..12e4c218598d 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality +++ b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality @@ -2627,3 +2627,13 @@ CREATE TABLE hash_sharded_idx_table ( statement error cannot convert a table to REGIONAL BY ROW if table table contains hash sharded indexes ALTER TABLE hash_sharded_idx_table SET LOCALITY REGIONAL BY ROW + +statement ok +CREATE TABLE parent_table (pk INT PRIMARY KEY); +CREATE TABLE cannot_interleave_regional_by_row ( + pk INT NOT NULL PRIMARY KEY +) +INTERLEAVE IN PARENT parent_table(pk) + +statement error interleaved tables are not compatible with REGIONAL BY ROW tables +ALTER TABLE cannot_interleave_regional_by_row SET LOCALITY REGIONAL BY ROW diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index eff870ad39f2..9b682044a56e 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -94,6 +94,16 @@ CREATE TABLE regional_by_row_table ( ) LOCALITY REGIONAL BY ROW +statement ok +CREATE TABLE parent_table (pk INT PRIMARY KEY) + +statement error interleaved tables are not compatible with REGIONAL BY ROW tables +CREATE TABLE regional_by_row_table ( + pk INT NOT NULL PRIMARY KEY +) +INTERLEAVE IN PARENT parent_table(pk) +LOCALITY REGIONAL BY ROW + statement ok CREATE TABLE regional_by_row_table_explicit_crdb_region_column ( pk int PRIMARY KEY, @@ -432,6 +442,7 @@ query TTTTIT colnames SHOW TABLES ---- schema_name table_name type owner estimated_row_count locality +public parent_table table root 0 REGIONAL BY TABLE IN PRIMARY REGION public regional_by_row_table table root 0 REGIONAL BY ROW public regional_by_row_table_explicit_crdb_region_column table root 0 REGIONAL BY ROW @@ -465,6 +476,9 @@ CREATE INDEX bad_idx ON regional_by_row_table(a) USING HASH WITH BUCKET_COUNT = statement error hash sharded indexes are not compatible with REGIONAL BY ROW tables ALTER TABLE regional_by_row_table ALTER PRIMARY KEY USING COLUMNS(pk2) USING HASH WITH BUCKET_COUNT = 8 +statement error interleaved tables are not compatible with REGIONAL BY ROW tables +CREATE INDEX bad_idx ON regional_by_row_table(pk) INTERLEAVE IN PARENT parent_table(pk) + # Insert some rows into the regional_by_row_table. 
query TI INSERT INTO regional_by_row_table (pk, pk2, a, b, j) VALUES @@ -597,7 +611,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY OR message LIKE 'Scan%' ORDER BY ordinality ASC ---- -Scan /Table/69/1/"@"/1{-/#}, /Table/69/1/"\x80"/1{-/#}, /Table/69/1/"\xc0"/1{-/#} +Scan /Table/71/1/"@"/1{-/#}, /Table/71/1/"\x80"/1{-/#}, /Table/71/1/"\xc0"/1{-/#} fetched: /regional_by_row_table/primary/'ap-southeast-2'/1/pk2/a/b/j -> /1/2/3/'{"a": "b"}' output row: [1 1 2 3 '{"a": "b"}'] @@ -636,7 +650,7 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY OR message LIKE 'Scan%' ORDER BY ordinality ASC ---- -Scan /Table/69/1/"@"/1{-/#} +Scan /Table/71/1/"@"/1{-/#} fetched: /regional_by_row_table/primary/'ap-southeast-2'/1/pk2/a/b/j -> /1/2/3/'{"a": "b"}' output row: [1 1 2 3 '{"a": "b"}'] @@ -651,8 +665,8 @@ SELECT message FROM [SHOW KV TRACE FOR SESSION] WITH ORDINALITY OR message LIKE 'Scan%' ORDER BY ordinality ASC ---- -Scan /Table/69/1/"@"/10{-/#} -Scan /Table/69/1/"\x80"/10{-/#}, /Table/69/1/"\xc0"/10{-/#} +Scan /Table/71/1/"@"/10{-/#} +Scan /Table/71/1/"\x80"/10{-/#}, /Table/71/1/"\xc0"/10{-/#} fetched: /regional_by_row_table/primary/'ca-central-1'/10/pk2/a/b -> /10/11/12 output row: [10 10 11 12 NULL] diff --git a/pkg/sql/alter_table_locality.go b/pkg/sql/alter_table_locality.go index c46725b5cc63..8d53f8f96067 100644 --- a/pkg/sql/alter_table_locality.go +++ b/pkg/sql/alter_table_locality.go @@ -247,6 +247,10 @@ func (n *alterTableSetLocalityNode) alterTableLocalityToRegionalByRow( primaryIndexColIdxStart = int(n.tableDesc.PrimaryIndex.Partitioning.NumImplicitColumns) } + if n.tableDesc.IsInterleaved() { + return interleaveOnRegionalByRowError() + } + for _, idx := range n.tableDesc.NonDropIndexes() { if idx.IsSharded() { return pgerror.New(pgcode.FeatureNotSupported, "cannot convert a table to REGIONAL BY ROW if table table contains hash sharded indexes") diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 9ae07509154b..84496b5628db 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -1195,6 +1195,10 @@ func addInterleave( 7854, "unsupported shorthand %s", interleave.DropBehavior) } + if desc.IsLocalityRegionalByRow() { + return interleaveOnRegionalByRowError() + } + parentTable, err := resolver.ResolveExistingTableObject( ctx, vt, &interleave.Parent, tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveRequireTableDesc), ) @@ -1571,6 +1575,11 @@ func NewTableDesc( ) } + // Check no interleaving is on the table. + if n.Interleave != nil { + return nil, interleaveOnRegionalByRowError() + } + // Check PARTITION BY is not set on anything partitionable, and also check // for the existence of the column to partition by. 
regionalByRowColExists := false @@ -2799,6 +2808,10 @@ func hashShardedIndexesOnRegionalByRowError() error { return pgerror.New(pgcode.FeatureNotSupported, "hash sharded indexes are not compatible with REGIONAL BY ROW tables") } +func interleaveOnRegionalByRowError() error { + return pgerror.New(pgcode.FeatureNotSupported, "interleaved tables are not compatible with REGIONAL BY ROW tables") +} + func checkClusterSupportsPartitionByAll(evalCtx *tree.EvalContext) error { if !evalCtx.Settings.Version.IsActive(evalCtx.Context, clusterversion.MultiRegionFeatures) { return pgerror.Newf( From b55e41efa472829d1e37b62298727be45c49a970 Mon Sep 17 00:00:00 2001 From: Oliver Tan Date: Fri, 5 Mar 2021 06:25:28 +1100 Subject: [PATCH 2/3] sql: fix UNIQUE column definition for REGIONAL BY ROW This was buggy as the implicitly added crdb_region column was not initialized before the UNIQUE definition was available. Instead, defer these index creations until after all column definitions have been initialized. Release justification: low-risk bug fix to new functionality Release note: None --- .../testdata/logic_test/regional_by_row | 45 ++++++++++++++ pkg/sql/create_table.go | 59 +++++++++++-------- 2 files changed, 81 insertions(+), 23 deletions(-) diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index eff870ad39f2..f5d5ee8b2e99 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -1795,6 +1795,51 @@ DATABASE add_regions_in_txn ALTER DATABASE add_regions_in_txn CONFIGURE ZONE US voter_constraints = '[+region=ca-central-1]', lease_preferences = '[[+region=ca-central-1]]' +statement ok +CREATE TABLE regional_by_row_unique_in_column ( + a INT PRIMARY KEY, + b INT UNIQUE, + c INT, + FAMILY (a, b, c) +) LOCALITY REGIONAL BY ROW + +query TT +SHOW CREATE TABLE regional_by_row_unique_in_column +---- +regional_by_row_unique_in_column CREATE TABLE public.regional_by_row_unique_in_column ( + a INT8 NOT NULL, + b INT8 NULL, + c INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (a ASC), + UNIQUE INDEX regional_by_row_unique_in_column_b_key (b ASC), + FAMILY fam_0_a_b_c_crdb_region (a, b, c, crdb_region) +) LOCALITY REGIONAL BY ROW + +statement ok +CREATE TABLE regional_by_row_fk ( + d INT PRIMARY KEY, + e INT UNIQUE REFERENCES regional_by_row_unique_in_column(a), + f INT UNIQUE REFERENCES regional_by_row_unique_in_column(b), + FAMILY (d, e, f) +) LOCALITY REGIONAL BY ROW + +query TT +SHOW CREATE TABLE regional_by_row_fk +---- +regional_by_row_fk CREATE TABLE public.regional_by_row_fk ( + d INT8 NOT NULL, + e INT8 NULL, + f INT8 NULL, + crdb_region public.crdb_internal_region NOT VISIBLE NOT NULL DEFAULT default_to_database_primary_region(gateway_region())::public.crdb_internal_region, + CONSTRAINT "primary" PRIMARY KEY (d ASC), + CONSTRAINT fk_e_ref_regional_by_row_unique_in_column FOREIGN KEY (e) REFERENCES public.regional_by_row_unique_in_column(a), + CONSTRAINT fk_f_ref_regional_by_row_unique_in_column FOREIGN KEY (f) REFERENCES public.regional_by_row_unique_in_column(b), + UNIQUE INDEX regional_by_row_fk_e_key (e ASC), + UNIQUE INDEX regional_by_row_fk_f_key (f ASC), + FAMILY fam_0_d_e_f_crdb_region (d, e, f, crdb_region) +) LOCALITY REGIONAL BY ROW + statement ok CREATE DATABASE drop_regions PRIMARY REGION 
"ca-central-1" REGIONS "us-east-1", "ap-southeast-2"; USE drop_regions; diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 9ae07509154b..cb57d31bf53e 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -1689,6 +1689,16 @@ func NewTableDesc( allowImplicitPartitioning := (sessionData != nil && sessionData.ImplicitColumnPartitioningEnabled) || (locality != nil && locality.LocalityLevel == tree.LocalityLevelRow) + // We defer index creation of implicit indexes in column definitions + // until after all columns have been initialized, in case there is + // an implicit index that will depend on a column that has not yet + // been initialized. + type implicitColumnDefIdx struct { + idx *descpb.IndexDescriptor + def *tree.ColumnTableDef + } + var implicitColumnDefIdxs []implicitColumnDefIdx + for i, def := range n.Defs { if d, ok := def.(*tree.ColumnTableDef); ok { // NewTableDesc is called sometimes with a nil SemaCtx (for example @@ -1780,29 +1790,7 @@ func NewTableDesc( if idx != nil { idx.Version = indexEncodingVersion - - // If it a non-primary index that is implicitly created, ensure partitioning - // for PARTITION ALL BY. - if desc.PartitionAllBy && !d.PrimaryKey.IsPrimaryKey { - var err error - *idx, err = CreatePartitioning( - ctx, - st, - evalCtx, - &desc, - *idx, - partitionAllBy, - nil, /* allowedNewColumnNames */ - allowImplicitPartitioning, - ) - if err != nil { - return nil, err - } - } - - if err := desc.AddIndex(*idx, d.PrimaryKey.IsPrimaryKey); err != nil { - return nil, err - } + implicitColumnDefIdxs = append(implicitColumnDefIdxs, implicitColumnDefIdx{idx: idx, def: d}) } if d.HasColumnFamily() { @@ -1817,6 +1805,31 @@ func NewTableDesc( } } + for _, implicitColumnDefIdx := range implicitColumnDefIdxs { + // If it is a non-primary index that is implicitly created, ensure + // partitioning for PARTITION ALL BY. + if desc.PartitionAllBy && !implicitColumnDefIdx.def.PrimaryKey.IsPrimaryKey { + var err error + *implicitColumnDefIdx.idx, err = CreatePartitioning( + ctx, + st, + evalCtx, + &desc, + *implicitColumnDefIdx.idx, + partitionAllBy, + nil, /* allowedNewColumnNames */ + allowImplicitPartitioning, + ) + if err != nil { + return nil, err + } + } + + if err := desc.AddIndex(*implicitColumnDefIdx.idx, implicitColumnDefIdx.def.PrimaryKey.IsPrimaryKey); err != nil { + return nil, err + } + } + // Now that we've constructed our columns, we pop into any of our computed // columns so that we can dequalify any column references. sourceInfo := colinfo.NewSourceInfoForSingleTable( From 556ea201465ae677b8c815c42aa35ba12a2bc8b5 Mon Sep 17 00:00:00 2001 From: Tharun Date: Wed, 3 Mar 2021 13:41:10 +0530 Subject: [PATCH 3/3] sql: add drop type to the prepared statement generator Release justification: fixes for high-priority or high-severity bugs in existing functionality Previously, prepare statement doesnt have support for drop type in optimizer, so it panic during execution as no flags are generated. This patch fixes this by adding dropType to the optimizer. 
Resolves #61226 Release note: none Signed-off-by: Tharun --- pkg/sql/drop_type.go | 51 ++++++++----------- pkg/sql/explain_test.go | 2 + pkg/sql/logictest/testdata/logic_test/prepare | 12 +++++ pkg/sql/plan_opt.go | 2 +- 4 files changed, 36 insertions(+), 31 deletions(-) diff --git a/pkg/sql/drop_type.go b/pkg/sql/drop_type.go index 93c69479e004..ae1d251fe0e7 100644 --- a/pkg/sql/drop_type.go +++ b/pkg/sql/drop_type.go @@ -28,14 +28,9 @@ import ( "github.com/cockroachdb/errors" ) -type typeToDrop struct { - desc *typedesc.Mutable - fqName string -} - type dropTypeNode struct { - n *tree.DropType - td map[descpb.ID]typeToDrop + n *tree.DropType + toDrop map[descpb.ID]*typedesc.Mutable } // Use to satisfy the linter. @@ -51,8 +46,8 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err } node := &dropTypeNode{ - n: n, - td: make(map[descpb.ID]typeToDrop), + n: n, + toDrop: make(map[descpb.ID]*typedesc.Mutable), } if n.DropBehavior == tree.DropCascade { return nil, unimplemented.NewWithIssue(51480, "DROP TYPE CASCADE is not yet supported") @@ -67,7 +62,7 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err continue } // If we've already seen this type, then skip it. - if _, ok := node.td[typeDesc.ID]; ok { + if _, ok := node.toDrop[typeDesc.ID]; ok { continue } switch typeDesc.Kind { @@ -105,23 +100,9 @@ func (p *planner) DropType(ctx context.Context, n *tree.DropType) (planNode, err if err := p.canDropTypeDesc(ctx, mutArrayDesc, n.DropBehavior); err != nil { return nil, err } - // Record these descriptors for deletion. - node.td[typeDesc.ID] = typeToDrop{ - desc: typeDesc, - fqName: tree.AsStringWithFQNames(name, p.Ann()), - } - arrayFQName, err := getTypeNameFromTypeDescriptor( - oneAtATimeSchemaResolver{ctx, p}, - mutArrayDesc, - ) - if err != nil { - return nil, err - } - node.td[mutArrayDesc.ID] = typeToDrop{ - desc: mutArrayDesc, - fqName: arrayFQName.FQString(), - } + node.toDrop[typeDesc.ID] = typeDesc + node.toDrop[mutArrayDesc.ID] = mutArrayDesc } return node, nil } @@ -148,13 +129,23 @@ func (p *planner) canDropTypeDesc( } func (n *dropTypeNode) startExec(params runParams) error { - for _, toDrop := range n.td { - typ, fqName := toDrop.desc, toDrop.fqName - if err := params.p.dropTypeImpl(params.ctx, typ, tree.AsStringWithFQNames(n.n, params.Ann()), true /* queueJob */); err != nil { + for _, typeDesc := range n.toDrop { + typeFQName, err := getTypeNameFromTypeDescriptor( + oneAtATimeSchemaResolver{params.ctx, params.p}, + typeDesc, + ) + if err != nil { return err } + err = params.p.dropTypeImpl(params.ctx, typeDesc, "dropping type "+typeFQName.FQString(), true /* queueJob */) + if err != nil { + return err + } + event := &eventpb.DropType{ + TypeName: typeFQName.FQString(), + } // Log a Drop Type event. 
- if err := params.p.logEvent(params.ctx, typ.ID, &eventpb.DropType{TypeName: fqName}); err != nil { + if err := params.p.logEvent(params.ctx, typeDesc.ID, event); err != nil { return err } } diff --git a/pkg/sql/explain_test.go b/pkg/sql/explain_test.go index d1ecceed43ab..2438a50a794d 100644 --- a/pkg/sql/explain_test.go +++ b/pkg/sql/explain_test.go @@ -37,6 +37,7 @@ func TestStatementReuses(t *testing.T) { `CREATE SEQUENCE s`, `CREATE INDEX woo ON a(b)`, `CREATE USER woo`, + `CREATE TYPE test as ENUM('a')`, } for _, s := range initStmts { @@ -54,6 +55,7 @@ func TestStatementReuses(t *testing.T) { `DROP SEQUENCE s`, `DROP VIEW v`, `DROP USER woo`, + `DROP TYPE test`, // Ditto ALTER first, so that erroneous side effects bork what's // below. diff --git a/pkg/sql/logictest/testdata/logic_test/prepare b/pkg/sql/logictest/testdata/logic_test/prepare index 2e0f639bc4c0..8fb45ae9b91a 100644 --- a/pkg/sql/logictest/testdata/logic_test/prepare +++ b/pkg/sql/logictest/testdata/logic_test/prepare @@ -1355,6 +1355,10 @@ CREATE TABLE greeting_table (x greeting NOT NULL, y INT, INDEX (x, y)) statement ok PREPARE enum_query AS SELECT x, y FROM greeting_table WHERE y = 2 +# Create prepared statement to drop type. +statement ok +PREPARE enum_drop AS DROP TYPE greeting + # Now alter the enum to have a new value. statement ok ALTER TYPE greeting ADD VALUE 'howdy' @@ -1368,3 +1372,11 @@ query TI EXECUTE enum_query ---- howdy 2 + +# drop table +statement ok +DROP TABLE greeting_table + +# drop the type using prepared statement. +statement ok +EXECUTE enum_drop diff --git a/pkg/sql/plan_opt.go b/pkg/sql/plan_opt.go index 55688430cc87..14e59c82378c 100644 --- a/pkg/sql/plan_opt.go +++ b/pkg/sql/plan_opt.go @@ -61,7 +61,7 @@ func (p *planner) prepareUsingOptimizer(ctx context.Context) (planFlags, error) *tree.CreateSequence, *tree.CreateStats, *tree.Deallocate, *tree.Discard, *tree.DropDatabase, *tree.DropIndex, - *tree.DropTable, *tree.DropView, *tree.DropSequence, + *tree.DropTable, *tree.DropView, *tree.DropSequence, *tree.DropType, *tree.Execute, *tree.Grant, *tree.GrantRole, *tree.Prepare,