diff --git a/pkg/cli/interactive_tests/test_demo_partitioning.tcl b/pkg/cli/interactive_tests/test_demo_partitioning.tcl.disabled
similarity index 100%
rename from pkg/cli/interactive_tests/test_demo_partitioning.tcl
rename to pkg/cli/interactive_tests/test_demo_partitioning.tcl.disabled
diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go
index 40ff107bfec1..c9dcfe6cb34a 100644
--- a/pkg/cmd/roachtest/cluster.go
+++ b/pkg/cmd/roachtest/cluster.go
@@ -853,21 +853,32 @@ func (s *clusterSpec) args() []string {
 		machineTypeArg := machineTypeFlag(machineType) + "=" + machineType
 		args = append(args, machineTypeArg)
 	}
-	if s.Zones != "" {
-		switch cloud {
-		case gce:
-			if s.Geo {
-				args = append(args, "--gce-zones="+s.Zones)
-			} else {
-				args = append(args, "--gce-zones="+firstZone(s.Zones))
+
+	if !local {
+		zones := s.Zones
+		if zones == "" {
+			zones = zonesF
+		}
+		if zones != "" {
+			if !s.Geo {
+				zones = firstZone(zones)
 			}
-		case azure:
-			args = append(args, "--azure-locations="+s.Zones)
-		default:
-			fmt.Fprintf(os.Stderr, "specifying zones is not yet supported on %s", cloud)
-			os.Exit(1)
+			var arg string
+			switch cloud {
+			case aws:
+				arg = "--aws-zones=" + zones
+			case gce:
+				arg = "--gce-zones=" + zones
+			case azure:
+				arg = "--azure-locations=" + zones
+			default:
+				fmt.Fprintf(os.Stderr, "specifying zones is not yet supported on %s", cloud)
+				os.Exit(1)
+			}
+			args = append(args, arg)
 		}
 	}
+
 	if s.Geo {
 		args = append(args, "--geo")
 	}
@@ -1175,13 +1186,6 @@ func (f *clusterFactory) newCluster(
 	sargs := []string{roachprod, "create", c.name, "-n", fmt.Sprint(c.spec.NodeCount)}
 	sargs = append(sargs, cfg.spec.args()...)
-	if !local && zonesF != "" && cfg.spec.Zones == "" {
-		if cfg.spec.Geo {
-			sargs = append(sargs, "--gce-zones="+zonesF)
-		} else {
-			sargs = append(sargs, "--gce-zones="+firstZone(zonesF))
-		}
-	}
 	if !cfg.useIOBarrier {
 		sargs = append(sargs, "--local-ssd-no-ext4-barrier")
 	}

diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go
index 12431aeb7dc5..8394179a2f30 100644
--- a/pkg/sql/alter_table.go
+++ b/pkg/sql/alter_table.go
@@ -334,7 +334,7 @@ func (n *alterTableNode) startExec(params runParams) error {
 				return err
 			}

-			if err := params.p.dropSequencesOwnedByCol(params.ctx, colToDrop); err != nil {
+			if err := params.p.dropSequencesOwnedByCol(params.ctx, colToDrop, true /* queueJob */); err != nil {
 				return err
 			}

diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go
index 22b616620230..095952992f17 100644
--- a/pkg/sql/drop_table.go
+++ b/pkg/sql/drop_table.go
@@ -291,7 +291,7 @@ func (p *planner) dropTableImpl(

 	// Drop sequences that the columns of the table own
 	for _, col := range tableDesc.Columns {
-		if err := p.dropSequencesOwnedByCol(ctx, &col); err != nil {
+		if err := p.dropSequencesOwnedByCol(ctx, &col, queueJob); err != nil {
 			return droppedViews, err
 		}
 	}
@@ -338,7 +338,7 @@ func (p *planner) initiateDropTable(
 	drainName bool,
 ) error {
 	if tableDesc.Dropped() {
-		return fmt.Errorf("table %q is being dropped", tableDesc.Name)
+		return errors.Errorf("table %q is already being dropped", tableDesc.Name)
 	}

 	// If the table is not interleaved , use the delayed GC mechanism to
diff --git a/pkg/sql/logictest/testdata/logic_test/drop_index b/pkg/sql/logictest/testdata/logic_test/drop_index
index cb299e3e3da1..ac4a58072297 100644
--- a/pkg/sql/logictest/testdata/logic_test/drop_index
+++ b/pkg/sql/logictest/testdata/logic_test/drop_index
@@ -297,3 +297,19 @@ CREATE TABLE drop_index_test(a int); CREATE INDEX drop_index_test_index ON drop_
 ----
 NOTICE: the data for dropped indexes is reclaimed asynchronously
 HINT: The reclamation delay can be customized in the zone configuration for the table.
+
+# test correct error reporting from NewUniquenessConstraintViolationError; see #46376
+subtest new_uniqueness_constraint_error
+
+statement ok
+CREATE TABLE t (a INT PRIMARY KEY, b DECIMAL(10,1) NOT NULL DEFAULT(0), UNIQUE INDEX t_secondary (b), FAMILY (a, b));
+INSERT INTO t VALUES (100, 500.5);
+
+statement ok
+BEGIN;
+DROP INDEX t_secondary CASCADE;
+ALTER TABLE t DROP COLUMN b;
+INSERT INTO t SELECT a + 1 FROM t;
+
+statement error pq: duplicate key value
+UPSERT INTO t SELECT a + 1 FROM t;
diff --git a/pkg/sql/logictest/testdata/logic_test/json_builtins b/pkg/sql/logictest/testdata/logic_test/json_builtins
index 78fb0c536eb8..b96807f00762 100644
--- a/pkg/sql/logictest/testdata/logic_test/json_builtins
+++ b/pkg/sql/logictest/testdata/logic_test/json_builtins
@@ -324,29 +324,11 @@ SELECT to_json(x.*) FROM (VALUES (1,2)) AS x(a);
 ----
 {"a": 1, "column2": 2}

-# TODO(#44465): Implement the test cases below to be compatible with Postgres
-# and delete this one
 query T
 SELECT to_json(x.*) FROM (VALUES (1,2)) AS x(column2);
 ----
 {"column2": 2}

-# Odd, but postgres-compatible
-# query T
-# SELECT to_json(x.*) FROM (VALUES (1,2)) AS x(a,a);
-# ----
-# {"a": 1, "a": 2}
-
-# query T
-# SELECT to_json(x.*) FROM (VALUES (1,2)) AS x(column1);
-# ----
-# {"column1": 1, "column2": 2}
-
-# query T
-# SELECT to_json(x.*) FROM (VALUES (1,2)) AS x(column2);
-# ----
-# {"column2": 1, "column2": 2}
-
 # Regression test for #39502.
 statement ok
 SELECT json_agg((3808362714,))
diff --git a/pkg/sql/logictest/testdata/logic_test/sequences b/pkg/sql/logictest/testdata/logic_test/sequences
index 2bf2292603f0..46d72702117f 100644
--- a/pkg/sql/logictest/testdata/logic_test/sequences
+++ b/pkg/sql/logictest/testdata/logic_test/sequences
@@ -1,3 +1,4 @@
+# LogicTest: !3node-tenant(50840)
 # see also files `drop_sequence`, `alter_sequence`, `rename_sequence`

 # USING THE `lastval` FUNCTION
@@ -1095,3 +1096,89 @@ DROP SEQUENCE seq_50649

 statement ok
 DROP TABLE t_50649
+
+subtest regression_50712
+
+statement ok
+CREATE DATABASE db_50712
+
+statement ok
+CREATE TABLE db_50712.t_50712(a INT PRIMARY KEY)
+
+statement ok
+CREATE SEQUENCE db_50712.seq_50712 OWNED BY db_50712.t_50712.a
+
+statement ok
+DROP DATABASE db_50712 CASCADE
+
+# Same test as above, except the table is lexicographically less than the
+# sequence, which results in drop database dropping the table before the
+# sequence.
+statement ok
+CREATE DATABASE db_50712
+
+statement ok
+CREATE TABLE db_50712.a_50712(a INT PRIMARY KEY)
+
+statement ok
+CREATE SEQUENCE db_50712.seq_50712 OWNED BY db_50712.a_50712.a
+
+statement ok
+DROP DATABASE db_50712 CASCADE
+
+# Same test as above, except the db is set as the current db
+statement ok
+CREATE DATABASE db_50712
+
+statement ok
+SET DATABASE = db_50712
+
+statement ok
+CREATE TABLE a_50712(a INT PRIMARY KEY)
+
+statement ok
+CREATE SEQUENCE seq_50712 OWNED BY a_50712.a
+
+statement ok
+DROP DATABASE db_50712
+
+statement ok
+SET DATABASE = test
+
+# Tests db drop.
+# Sequence: outside db.
+# Owner: inside db.
+# The sequence should be automatically dropped.
+statement ok
+CREATE DATABASE db_50712
+
+statement ok
+CREATE TABLE db_50712.t_50712(a INT PRIMARY KEY)
+
+statement ok
+CREATE SEQUENCE seq_50712 OWNED BY db_50712.t_50712.a
+
+statement ok
+DROP DATABASE db_50712 CASCADE
+
+statement error pq: relation "seq_50712" does not exist
+SELECT * FROM seq_50712
+
+# Tests db drop.
+# Sequence: inside db.
+# Owner: outside db.
+# It should be possible to drop the table later.
+statement ok
+CREATE DATABASE db_50712
+
+statement ok
+CREATE TABLE t_50712(a INT PRIMARY KEY)
+
+statement ok
+CREATE SEQUENCE db_50712.seq_50712 OWNED BY t_50712.a
+
+statement ok
+DROP DATABASE db_50712 CASCADE
+
+statement ok
+DROP TABLE t_50712
diff --git a/pkg/sql/row/errors.go b/pkg/sql/row/errors.go
index 2457cab00a3a..6e5afe2cda87 100644
--- a/pkg/sql/row/errors.go
+++ b/pkg/sql/row/errors.go
@@ -123,7 +123,11 @@ func NewUniquenessConstraintViolationError(
 		&sqlbase.DatumAlloc{},
 		tableArgs,
 	); err != nil {
-		return err
+		return pgerror.Newf(pgcode.UniqueViolation,
+			"duplicate key value (%s)=(%v) violates unique constraint %q",
+			strings.Join(index.ColumnNames, ","),
+			errors.Wrapf(err, "couldn't fetch value"),
+			index.Name)
 	}
 	f := singleKVFetcher{kvs: [1]roachpb.KeyValue{{Key: key}}}
 	if value != nil {
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index dbb876c1ddab..a01cef2e2eb7 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -2774,6 +2774,8 @@ may increase either contention or retry errors, or both.`,
 	})),

 	// JSON functions.
+	// The behavior of both the JSON and JSONB data types in CockroachDB is
+	// similar to the behavior of the JSONB data type in Postgres.
 	"json_to_recordset": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Category: categoryJSON}),
 	"jsonb_to_recordset": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Category: categoryJSON}),

diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go
index 84868b7c40f9..6c86d6c4157a 100644
--- a/pkg/sql/sequence.go
+++ b/pkg/sql/sequence.go
@@ -335,6 +335,11 @@ func removeSequenceOwnerIfExists(
 	if err != nil {
 		return err
 	}
+	// If the table descriptor has already been dropped, there is no need to
+	// remove the reference.
+	if tableDesc.Dropped() {
+		return nil
+	}
 	col, err := tableDesc.FindColumnByID(opts.SequenceOwner.OwnerColumnID)
 	if err != nil {
 		return err
 	}
@@ -469,17 +474,21 @@ func maybeAddSequenceDependencies(
 // dropSequencesOwnedByCol drops all the sequences from col.OwnsSequenceIDs.
 // Called when the respective column (or the whole table) is being dropped.
 func (p *planner) dropSequencesOwnedByCol(
-	ctx context.Context, col *sqlbase.ColumnDescriptor,
+	ctx context.Context, col *sqlbase.ColumnDescriptor, queueJob bool,
 ) error {
 	for _, sequenceID := range col.OwnsSequenceIds {
 		seqDesc, err := p.Tables().GetMutableTableVersionByID(ctx, sequenceID, p.txn)
 		if err != nil {
 			return err
 		}
+		// This sequence is already getting dropped. Don't do it twice.
+		if seqDesc.Dropped() {
+			continue
+		}
 		jobDesc := fmt.Sprintf("removing sequence %q dependent on column %q which is being dropped",
 			seqDesc.Name, col.ColName())
 		if err := p.dropSequenceImpl(
-			ctx, seqDesc, true /* queueJob */, jobDesc, tree.DropRestrict,
+			ctx, seqDesc, queueJob, jobDesc, tree.DropRestrict,
 		); err != nil {
 			return err
 		}
diff --git a/pkg/sql/table.go b/pkg/sql/table.go
index 899f17a6815d..e5300281933b 100644
--- a/pkg/sql/table.go
+++ b/pkg/sql/table.go
@@ -12,7 +12,6 @@ package sql

 import (
 	"context"
-	"fmt"
 	"strings"

 	"github.com/cockroachdb/cockroach/pkg/clusterversion"
@@ -198,7 +197,8 @@ func (p *planner) writeSchemaChange(
 	}
 	if tableDesc.Dropped() {
 		// We don't allow schema changes on a dropped table.
-		return fmt.Errorf("table %q is being dropped", tableDesc.Name)
+		return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
+			tableDesc.Name)
 	}
 	if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
 		return err
@@ -214,7 +214,8 @@ func (p *planner) writeSchemaChangeToBatch(
 	}
 	if tableDesc.Dropped() {
 		// We don't allow schema changes on a dropped table.
-		return fmt.Errorf("table %q is being dropped", tableDesc.Name)
+		return errors.Errorf("no schema changes allowed on table %q as it is being dropped",
+			tableDesc.Name)
 	}
 	return p.writeTableDescToBatch(ctx, tableDesc, b)
 }