diff --git a/pkg/server/updates_test.go b/pkg/server/updates_test.go index 605ab9275434..a8212f83eacb 100644 --- a/pkg/server/updates_test.go +++ b/pkg/server/updates_test.go @@ -656,10 +656,6 @@ func TestReportUsage(t *testing.T) { `[false,false,false] ALTER TABLE _ CONFIGURE ZONE = _`, `[false,false,false] CREATE DATABASE _`, `[false,false,false] CREATE TABLE _ (_ INT, CONSTRAINT _ CHECK (_ > _))`, - `[false,false,false] INSERT INTO _ SELECT unnest(ARRAY[_, _, __more2__])`, - `[false,false,false] INSERT INTO _ VALUES (_), (__more2__)`, - `[false,false,false] INSERT INTO _ VALUES (length($1::STRING)), (__more1__)`, - `[false,false,false] INSERT INTO _(_, _) VALUES (_, _)`, `[false,false,false] SET CLUSTER SETTING "cluster.organization" = _`, `[false,false,false] SET CLUSTER SETTING "diagnostics.reporting.send_crash_reports" = _`, `[false,false,false] SET CLUSTER SETTING "server.time_until_store_dead" = _`, @@ -668,6 +664,10 @@ func TestReportUsage(t *testing.T) { `[false,false,false] SET application_name = _`, `[false,false,false] UPDATE _ SET _ = _ + _`, `[false,false,true] CREATE TABLE _ (_ INT PRIMARY KEY, _ INT, INDEX (_) INTERLEAVE IN PARENT _ (_))`, + `[true,false,false] INSERT INTO _ SELECT unnest(ARRAY[_, _, __more2__])`, + `[true,false,false] INSERT INTO _ VALUES (_), (__more2__)`, + `[true,false,false] INSERT INTO _ VALUES (length($1::STRING)), (__more1__)`, + `[true,false,false] INSERT INTO _(_, _) VALUES (_, _)`, `[true,false,false] SELECT (_, _, __more2__) = (SELECT _, _, _, _ FROM _ LIMIT _)`, `[true,false,true] SELECT _ / $1`, `[true,false,true] SELECT _ / _`, diff --git a/pkg/sql/data_source.go b/pkg/sql/data_source.go index f6d47c264860..cde401b09aaf 100644 --- a/pkg/sql/data_source.go +++ b/pkg/sql/data_source.go @@ -440,7 +440,8 @@ func (p *planner) getAliasedTableName(n tree.TableExpr) (*tree.TableName, *tree. 
if ate, ok := n.(*tree.AliasedTableExpr); ok { n = ate.Expr // It's okay to ignore the As columns here, as they're not permitted in - // DML aliases where this function is used. + // DML aliases where this function is used. The grammar does not allow + // them, so the parser would have reported an error if they were present. if ate.As.Alias != "" { alias = tree.NewUnqualifiedTableName(ate.As.Alias) } diff --git a/pkg/sql/insert.go b/pkg/sql/insert.go index 0bc7b3beac2a..0c020fc9f381 100644 --- a/pkg/sql/insert.go +++ b/pkg/sql/insert.go @@ -224,7 +224,7 @@ func (p *planner) Insert( // TODO(justin): this is too restrictive. It should // be possible to allow INSERT INTO (x) VALUES (DEFAULT) // if x is a computed column. See #22434. - return nil, sqlbase.CannotWriteToComputedColError(&insertCols[maxInsertIdx]) + return nil, sqlbase.CannotWriteToComputedColError(insertCols[maxInsertIdx].Name) } arityChecked = true } @@ -254,7 +254,7 @@ func (p *planner) Insert( return nil, err } if numExprs > maxInsertIdx { - return nil, sqlbase.CannotWriteToComputedColError(&insertCols[maxInsertIdx]) + return nil, sqlbase.CannotWriteToComputedColError(insertCols[maxInsertIdx].Name) } } diff --git a/pkg/sql/logictest/testdata/logic_test/builtin_function b/pkg/sql/logictest/testdata/logic_test/builtin_function index d3d8b467ea4c..0e30b8f72df0 100644 --- a/pkg/sql/logictest/testdata/logic_test/builtin_function +++ b/pkg/sql/logictest/testdata/logic_test/builtin_function @@ -1987,8 +1987,12 @@ SELECT pg_catalog.pg_encoding_to_char(6), pg_catalog.pg_encoding_to_char(7) ---- UTF8 NULL +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. 
query TITI SELECT pg_catalog.inet_client_addr(), pg_catalog.inet_client_port(), pg_catalog.inet_server_addr(), pg_catalog.inet_server_port() +FROM pg_class +WHERE relname = 'pg_constraint' ---- ::/0 0 ::/0 0 diff --git a/pkg/sql/logictest/testdata/logic_test/computed b/pkg/sql/logictest/testdata/logic_test/computed index 36951cb78a7f..f2903d39fbc6 100644 --- a/pkg/sql/logictest/testdata/logic_test/computed +++ b/pkg/sql/logictest/testdata/logic_test/computed @@ -722,7 +722,7 @@ CREATE TABLE error_check (k INT PRIMARY KEY, s STRING, i INT AS (s::INT) STORED) statement ok INSERT INTO error_check VALUES(1, '1') -statement error computed column i: +statement error could not parse "foo" as type int: strconv.ParseInt INSERT INTO error_check VALUES(2, 'foo') statement error computed column i: diff --git a/pkg/sql/logictest/testdata/logic_test/optimizer b/pkg/sql/logictest/testdata/logic_test/optimizer index 248cdcefe20e..0e655b148dee 100644 --- a/pkg/sql/logictest/testdata/logic_test/optimizer +++ b/pkg/sql/logictest/testdata/logic_test/optimizer @@ -97,8 +97,8 @@ CREATE SEQUENCE seq statement ok SET OPTIMIZER = ALWAYS -query error pq: unsupported statement: \*tree\.Insert -INSERT INTO test (k, v) VALUES (5, 50) +query error pq: unsupported statement: \*tree\.Delete +DELETE FROM test WHERE k=5 # Don't fall back to heuristic planner in ALWAYS mode. query error pq: aggregates with FILTER are not supported yet diff --git a/pkg/sql/logictest/testdata/logic_test/pgoidtype b/pkg/sql/logictest/testdata/logic_test/pgoidtype index 1bd690293947..0ebef3e7adc2 100644 --- a/pkg/sql/logictest/testdata/logic_test/pgoidtype +++ b/pkg/sql/logictest/testdata/logic_test/pgoidtype @@ -40,26 +40,42 @@ SELECT pg_typeof('upper'::REGPROC), pg_typeof('upper'::REGPROCEDURE), pg_typeof( ---- regproc regprocedure regtype +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. 
query OO SELECT 'pg_constraint'::REGCLASS, 'pg_catalog.pg_constraint'::REGCLASS +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint pg_constraint query error pgcode 42P01 relation "foo.pg_constraint" does not exist SELECT 'foo.pg_constraint'::REGCLASS +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT '"pg_constraint"'::REGCLASS, ' "pg_constraint" '::REGCLASS +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint pg_constraint +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT 'pg_constraint '::REGCLASS, ' pg_constraint '::REGCLASS +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint pg_constraint +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT 'pg_constraint '::REGCLASS, '"pg_constraint"'::REGCLASS::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint 139623798 @@ -75,16 +91,24 @@ WHERE relname = 'pg_constraint' ---- 139623798 pg_constraint 139623798 pg_constraint pg_constraint +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OOOO SELECT 'upper'::REGPROC, 'upper'::REGPROCEDURE, 'pg_catalog.upper'::REGPROCEDURE, 'upper'::REGPROC::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- upper upper upper 2896429946 query error invalid function name SELECT 'invalid.more.pg_catalog.upper'::REGPROCEDURE +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. 
query OOO SELECT 'upper(int)'::REGPROC, 'upper(int)'::REGPROCEDURE, 'upper(int)'::REGPROC::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- upper upper 2896429946 @@ -106,28 +130,48 @@ SELECT 'blah(, )'::REGPROC query error more than one function named 'sqrt' SELECT 'sqrt'::REGPROC +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OOOO SELECT 'array_in'::REGPROC, 'array_in(a,b,c)'::REGPROC, 'pg_catalog.array_in'::REGPROC, 'pg_catalog.array_in( a ,b, c )'::REGPROC +FROM pg_class +WHERE relname = 'pg_constraint' ---- array_in array_in array_in array_in +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OOOO SELECT 'array_in'::REGPROCEDURE, 'array_in(a,b,c)'::REGPROCEDURE, 'pg_catalog.array_in'::REGPROCEDURE, 'pg_catalog.array_in( a ,b, c )'::REGPROCEDURE +FROM pg_class +WHERE relname = 'pg_constraint' ---- array_in array_in array_in array_in +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT 'public'::REGNAMESPACE, 'public'::REGNAMESPACE::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- public 2397796629 +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT 'bool'::REGTYPE, 'bool'::REGTYPE::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- boolean 16 +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query OO SELECT 'numeric(10,3)'::REGTYPE, 'numeric( 10, 3 )'::REGTYPE +FROM pg_class +WHERE relname = 'pg_constraint' ---- numeric numeric @@ -151,14 +195,22 @@ SELECT 'blah'::REGTYPE ## Test other cast syntaxes +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. 
query O SELECT CAST ('pg_constraint' AS REGCLASS) +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. # This forces the b_expr form of the cast syntax. query OO SELECT ('pg_constraint')::REGCLASS, ('pg_constraint')::REGCLASS::OID +FROM pg_class +WHERE relname = 'pg_constraint' ---- pg_constraint 139623798 @@ -289,13 +341,21 @@ SELECT NOT (prorettype::regtype::text = 'foo') AND proretset FROM pg_proc WHERE ---- false +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. query TTTTT SELECT crdb_internal.create_regtype(10, 'foo'), crdb_internal.create_regclass(10, 'foo'), crdb_internal.create_regproc(10, 'foo'), crdb_internal.create_regprocedure(10, 'foo'), crdb_internal.create_regnamespace(10, 'foo') +FROM pg_class +WHERE relname = 'pg_constraint' ---- foo foo foo foo foo +# TODO(jordan): Restore this to original form by removing FROM +# clause once issue 32422 is fixed. 
query OOOOO SELECT crdb_internal.create_regtype(10, 'foo')::oid, crdb_internal.create_regclass(10, 'foo')::oid, crdb_internal.create_regproc(10, 'foo')::oid, crdb_internal.create_regprocedure(10, 'foo')::oid, crdb_internal.create_regnamespace(10, 'foo')::oid +FROM pg_class +WHERE relname = 'pg_constraint' ---- 10 10 10 10 10 diff --git a/pkg/sql/logictest/testdata/logic_test/statement_statistics b/pkg/sql/logictest/testdata/logic_test/statement_statistics index 0db49e846eec..8376eb9bd694 100644 --- a/pkg/sql/logictest/testdata/logic_test/statement_statistics +++ b/pkg/sql/logictest/testdata/logic_test/statement_statistics @@ -112,7 +112,7 @@ SELECT key,flags WHERE application_name = 'valuetest' ORDER BY key, flags ---- key flags -INSERT INTO test VALUES (_, _, __more1__), (__more1__) - +INSERT INTO test VALUES (_, _, __more1__), (__more1__) · SELECT (_, _, __more3__) FROM test WHERE _ · SELECT key FROM test.crdb_internal.node_statement_statistics · SELECT sin(_) · diff --git a/pkg/sql/logictest/testdata/logic_test/txn b/pkg/sql/logictest/testdata/logic_test/txn index ad415714dd1d..900ac5015466 100644 --- a/pkg/sql/logictest/testdata/logic_test/txn +++ b/pkg/sql/logictest/testdata/logic_test/txn @@ -931,19 +931,19 @@ statement ok COMMIT statement error cannot execute CREATE TABLE in a read-only transaction -CREATE TABLE a (a int) +CREATE TABLE tab (a int) statement error cannot execute INSERT in a read-only transaction -INSERT INTO a VALUES(1) +INSERT INTO kv VALUES('foo') statement error cannot execute UPDATE in a read-only transaction -UPDATE a SET a = 1 +UPDATE kv SET v = 'foo' statement error cannot execute INSERT in a read-only transaction -UPSERT INTO a VALUES(2) +UPSERT INTO kv VALUES('foo') statement error cannot execute DELETE in a read-only transaction -DELETE FROM a +DELETE FROM kv statement error cannot execute nextval\(\) in a read-only transaction SELECT nextval('a') diff --git a/pkg/sql/logictest/testdata/logic_test/views 
b/pkg/sql/logictest/testdata/logic_test/views index 27bc4bd7aa3f..a95e0e99c550 100644 --- a/pkg/sql/logictest/testdata/logic_test/views +++ b/pkg/sql/logictest/testdata/logic_test/views @@ -525,3 +525,21 @@ query T SELECT create_statement FROM [SHOW CREATE w3] ---- CREATE VIEW w3 (x) AS (WITH t AS (SELECT x FROM test.public.w) SELECT x FROM t) + +# Test CRUD privilege in view. + +statement ok +CREATE TABLE t (a INT PRIMARY KEY, b INT) + +statement ok +CREATE VIEW crud_view AS SELECT a, b FROM [INSERT INTO t (a, b) VALUES (100, 100) RETURNING a, b] + +statement ok +GRANT SELECT ON crud_view TO testuser + +user testuser + +query error user testuser does not have INSERT privilege on relation t +SELECT * FROM crud_view + +user root diff --git a/pkg/sql/logictest/testdata/planner_test/distsql_subquery b/pkg/sql/logictest/testdata/planner_test/distsql_subquery index d79075d2475d..69e823db1319 100644 --- a/pkg/sql/logictest/testdata/planner_test/distsql_subquery +++ b/pkg/sql/logictest/testdata/planner_test/distsql_subquery @@ -1,4 +1,4 @@ -# LogicTest: 5node-dist 5node-dist-opt +# LogicTest: 5node-dist query T SELECT url FROM [EXPLAIN (DISTSQL) SELECT 1 WHERE EXISTS (SELECT 1)] diff --git a/pkg/sql/opt/bench/stub_factory.go b/pkg/sql/opt/bench/stub_factory.go index ca621f0e4a23..012c08886327 100644 --- a/pkg/sql/opt/bench/stub_factory.go +++ b/pkg/sql/opt/bench/stub_factory.go @@ -186,3 +186,9 @@ func (f *stubFactory) ConstructExplain( func (f *stubFactory) ConstructShowTrace(typ tree.ShowTraceType, compact bool) (exec.Node, error) { return struct{}{}, nil } + +func (f *stubFactory) ConstructInsert( + input exec.Node, table opt.Table, rowsNeeded bool, +) (exec.Node, error) { + return struct{}{}, nil +} diff --git a/pkg/sql/opt/catalog.go b/pkg/sql/opt/catalog.go index 189468d9d42a..1e9c04f86299 100644 --- a/pkg/sql/opt/catalog.go +++ b/pkg/sql/opt/catalog.go @@ -108,10 +108,10 @@ type Table interface { IndexCount() int // Index returns the ith index, where i < 
IndexCount. The table's primary - // index is always the 0th index, and is always present (use the - // opt.PrimaryIndex to select it). The primary index corresponds to the - // table's primary key. If a primary key was not explicitly specified, then - // the system implicitly creates one based on a hidden rowid column. + // index is always the 0th index, and is always present (use opt.PrimaryIndex + // to select it). The primary index corresponds to the table's primary key. + // If a primary key was not explicitly specified, then the system implicitly + // creates one based on a hidden rowid column. Index(i int) Index // StatisticCount returns the number of statistics available for the table. @@ -119,6 +119,21 @@ type Table interface { // Statistic returns the ith statistic, where i < StatisticCount. Statistic(i int) TableStatistic + + // MutationColumnCount returns the number of columns that are in the process + // of being added or dropped and that need to be set to their default values + // when inserting new rows. These columns are in the DELETE_AND_WRITE_ONLY + // state. See this RFC for more details: + // + // cockroachdb/cockroach/docs/RFCS/20151014_online_schema_change.md + // + MutationColumnCount() int + + // MutationColumn returns a Column interface for one of the columns that is + // in the process of being added or dropped. The index of the column must be + // <= MutationColumnCount. The set of columns returned by MutationColumn are + // always disjoint from those returned by the Column method. + MutationColumn(i int) Column } // View is an interface to a database view, exposing only the information needed @@ -149,12 +164,38 @@ type Column interface { // DatumType returns the data type of the column. DatumType() types.T + // ColTypeStr returns the SQL data type of the column, as a string. Note that + // this is sometimes different than DatumType().String(), since datum types + // are a subset of column types. 
+ ColTypeStr() string + // IsNullable returns true if the column is nullable. IsNullable() bool // IsHidden returns true if the column is hidden (e.g., there is always a // hidden column called rowid if there is no primary key on the table). IsHidden() bool + + // HasDefault returns true if the column has a default value. DefaultExprStr + // will be set to the SQL expression string in that case. + HasDefault() bool + + // DefaultExprStr is set to the SQL expression string that describes the + // column's default value. It is used when the user does not provide a value + // for the column when inserting a row. Default values cannot depend on other + // columns. + DefaultExprStr() string + + // IsComputed returns true if the column is a computed value. ComputedExprStr + // will be set to the SQL expression string in that case. + IsComputed() bool + + // ComputedExprStr is set to the SQL expression string that describes the + // column's computed value. It is always used to provide the column's value + // when inserting or updating a row. Computed values cannot depend on other + // computed columns, but they can depend on all other columns, including + // columns with default values. + ComputedExprStr() string } // IndexColumn describes a single column that is part of an index definition. 
diff --git a/pkg/sql/opt/exec/execbuilder/relational_builder.go b/pkg/sql/opt/exec/execbuilder/relational_builder.go index a1f7662f6313..c8e0c1c367c3 100644 --- a/pkg/sql/opt/exec/execbuilder/relational_builder.go +++ b/pkg/sql/opt/exec/execbuilder/relational_builder.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -118,6 +119,14 @@ func (ep *execPlan) sqlOrdering(ordering opt.Ordering) sqlbase.ColumnOrdering { func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) { var ep execPlan var err error + + // Raise error if a mutation op is part of a read-only transaction. + if opt.IsMutationOp(e) && b.evalCtx.TxnReadOnly { + return execPlan{}, pgerror.NewErrorf(pgerror.CodeReadOnlySQLTransactionError, + "cannot execute %s in a read-only transaction", e.Op().SyntaxTag()) + } + + // Handle read-only operators which never write data or modify schema. switch t := e.(type) { case *memo.ValuesExpr: ep, err = b.buildValues(t) @@ -167,6 +176,9 @@ func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) { case *memo.ProjectSetExpr: ep, err = b.buildProjectSet(t) + case *memo.InsertExpr: + ep, err = b.buildInsert(t) + default: if opt.IsSetOp(e) { ep, err = b.buildSetOp(e) @@ -179,11 +191,12 @@ func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) { if opt.IsJoinApplyOp(e) { return execPlan{}, b.decorrelationError() } - return execPlan{}, errors.Errorf("unsupported relational op %s", e.Op()) } if err != nil { return execPlan{}, err } + + // Wrap the expression in a render expression if presentation requires it. 
if p := e.RequiredPhysical(); !p.Presentation.Any() { ep, err = b.applyPresentation(ep, p) } @@ -937,6 +950,36 @@ func (b *Builder) buildProjectSet(projectSet *memo.ProjectSetExpr) (execPlan, er return ep, nil } +func (b *Builder) buildInsert(ins *memo.InsertExpr) (execPlan, error) { + // Build the input query and ensure that the input columns that correspond to + // the table columns are projected. + input, err := b.buildRelational(ins.Input) + if err != nil { + return execPlan{}, err + } + input, err = b.ensureColumns(input, ins.InputCols, nil, ins.ProvidedPhysical().Ordering) + if err != nil { + return execPlan{}, err + } + + // Construct the Insert node. + tab := b.mem.Metadata().Table(ins.Table) + node, err := b.factory.ConstructInsert(input.root, tab, ins.NeedResults) + if err != nil { + return execPlan{}, err + } + + // If INSERT returns rows, they contain all non-mutation columns from the + // table, in the same order they're defined in the table. + ep := execPlan{root: node} + if ins.NeedResults { + for i, n := 0, tab.ColumnCount(); i < n; i++ { + ep.outputCols.Set(int(ins.InputCols[i]), i) + } + } + return ep, nil +} + // needProjection figures out what projection is needed on top of the input plan // to produce the given list of columns. If the input plan already produces // the columns (in the same order), returns needProj=false. 
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/aggregate b/pkg/sql/opt/exec/execbuilder/testdata/aggregate index a68580f487bc..af4bc23949fe 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/aggregate +++ b/pkg/sql/opt/exec/execbuilder/testdata/aggregate @@ -73,36 +73,36 @@ render · · (min int, max i query TTTTT EXPLAIN (TYPES) SELECT min(1), count(1), max(1), sum_int(1), avg(1)::float, sum(1), stddev(1), variance(1), bool_and(true), bool_or(true), to_hex(xor_agg(b'\x01')) ---- -render · · (min int, count int, max int, sum_int int, avg float, sum decimal, stddev decimal, variance decimal, bool_and bool, bool_or bool, to_hex string) · - │ render 0 (agg0)[int] · · - │ render 1 (agg1)[int] · · - │ render 2 (agg2)[int] · · - │ render 3 (agg3)[int] · · - │ render 4 ((agg4)[decimal]::FLOAT8)[float] · · - │ render 5 (agg5)[decimal] · · - │ render 6 (agg6)[decimal] · · - │ render 7 (agg7)[decimal] · · - │ render 8 (agg8)[bool] · · - │ render 9 (agg9)[bool] · · - │ render 10 (to_hex((agg10)[bytes]))[string] · · - └── group · · (agg0 int, agg1 int, agg2 int, agg3 int, agg4 decimal, agg5 decimal, agg6 decimal, agg7 decimal, agg8 bool, agg9 bool, agg10 bytes) · - │ aggregate 0 min(column1) · · - │ aggregate 1 count(column1) · · - │ aggregate 2 max(column1) · · - │ aggregate 3 sum_int(column1) · · - │ aggregate 4 avg(column1) · · - │ aggregate 5 sum(column1) · · - │ aggregate 6 stddev(column1) · · - │ aggregate 7 variance(column1) · · - │ aggregate 8 bool_and(column10) · · - │ aggregate 9 bool_or(column10) · · - │ aggregate 10 xor_agg(column13) · · - │ scalar · · · - └── render · · (column1 int, column10 bool, column13 bytes) · - │ render 0 (1)[int] · · - │ render 1 (true)[bool] · · - │ render 2 ('\x01')[bytes] · · - └── emptyrow · · () · +render · · (min int, count int, max int, sum_int int, avg float, sum decimal, stddev decimal, variance decimal, bool_and bool, bool_or bool, to_hex string) · + │ render 0 (agg0)[int] · · + │ render 1 (agg1)[int] · · + │ render 2 
(agg2)[int] · · + │ render 3 (agg3)[int] · · + │ render 4 ((agg4)[decimal]::FLOAT8)[float] · · + │ render 5 (agg5)[decimal] · · + │ render 6 (agg6)[decimal] · · + │ render 7 (agg7)[decimal] · · + │ render 8 (agg8)[bool] · · + │ render 9 (agg9)[bool] · · + │ render 10 (to_hex((agg10)[bytes]))[string] · · + └── group · · (agg0 int, agg1 int, agg2 int, agg3 int, agg4 decimal, agg5 decimal, agg6 decimal, agg7 decimal, agg8 bool, agg9 bool, agg10 bytes) · + │ aggregate 0 min(column1) · · + │ aggregate 1 count(column1) · · + │ aggregate 2 max(column1) · · + │ aggregate 3 sum_int(column1) · · + │ aggregate 4 avg(column1) · · + │ aggregate 5 sum(column1) · · + │ aggregate 6 stddev(column1) · · + │ aggregate 7 variance(column1) · · + │ aggregate 8 bool_and(column10) · · + │ aggregate 9 bool_or(column10) · · + │ aggregate 10 xor_agg(column13) · · + │ scalar · · · + └── values · · (column1 int, column10 bool, column13 bytes) · +· size 3 columns, 1 row · · +· row 0, expr 0 (1)[int] · · +· row 0, expr 1 (true)[bool] · · +· row 0, expr 2 ('\x01')[bytes] · · query TTTTT EXPLAIN (TYPES) SELECT count(*), k FROM kv GROUP BY 2 diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode b/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode index 2d00ca10d859..c21078aefc9b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_auto_mode @@ -12,8 +12,8 @@ CREATE TABLE kv (k INT PRIMARY KEY, v INT) query BT colnames EXPLAIN (DISTSQL) SELECT 1 ---- -automatic url -false https://cockroachdb.github.io/distsqlplan/decode.html#eJyMjzFLBDEUhHt_xTKVQsBs-0q7bU45xEZSLMlwLq7JkpeAcOx_l00KsRCunJnk-3hXxBR4mr-okHeMcAZbTp6qKR9VfzCFb4g1WOJWy1E7A58yIVeUpayE4G1eK_XRwiCwzMvaiHZ4Gu7t4D9q_NQHGJwZA7MMo4hMp1e43SDV8svVMl8Isbu53X2mbikq_7j_JzsDhgv7fZpq9nzJyTdNj8_tXysCtfR17GGKfdrdfvcTAAD__5-jaAo= +automatic url +false 
https://cockroachdb.github.io/distsqlplan/decode.html#eJyMj7GKwzAQRPv7CjO14ORWf-DmLqQNKoS0GIOiNVopBIz-PVgq0rqcNzM77IHEgf7ckwTmgRlWYc_sSYTziUZgCW8YrbClvZYTWwXPmWAOlK1EgkFk7-L0crGSTPpXQyFQcVvs-abAtXzbUtxKMHNT1xfuJDsnoUuXdbMKFFYaXwjX7OmW2feZIf97r4NAUoY7D7GkYTXbfj4BAAD__xM4YY8= # Check the JSON column is still there, albeit hidden. query T colnames diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain b/pkg/sql/opt/exec/execbuilder/testdata/explain index 24185b4f1dc2..9369ae18023c 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain @@ -24,27 +24,27 @@ render · · query TTTTT colnames EXPLAIN (PLAN, VERBOSE) SELECT 1 a ---- -tree field description columns ordering -render · · (a) · - │ render 0 1 · · - └── emptyrow · · () · +tree field description columns ordering +values · · (a) · +· size 1 column, 1 row · · +· row 0, expr 0 1 · · query TTTTT colnames EXPLAIN (VERBOSE,PLAN) SELECT 1 a ---- -tree field description columns ordering -render · · (a) · - │ render 0 1 · · - └── emptyrow · · () · +tree field description columns ordering +values · · (a) · +· size 1 column, 1 row · · +· row 0, expr 0 1 · · query TTTTT colnames EXPLAIN (TYPES) SELECT 1 a ---- -tree field description columns ordering -render · · (a int) · - │ render 0 (1)[int] · · - └── emptyrow · · () · +tree field description columns ordering +values · · (a int) · +· size 1 column, 1 row · · +· row 0, expr 0 (1)[int] · · statement error cannot set EXPLAIN mode more than once EXPLAIN (PLAN,PLAN) SELECT 1 @@ -420,9 +420,9 @@ count · · () query TTTTT EXPLAIN (TYPES) SELECT 42 AS a ---- -render · · (a int) · - │ render 0 (42)[int] · · - └── emptyrow · · () · +values · · (a int) · +· size 1 column, 1 row · · +· row 0, expr 0 (42)[int] · · query TTTTT EXPLAIN (TYPES) SELECT * FROM t @@ -569,36 +569,32 @@ scan · · query TTTTT EXPLAIN (TYPES) SELECT abs(2-3) AS a ---- -render · · (a int) · - │ render 0 (1)[int] · · - └── emptyrow · · () · +values · · (a 
int) · +· size 1 column, 1 row · · +· row 0, expr 0 (1)[int] · · # Check array subscripts (#13811) query TTTTT EXPLAIN (TYPES) SELECT x[1] FROM (SELECT ARRAY[1,2,3] AS x) ---- -render · · (x int) · - │ render 0 (1)[int] · · - └── emptyrow · · () · +render · · (x int) · + │ render 0 ((x)[int[]][(1)[int]])[int] · · + └── values · · (x int[]) · +· size 1 column, 1 row · · +· row 0, expr 0 (ARRAY[(1)[int],(2)[int],(3)[int]])[int[]] · · query T EXPLAIN (OPT) SELECT 1 AS r ---- -project - ├── columns: r:1(int!null) +values + ├── columns: r:1(int) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) ├── prune: (1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{int}] └── const: 1 [type=int] # Test with an unsupported statement. diff --git a/pkg/sql/opt/exec/execbuilder/testdata/insert b/pkg/sql/opt/exec/execbuilder/testdata/insert index 780fd815b52b..fd51122aeab7 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/insert +++ b/pkg/sql/opt/exec/execbuilder/testdata/insert @@ -298,20 +298,22 @@ SELECT tree, field, description FROM [ EXPLAIN (VERBOSE) INSERT INTO insert_t TABLE select_t ORDER BY v DESC ] ---- -count · · - └── insert · · - │ into insert_t(x, v, rowid) - │ default 0 NULL - │ default 1 NULL - │ default 2 unique_rowid() - └── sort · · - │ order -v - └── render · · - │ render 0 test.public.select_t.x - │ render 1 test.public.select_t.v - └── scan · · -· table select_t@primary -· spans ALL +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + │ render 0 x + │ render 1 v + │ render 2 column7 + └── sort · · + │ order -v + └── render · · + │ render 0 unique_rowid() + │ render 1 x + │ render 2 v + └── scan · · +· table select_t@primary +· spans ALL # Check that INSERT supports LIMIT (MySQL extension) query TTT @@ -319,72 +321,93 @@ SELECT tree, field, description FROM [ 
EXPLAIN (VERBOSE) INSERT INTO insert_t SELECT * FROM select_t LIMIT 1 ] ---- -count · · - └── insert · · - │ into insert_t(x, v, rowid) - │ default 0 NULL - │ default 1 NULL - │ default 2 unique_rowid() - └── limit · · - │ count 1 - └── render · · - │ render 0 test.public.select_t.x - │ render 1 test.public.select_t.v - └── scan · · -· table select_t@primary -· spans ALL -· limit 1 +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + │ render 0 x + │ render 1 v + │ render 2 unique_rowid() + └── scan · · +· table select_t@primary +· spans ALL +· limit 1 # Check the grouping of LIMIT and ORDER BY query TTT EXPLAIN (PLAN) INSERT INTO insert_t VALUES (1,1), (2,2) LIMIT 1 ---- -count · · - └── insert · · - │ into insert_t(x, v, rowid) - └── limit · · - │ count 1 - └── values · · -· size 2 columns, 2 rows +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + └── limit · · + │ count 1 + └── values · · +· size 2 columns, 2 rows query TTT EXPLAIN (PLAN) INSERT INTO insert_t VALUES (1,1), (2,2) ORDER BY 2 LIMIT 1 ---- -count · · - └── insert · · - │ into insert_t(x, v, rowid) - └── limit · · - │ count 1 - └── sort · · - │ order +column2 - │ strategy top 1 - └── values · · -· size 2 columns, 2 rows +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + └── limit · · + │ count 1 + └── sort · · + │ order +column2 + └── values · · +· size 2 columns, 2 rows query TTT EXPLAIN (PLAN) INSERT INTO insert_t (VALUES (1,1), (2,2) ORDER BY 2) LIMIT 1 ---- -count · · - └── insert · · - │ into insert_t(x, v, rowid) - └── limit · · - │ count 1 - └── sort · · - │ order +column2 - │ strategy top 1 - └── values · · -· size 2 columns, 2 rows +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + └── limit · · + │ count 1 + └── sort · · + │ order +column2 + └── values · · +· size 2 columns, 2 rows query TTT EXPLAIN (PLAN) INSERT INTO insert_t (VALUES (1,1), (2,2) ORDER BY 2 LIMIT 1) ---- -count · · - 
└── insert · · - │ into insert_t(x, v, rowid) - └── limit · · - │ count 1 - └── sort · · - │ order +column2 - │ strategy top 1 - └── values · · -· size 2 columns, 2 rows +count · · + └── insert · · + │ into insert_t(x, v, rowid) + └── render · · + └── limit · · + │ count 1 + └── sort · · + │ order +column2 + └── values · · +· size 2 columns, 2 rows + +# ORDER BY expression that's not inserted into table. +query TTTTT +EXPLAIN (VERBOSE) INSERT INTO insert_t (SELECT length(k), 2 FROM kv ORDER BY k || v) RETURNING x+v +---- +render · · ("?column?") · + │ render 0 x + v · · + └── run · · (x, v, rowid[hidden]) · + └── insert · · (x, v, rowid[hidden]) · + │ into insert_t(x, v, rowid) · · + └── render · · (length, "?column?", column9) · + │ render 0 length · · + │ render 1 "?column?" · · + │ render 2 column9 · · + └── sort · · (column9, length, "?column?", column8) +column8 + │ order +column8 · · + └── render · · (column9, length, "?column?", column8) · + │ render 0 unique_rowid() · · + │ render 1 length(k) · · + │ render 2 2 · · + │ render 3 k || v · · + └── scan · · (k, v) · +· table kv@primary · · +· spans ALL · · diff --git a/pkg/sql/opt/exec/execbuilder/testdata/orderby b/pkg/sql/opt/exec/execbuilder/testdata/orderby index bbe47e064fbf..31060eafe9b2 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/orderby +++ b/pkg/sql/opt/exec/execbuilder/testdata/orderby @@ -482,15 +482,16 @@ ordinality · · (x, "ordinality") · query TTTTT EXPLAIN (VERBOSE) INSERT INTO t(a, b) SELECT * FROM (SELECT 1 AS x, 2 AS y) ORDER BY x RETURNING b ---- -render · · (b) · - │ render 0 test.public.t.b · · - └── run · · (a, b, c) · - └── insert · · (a, b, c) · - │ into t(a, b) · · - └── render · · (x, y) x=CONST; y=CONST - │ render 0 1 · · - │ render 1 2 · · - └── emptyrow · · () · +render · · (b) · + │ render 0 b · · + └── run · · (a, b, c) · + └── insert · · (a, b, c) · + │ into t(a, b, c) · · + └── values · · (x, y, column6) · +· size 3 columns, 1 row · · +· row 0, expr 0 1 · · +· row 0, expr 
1 2 · · +· row 0, expr 2 NULL · · query TTTTT EXPLAIN (VERBOSE) DELETE FROM t WHERE a = 3 RETURNING b diff --git a/pkg/sql/opt/exec/execbuilder/testdata/scalar b/pkg/sql/opt/exec/execbuilder/testdata/scalar index 72baf39351b7..d269b85570c6 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/scalar +++ b/pkg/sql/opt/exec/execbuilder/testdata/scalar @@ -10,37 +10,37 @@ CREATE TABLE t (a INT, b INT, c INT, d INT, j JSONB, s STRING) query TTTTT EXPLAIN (VERBOSE) SELECT 1 + 2 AS r ---- -render · · (r) · - │ render 0 3 · · - └── emptyrow · · () · +values · · (r) · +· size 1 column, 1 row · · +· row 0, expr 0 3 · · query TTTTT EXPLAIN (VERBOSE) SELECT true AS r ---- -render · · (r) · - │ render 0 true · · - └── emptyrow · · () · +values · · (r) · +· size 1 column, 1 row · · +· row 0, expr 0 true · · query TTTTT EXPLAIN (VERBOSE) SELECT false AS r ---- -render · · (r) · - │ render 0 false · · - └── emptyrow · · () · +values · · (r) · +· size 1 column, 1 row · · +· row 0, expr 0 false · · query TTTTT EXPLAIN (VERBOSE) SELECT (1, 2) AS r ---- -render · · (r) · - │ render 0 (1, 2) · · - └── emptyrow · · () · +values · · (r) · +· size 1 column, 1 row · · +· row 0, expr 0 (1, 2) · · query TTTTT EXPLAIN (VERBOSE) SELECT (true, false) AS r ---- -render · · (r) · - │ render 0 (true, false) · · - └── emptyrow · · () · +values · · (r) · +· size 1 column, 1 row · · +· row 0, expr 0 (true, false) · · query TTTTT EXPLAIN (VERBOSE) SELECT 1 + 2 AS r FROM t @@ -554,27 +554,27 @@ filter · · (attrelid, attname, at query TTTTT EXPLAIN (VERBOSE) SELECT CASE WHEN length('foo') = 3 THEN 42 ELSE 1/3 END ---- -render · · ("case") · - │ render 0 42 · · - └── emptyrow · · () · +values · · ("case") · +· size 1 column, 1 row · · +· row 0, expr 0 42 · · # Don't fold the CASE since there is an error in the ELSE clause. 
query TTTTT EXPLAIN (VERBOSE) SELECT CASE WHEN length('foo') = 3 THEN 42 ELSE 1/0 END ---- -render · · ("case") · - │ render 0 CASE WHEN true THEN 42 ELSE 1 / 0 END · · - └── emptyrow · · () · +values · · ("case") · +· size 1 column, 1 row · · +· row 0, expr 0 CASE WHEN true THEN 42 ELSE 1 / 0 END · · # Don't fold random() or now(), which are impure functions. query TTTTT EXPLAIN (VERBOSE) SELECT random(), current_database(), now() ---- -render · · (random, current_database, now) · - │ render 0 random() · · - │ render 1 'test' · · - │ render 2 now() · · - └── emptyrow · · () · +values · · (random, current_database, now) · +· size 3 columns, 1 row · · +· row 0, expr 0 random() · · +· row 0, expr 1 'test' · · +· row 0, expr 2 now() · · # Don't fold non-constants. query TTTTT diff --git a/pkg/sql/opt/exec/execbuilder/testdata/spool b/pkg/sql/opt/exec/execbuilder/testdata/spool index df0eee150b5f..aef0619d97e0 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/spool +++ b/pkg/sql/opt/exec/execbuilder/testdata/spool @@ -1,22 +1,26 @@ # LogicTest: local-opt statement ok -CREATE TABLE t(x INT PRIMARY KEY); INSERT INTO t VALUES(1); +CREATE TABLE t(x INT PRIMARY KEY) + +statement ok +CREATE TABLE t2(x INT PRIMARY KEY) # Check that if a mutation uses further processing, a spool is added. query TTT -EXPLAIN WITH a AS (INSERT INTO t VALUES (2) RETURNING x) +EXPLAIN WITH a AS (INSERT INTO t SELECT * FROM t2 RETURNING x) SELECT * FROM a LIMIT 1 ---- -limit · · - │ count 1 - └── spool · · - │ limit 1 - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +limit · · + │ count 1 + └── spool · · + │ limit 1 + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL query TTT EXPLAIN WITH a AS (DELETE FROM t RETURNING x) @@ -67,17 +71,18 @@ limit · · # Ditto all mutations, with the statement source syntax. 
query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (2) RETURNING x] LIMIT 1 +EXPLAIN SELECT * FROM [INSERT INTO t SELECT * FROM t2 RETURNING x] LIMIT 1 ---- -limit · · - │ count 1 - └── spool · · - │ limit 1 - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +limit · · + │ count 1 + └── spool · · + │ limit 1 + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL query TTT EXPLAIN SELECT * FROM [DELETE FROM t RETURNING x] LIMIT 1 @@ -124,105 +129,116 @@ limit · · # Check that a spool is also inserted for other processings than LIMIT. query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (2) RETURNING x] ORDER BY x +EXPLAIN SELECT count(*) FROM [INSERT INTO t SELECT * FROM t2 RETURNING x] ---- -sort · · - │ order +x - └── spool · · - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +group · · + │ aggregate 0 count_rows() + │ scalar · + └── spool · · + └── render · · + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (2) RETURNING x], t +EXPLAIN SELECT * FROM [INSERT INTO t SELECT * FROM t2 RETURNING x], t ---- -join · · - │ type cross - ├── spool · · - │ └── run · · - │ └── insert · · - │ │ into t(x) - │ └── values · · - │ size 1 column, 1 row - └── scan · · -· table t@primary -· spans ALL +join · · + │ type cross + ├── spool · · + │ └── run · · + │ └── insert · · + │ │ into t(x) + │ └── scan · · + │ table t2@primary + │ spans ALL + └── scan · · +· table t@primary +· spans ALL # Check that if a spool is already added at some level, then it is not added # again at levels below. +# TODO(andyk): This optimization is not part of CBO yet. 
query TTT -EXPLAIN WITH a AS (INSERT INTO t VALUES(2) RETURNING x), +EXPLAIN WITH a AS (INSERT INTO t SELECT * FROM t2 RETURNING x), b AS (INSERT INTO t SELECT x+1 FROM a RETURNING x) SELECT * FROM b LIMIT 1 ---- -limit · · - │ count 1 - └── spool · · - │ limit 1 - └── run · · - └── insert · · - │ into t(x) - └── render · · - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +limit · · + │ count 1 + └── spool · · + │ limit 1 + └── run · · + └── insert · · + │ into t(x) + └── spool · · + └── render · · + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL # Check that no spool is inserted if a top-level render is elided. query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (2) RETURNING x] +EXPLAIN SELECT * FROM [INSERT INTO t SELECT * FROM t2 RETURNING x] ---- -run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL # Check that no spool is used for a top-level INSERT, but # sub-INSERTs still get a spool. query TTT -EXPLAIN INSERT INTO t SELECT x+1 FROM [INSERT INTO t VALUES(2) RETURNING x] +EXPLAIN INSERT INTO t SELECT x+1 FROM [INSERT INTO t SELECT * FROM t2 RETURNING x] ---- -count · · - └── insert · · - │ into t(x) - └── spool · · - └── render · · - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +count · · + └── insert · · + │ into t(x) + └── spool · · + └── render · · + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL # Check that simple computations using RETURNING get their spool pulled up. query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (1) RETURNING x+10] WHERE @1 < 3 LIMIT 10 +EXPLAIN SELECT * FROM [INSERT INTO t SELECT * FROM t2 RETURNING x+10] WHERE @1 < 3 LIMIT 10 ---- -limit · · - │ count 10 - └── spool · · - │ limit 10 - └── filter · · - │ filter "?column?" 
< 3 - └── render · · - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +render · · + └── limit · · + │ count 10 + └── spool · · + │ limit 10 + └── filter · · + │ filter x < -7 + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL # Check that a pulled up spool gets elided at the top level. query TTT -EXPLAIN SELECT * FROM [INSERT INTO t VALUES (1) RETURNING x+10] WHERE @1 < 3 +EXPLAIN SELECT * FROM [INSERT INTO t SELECT * FROM t2 RETURNING x+10] WHERE @1 < 3 ---- -filter · · - │ filter "?column?" < 3 - └── render · · - └── run · · - └── insert · · - │ into t(x) - └── values · · -· size 1 column, 1 row +render · · + └── filter · · + │ filter x < -7 + └── run · · + └── insert · · + │ into t(x) + └── scan · · +· table t2@primary +· spans ALL diff --git a/pkg/sql/opt/exec/execbuilder/testdata/subquery b/pkg/sql/opt/exec/execbuilder/testdata/subquery index 0a2aa6e42c27..a22d7a61921d 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/subquery +++ b/pkg/sql/opt/exec/execbuilder/testdata/subquery @@ -32,16 +32,16 @@ ALTER TABLE abc SPLIT AT VALUES ((SELECT 1)) query TTT EXPLAIN SELECT EXISTS (SELECT a FROM abc) ---- -root · · - ├── render · · - │ └── emptyrow · · - └── subquery · · - │ id @S1 - │ original sql EXISTS (SELECT a FROM abc) - │ exec mode exists - └── scan · · -· table abc@primary -· spans ALL +root · · + ├── values · · + │ size 1 column, 1 row + └── subquery · · + │ id @S1 + │ original sql EXISTS (SELECT a FROM abc) + │ exec mode exists + └── scan · · +· table abc@primary +· spans ALL query TTTTT EXPLAIN (VERBOSE) SELECT * FROM abc WHERE a = (SELECT max(a) FROM abc WHERE EXISTS(SELECT * FROM abc WHERE c=a+3)) @@ -98,15 +98,15 @@ sort · · query TTT EXPLAIN VALUES (1), ((SELECT 2)) ---- -root · · - ├── values · · - │ size 1 column, 2 rows - └── subquery · · - │ id @S1 - │ original sql (SELECT 2) - │ exec mode one row - └── render · · - └── emptyrow · · +root · · + ├── values · · + │ size 1 
column, 2 rows + └── subquery · · + │ id @S1 + │ original sql (SELECT 2) + │ exec mode one row + └── values · · +· size 1 column, 1 row # This test checks that the double sub-query plan expansion caused by a # sub-expression being shared by two or more plan nodes does not @@ -216,20 +216,20 @@ join · · (x, z) · query TTTTT EXPLAIN (VERBOSE) SELECT ARRAY(SELECT x FROM b) ---- -root · · ("array") · - ├── render · · ("array") · - │ │ render 0 COALESCE(@S1, ARRAY[]) · · - │ └── emptyrow · · () · - └── subquery · · ("array") · - │ id @S1 · · - │ original sql (SELECT x FROM b) · · - │ exec mode one row · · - └── group · · (agg0) · - │ aggregate 0 array_agg(x) · · - │ scalar · · · - └── scan · · (x) · -· table b@primary · · -· spans ALL · · +root · · ("array") · + ├── values · · ("array") · + │ size 1 column, 1 row · · + │ row 0, expr 0 COALESCE(@S1, ARRAY[]) · · + └── subquery · · ("array") · + │ id @S1 · · + │ original sql (SELECT x FROM b) · · + │ exec mode one row · · + └── group · · (agg0) · + │ aggregate 0 array_agg(x) · · + │ scalar · · · + └── scan · · (x) · +· table b@primary · · +· spans ALL · · # Case where the plan has an apply join. 
query error could not decorrelate subquery diff --git a/pkg/sql/opt/exec/execbuilder/testdata/tuple b/pkg/sql/opt/exec/execbuilder/testdata/tuple index e39b504bc032..c4585c5c857d 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/tuple +++ b/pkg/sql/opt/exec/execbuilder/testdata/tuple @@ -129,10 +129,10 @@ FROM ( SELECT ((1,'2',true) AS e,f,g) AS x ) ---- -render · · (e int, f string, g bool) · - │ render 0 (((x)[tuple{int AS e, string AS f, bool AS g}]).e)[int] · · - │ render 1 (((x)[tuple{int AS e, string AS f, bool AS g}]).f)[string] · · - │ render 2 (((x)[tuple{int AS e, string AS f, bool AS g}]).g)[bool] · · - └── render · · (x tuple{int AS e, string AS f, bool AS g}) · - │ render 0 ((((1)[int], ('2')[string], (true)[bool]) AS e, f, g))[tuple{int AS e, string AS f, bool AS g}] · · - └── emptyrow · · () · +render · · (e int, f string, g bool) · + │ render 0 (((x)[tuple{int AS e, string AS f, bool AS g}]).e)[int] · · + │ render 1 (((x)[tuple{int AS e, string AS f, bool AS g}]).f)[string] · · + │ render 2 (((x)[tuple{int AS e, string AS f, bool AS g}]).g)[bool] · · + └── values · · (x tuple{int AS e, string AS f, bool AS g}) · +· size 1 column, 1 row · · +· row 0, expr 0 ((((1)[int], ('2')[string], (true)[bool]) AS e, f, g))[tuple{int AS e, string AS f, bool AS g}] · · diff --git a/pkg/sql/opt/exec/execbuilder/testdata/values b/pkg/sql/opt/exec/execbuilder/testdata/values index fe3b827e3b65..22a7de2789a9 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/values +++ b/pkg/sql/opt/exec/execbuilder/testdata/values @@ -4,16 +4,16 @@ query TTTTT EXPLAIN (VERBOSE) SELECT 1 a ---- -render · · (a) · - │ render 0 1 · · - └── emptyrow · · () · +values · · (a) · +· size 1 column, 1 row · · +· row 0, expr 0 1 · · query TTTTT EXPLAIN (VERBOSE) SELECT 1 + 2 a ---- -render · · (a) · - │ render 0 3 · · - └── emptyrow · · () · +values · · (a) · +· size 1 column, 1 row · · +· row 0, expr 0 3 · · query TTTTT EXPLAIN (VERBOSE) VALUES (1, 2, 3), (4, 5, 6) diff --git 
a/pkg/sql/opt/exec/factory.go b/pkg/sql/opt/exec/factory.go index e17c134efaf9..84fed3115758 100644 --- a/pkg/sql/opt/exec/factory.go +++ b/pkg/sql/opt/exec/factory.go @@ -226,6 +226,12 @@ type Factory interface { // ConstructShowTrace returns a node that implements a SHOW TRACE // FOR SESSION statement. ConstructShowTrace(typ tree.ShowTraceType, compact bool) (Node, error) + + // ConstructInsert creates a node that implements an INSERT statement. Each + // tabCols parameter maps input columns into corresponding table columns, by + // ordinal position. The rowsNeeded parameter is true if a RETURNING clause + // needs the inserted row(s) as output. + ConstructInsert(input Node, table opt.Table, rowsNeeded bool) (Node, error) } // OutputOrdering indicates the required output ordering on a Node that is being diff --git a/pkg/sql/opt/memo/check_expr.go b/pkg/sql/opt/memo/check_expr.go index 3a2bea5749df..53a72b0d35da 100644 --- a/pkg/sql/opt/memo/check_expr.go +++ b/pkg/sql/opt/memo/check_expr.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" - "github.com/cockroachdb/cockroach/pkg/util" ) // CheckExpr does sanity checking on an Expr. This code is called in testrace @@ -31,9 +30,9 @@ import ( func (m *Memo) checkExpr(e opt.Expr) { // RaceEnabled ensures that checks are run on every PR (as part of make // testrace) while keeping the check code out of non-test builds. - if !util.RaceEnabled { - return - } + //if !util.RaceEnabled { + // return + //} // Check properties. 
switch t := e.(type) { @@ -160,6 +159,24 @@ func (m *Memo) checkExpr(e opt.Expr) { panic(fmt.Sprintf("lookup join with no lookup columns")) } + case *InsertExpr: + tab := m.Metadata().Table(t.Table) + if len(t.InputCols) != tab.ColumnCount()+tab.MutationColumnCount() { + panic("values not provided for all table columns") + } + + // Output and ordering columns should never include mutation columns. + var mutationCols opt.ColSet + for i, n := tab.ColumnCount(), tab.MutationColumnCount(); i < n; i++ { + mutationCols.Add(int(t.InputCols[i])) + } + if t.Relational().OutputCols.Intersects(mutationCols) { + panic("output columns cannot include mutation columns") + } + if t.Ordering.ColSet().Intersects(mutationCols) { + panic("ordering columns cannot include mutation columns") + } + default: if !opt.IsListOp(e) { for i := 0; i < e.ChildCount(); i++ { diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go index dff7f626735a..a1c23f9a10b1 100644 --- a/pkg/sql/opt/memo/expr_format.go +++ b/pkg/sql/opt/memo/expr_format.go @@ -139,6 +139,7 @@ func (f *ExprFmtCtx) formatExpr(e opt.Expr, tp treeprinter.Node) { } func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { + md := f.Memo.Metadata() relational := e.Relational() required := e.RequiredPhysical() if required == nil { @@ -158,7 +159,7 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') - case *ScanExpr, *VirtualScanExpr, *IndexJoinExpr, *ShowTraceForSessionExpr: + case *ScanExpr, *VirtualScanExpr, *IndexJoinExpr, *ShowTraceForSessionExpr, *InsertExpr: fmt.Fprintf(f.Buffer, "%v", e.Op()) FormatPrivate(f, e.Private(), required) @@ -241,14 +242,14 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { if t.Flags.NoIndexJoin { tp.Childf("flags: no-index-join") } else if t.Flags.ForceIndex { - idx := f.Memo.Metadata().Table(t.Table).Index(t.Flags.Index) + idx := 
md.Table(t.Table).Index(t.Flags.Index) tp.Childf("flags: force-index=%s", idx.IdxName()) } } case *LookupJoinExpr: idxCols := make(opt.ColList, len(t.KeyCols)) - idx := f.Memo.Metadata().Table(t.Table).Index(t.Index) + idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal) } @@ -257,6 +258,16 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { case *MergeJoinExpr: tp.Childf("left ordering: %s", t.LeftEq) tp.Childf("right ordering: %s", t.RightEq) + + case *InsertExpr: + if len(colList) == 0 { + tp.Child("columns: ") + } + f.formatColList(e, tp, "table columns:", tableColsToList(md, t.Table)) + f.formatColList(e, tp, "input columns:", t.InputCols) + if !t.Ordering.Any() { + tp.Childf("internal-ordering: %s", t.Ordering) + } } if !f.HasFlags(ExprFmtHideMiscProps) { @@ -270,11 +281,26 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { } } + f.Buffer.Reset() + writeFlag := func(name string) { + if f.Buffer.Len() != 0 { + f.Buffer.WriteString(", ") + } + f.Buffer.WriteString(name) + } + if relational.CanHaveSideEffects { - tp.Child("side-effects") + writeFlag("side-effects") + } + if relational.CanMutate { + writeFlag("mutations") } if relational.HasPlaceholder { - tp.Child("has-placeholder") + writeFlag("has-placeholder") + } + + if f.Buffer.Len() != 0 { + tp.Child(f.Buffer.String()) } } @@ -579,6 +605,10 @@ func FormatPrivate(f *ExprFmtCtx, private interface{}, physProps *physical.Requi tab := f.Memo.metadata.Table(t.Table) fmt.Fprintf(f.Buffer, " %s", tab.Name()) + case *InsertPrivate: + tab := f.Memo.metadata.Table(t.Table) + fmt.Fprintf(f.Buffer, " %s", tab.Name().TableName) + case *RowNumberPrivate: if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) @@ -642,3 +672,15 @@ func isSimpleColumnName(label string) bool { } return true } + +// tableColsToList returns the list of columns in the given table, in the same +// order they're 
defined in that table. This list contains mutation columns as +// well as regular columns. +func tableColsToList(md *opt.Metadata, tabID opt.TableID) opt.ColList { + tab := md.Table(tabID) + colList := make(opt.ColList, tab.ColumnCount()+tab.MutationColumnCount()) + for i := range colList { + colList[i] = tabID.ColumnID(i) + } + return colList +} diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go index 64f6505ed99a..9dee79fb6d0a 100644 --- a/pkg/sql/opt/memo/logical_props_builder.go +++ b/pkg/sql/opt/memo/logical_props_builder.go @@ -945,6 +945,65 @@ func (b *logicalPropsBuilder) buildProjectSetProps( } } +func (b *logicalPropsBuilder) buildInsertProps(ins *InsertExpr, rel *props.Relational) { + BuildSharedProps(b.mem, ins, &rel.Shared) + + // If no columns are output by the operator, then all other properties retain + // default values. + if !ins.NeedResults { + return + } + + md := b.mem.Metadata() + tab := md.Table(ins.Table) + inputProps := ins.Input.Relational() + + // Output Columns + // -------------- + // Only non-mutation columns are output columns. + for i, col := range ins.InputCols { + // Column is mutation column if its ordinal is >= count of regular columns + // in the table. + if i < tab.ColumnCount() { + rel.OutputCols.Add(int(col)) + + // Also add to NotNullCols here, in order to avoid another loop below. + if !tab.Column(i).IsNullable() { + rel.NotNullCols.Add(int(col)) + } + } + } + + // Not Null Columns + // ---------------- + // Start with set of not null columns computed above. Add any not null columns + // from input that are also output columns. + rel.NotNullCols.UnionWith(inputProps.NotNullCols) + rel.NotNullCols.IntersectionWith(rel.OutputCols) + + // Outer Columns + // ------------- + // Outer columns were already derived by buildSharedProps. 
+ + // Functional Dependencies + // ----------------------- + // Start with copy of FuncDepSet from input, then possibly simplify by calling + // ProjectCols. + rel.FuncDeps.CopyFrom(&inputProps.FuncDeps) + rel.FuncDeps.ProjectCols(rel.OutputCols) + + // Cardinality + // ----------- + // Inherit cardinality from input. + rel.Cardinality = inputProps.Cardinality + + // Statistics + // ---------- + if !b.disableStats { + b.sb.buildInsert(ins, rel) + } +} + func (b *logicalPropsBuilder) buildFiltersItemProps(item *FiltersItem, scalar *props.Scalar) { BuildSharedProps(b.mem, item.Condition, &scalar.Shared) @@ -1022,6 +1081,10 @@ func BuildSharedProps(mem *Memo, e opt.Expr, shared *props.Shared) { // Impure functions can return different value on each call. shared.CanHaveSideEffects = true } + + case *InsertExpr: + shared.CanHaveSideEffects = true + shared.CanMutate = true } // Recursively build the shared properties. @@ -1046,6 +1109,9 @@ func BuildSharedProps(mem *Memo, e opt.Expr, shared *props.Shared) { if cached.CanHaveSideEffects { shared.CanHaveSideEffects = true } + if cached.CanMutate { + shared.CanMutate = true + } if cached.HasSubquery { shared.HasSubquery = true } diff --git a/pkg/sql/opt/memo/statistics_builder.go b/pkg/sql/opt/memo/statistics_builder.go index 514e8d69dbf1..6d04e6353311 100644 --- a/pkg/sql/opt/memo/statistics_builder.go +++ b/pkg/sql/opt/memo/statistics_builder.go @@ -313,6 +313,9 @@ func (sb *statisticsBuilder) colStat(colSet opt.ColSet, e RelExpr) *props.Column case opt.ProjectSetOp: return sb.colStatProjectSet(colSet, e.(*ProjectSetExpr)) + case opt.InsertOp: + return sb.colStatInsert(colSet, e.(*InsertExpr)) + case opt.ExplainOp, opt.ShowTraceForSessionOp: relProps := e.Relational() return sb.colStatLeaf(colSet, &relProps.Stats, &relProps.FuncDeps, relProps.NotNullCols) @@ -1728,6 +1731,29 @@ func (sb *statisticsBuilder) colStatProjectSet( return colStat } +// +--------+ +// | Insert | +// +--------+ + +func (sb *statisticsBuilder) 
buildInsert(ins *InsertExpr, relProps *props.Relational) { + s := &relProps.Stats + if zeroCardinality := s.Init(relProps); zeroCardinality { + // Short cut if cardinality is 0. + return + } + + inputStats := &ins.Input.Relational().Stats + + s.RowCount = inputStats.RowCount + sb.finalizeFromCardinality(relProps) +} + +func (sb *statisticsBuilder) colStatInsert( + colSet opt.ColSet, ins *InsertExpr, +) *props.ColumnStatistic { + return sb.colStat(colSet, ins.Input) +} + ///////////////////////////////////////////////// // General helper functions for building stats // ///////////////////////////////////////////////// diff --git a/pkg/sql/opt/memo/testdata/logprops/insert b/pkg/sql/opt/memo/testdata/logprops/insert new file mode 100644 index 000000000000..8525b417766f --- /dev/null +++ b/pkg/sql/opt/memo/testdata/logprops/insert @@ -0,0 +1,233 @@ +exec-ddl +CREATE TABLE abcde ( + a INT NOT NULL, + b INT, + c INT NOT NULL DEFAULT (10), + d INT AS (b + c + 1) STORED, + "e:mutation" INT +) +---- +TABLE abcde + ├── a int not null + ├── b int + ├── c int not null + ├── d int + ├── rowid int not null (hidden) + └── INDEX primary + └── rowid int not null (hidden) + +exec-ddl +CREATE TABLE xyz ( + x TEXT PRIMARY KEY, + y INT8 NOT NULL, + z FLOAT8 +) +---- +TABLE xyz + ├── x string not null + ├── y int not null + ├── z float + └── INDEX primary + └── x string not null + +# Properties with no RETURNING clause. 
+build +INSERT INTO abcde (a, b) SELECT y, y FROM xyz ORDER BY y, z +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) rowid:5(int) "e:mutation":6(int) + ├── input columns: y:8(int) y:8(int) column10:10(int) column13:13(int) column11:11(int) column12:12(unknown) + ├── internal-ordering: +8,+9 + ├── cardinality: [0 - 0] + ├── side-effects, mutations + └── sort + ├── columns: y:8(int!null) z:9(float) column10:10(int!null) column11:11(int) column12:12(unknown) column13:13(int) + ├── side-effects + ├── fd: ()-->(10,12), (8)-->(13) + ├── ordering: +8,+9 + └── project + ├── columns: column13:13(int) y:8(int!null) z:9(float) column10:10(int!null) column11:11(int) column12:12(unknown) + ├── side-effects + ├── fd: ()-->(10,12), (8)-->(13) + ├── project + │ ├── columns: column10:10(int!null) column11:11(int) column12:12(unknown) y:8(int!null) z:9(float) + │ ├── side-effects + │ ├── fd: ()-->(10,12) + │ ├── prune: (8-12) + │ ├── project + │ │ ├── columns: y:8(int!null) z:9(float) + │ │ ├── prune: (8,9) + │ │ └── scan xyz + │ │ ├── columns: x:7(string!null) y:8(int!null) z:9(float) + │ │ ├── key: (7) + │ │ ├── fd: (7)-->(8,9) + │ │ ├── prune: (7-9) + │ │ └── interesting orderings: (+7) + │ └── projections + │ ├── const: 10 [type=int] + │ ├── function: unique_rowid [type=int, side-effects] + │ └── null [type=unknown] + └── projections + └── plus [type=int, outer=(8,10)] + ├── plus [type=int] + │ ├── variable: y [type=int] + │ └── variable: column10 [type=int] + └── const: 1 [type=int] + +# Properties with RETURNING clause. 
+build +INSERT INTO abcde (a, b) SELECT y, y FROM xyz ORDER BY y, z RETURNING * +---- +project + ├── columns: a:8(int!null) b:8(int!null) c:10(int!null) d:13(int) + ├── side-effects, mutations + ├── fd: ()-->(10), (8)-->(13) + ├── prune: (8,10,13) + └── insert abcde + ├── columns: y:8(int!null) column10:10(int!null) column11:11(int!null) column13:13(int) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) rowid:5(int) "e:mutation":6(int) + ├── input columns: y:8(int!null) y:8(int!null) column10:10(int!null) column13:13(int) column11:11(int!null) column12:12(unknown) + ├── internal-ordering: +8,+9 + ├── side-effects, mutations + ├── fd: ()-->(10), (8)-->(13) + └── sort + ├── columns: y:8(int!null) z:9(float) column10:10(int!null) column11:11(int) column12:12(unknown) column13:13(int) + ├── side-effects + ├── fd: ()-->(10,12), (8)-->(13) + ├── ordering: +8,+9 + └── project + ├── columns: column13:13(int) y:8(int!null) z:9(float) column10:10(int!null) column11:11(int) column12:12(unknown) + ├── side-effects + ├── fd: ()-->(10,12), (8)-->(13) + ├── project + │ ├── columns: column10:10(int!null) column11:11(int) column12:12(unknown) y:8(int!null) z:9(float) + │ ├── side-effects + │ ├── fd: ()-->(10,12) + │ ├── prune: (8-12) + │ ├── project + │ │ ├── columns: y:8(int!null) z:9(float) + │ │ ├── prune: (8,9) + │ │ └── scan xyz + │ │ ├── columns: x:7(string!null) y:8(int!null) z:9(float) + │ │ ├── key: (7) + │ │ ├── fd: (7)-->(8,9) + │ │ ├── prune: (7-9) + │ │ └── interesting orderings: (+7) + │ └── projections + │ ├── const: 10 [type=int] + │ ├── function: unique_rowid [type=int, side-effects] + │ └── null [type=unknown] + └── projections + └── plus [type=int, outer=(8,10)] + ├── plus [type=int] + │ ├── variable: y [type=int] + │ └── variable: column10 [type=int] + └── const: 1 [type=int] + +# Input is cardinality 1 VALUES expression. 
+build +INSERT INTO abcde (a, b) (VALUES (1, 2)) RETURNING *, rowid; +---- +insert abcde + ├── columns: a:7(int!null) b:8(int) c:9(int!null) d:12(int) rowid:10(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) rowid:5(int) "e:mutation":6(int) + ├── input columns: column1:7(int!null) column2:8(int) column9:9(int!null) column12:12(int) column10:10(int!null) column11:11(unknown) + ├── cardinality: [1 - 1] + ├── side-effects, mutations + ├── key: () + ├── fd: ()-->(7-10,12) + └── project + ├── columns: column12:12(int) column1:7(int) column2:8(int) column9:9(int!null) column10:10(int) column11:11(unknown) + ├── cardinality: [1 - 1] + ├── side-effects + ├── key: () + ├── fd: ()-->(7-12) + ├── prune: (7-12) + ├── project + │ ├── columns: column9:9(int!null) column10:10(int) column11:11(unknown) column1:7(int) column2:8(int) + │ ├── cardinality: [1 - 1] + │ ├── side-effects + │ ├── key: () + │ ├── fd: ()-->(7-11) + │ ├── prune: (7-11) + │ ├── values + │ │ ├── columns: column1:7(int) column2:8(int) + │ │ ├── cardinality: [1 - 1] + │ │ ├── key: () + │ │ ├── fd: ()-->(7,8) + │ │ ├── prune: (7,8) + │ │ └── tuple [type=tuple{int, int}] + │ │ ├── const: 1 [type=int] + │ │ └── const: 2 [type=int] + │ └── projections + │ ├── const: 10 [type=int] + │ ├── function: unique_rowid [type=int, side-effects] + │ └── null [type=unknown] + └── projections + └── plus [type=int, outer=(8,9)] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column9 [type=int] + └── const: 1 [type=int] + +# Filter FD set. 
+build +INSERT INTO abcde (a, b) SELECT y, (z+1)::int FROM xyz WHERE y=1 RETURNING a, c; +---- +project + ├── columns: a:8(int!null) c:11(int!null) + ├── side-effects, mutations + ├── fd: ()-->(8,11) + ├── prune: (8,11) + └── insert abcde + ├── columns: y:8(int!null) int:10(int) column11:11(int!null) column12:12(int!null) column14:14(int) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) rowid:5(int) "e:mutation":6(int) + ├── input columns: y:8(int!null) int:10(int) column11:11(int!null) column14:14(int) column12:12(int!null) column13:13(unknown) + ├── side-effects, mutations + ├── fd: ()-->(8,11), (10)-->(14) + └── project + ├── columns: column14:14(int) y:8(int!null) int:10(int) column11:11(int!null) column12:12(int) column13:13(unknown) + ├── side-effects + ├── fd: ()-->(8,11,13), (10)-->(14) + ├── prune: (8,10-14) + ├── project + │ ├── columns: column11:11(int!null) column12:12(int) column13:13(unknown) y:8(int!null) int:10(int) + │ ├── side-effects + │ ├── fd: ()-->(8,11,13) + │ ├── prune: (8,10-13) + │ ├── project + │ │ ├── columns: int:10(int) y:8(int!null) + │ │ ├── fd: ()-->(8) + │ │ ├── prune: (8,10) + │ │ ├── select + │ │ │ ├── columns: x:7(string!null) y:8(int!null) z:9(float) + │ │ │ ├── key: (7) + │ │ │ ├── fd: ()-->(8), (7)-->(9) + │ │ │ ├── prune: (7,9) + │ │ │ ├── interesting orderings: (+7) + │ │ │ ├── scan xyz + │ │ │ │ ├── columns: x:7(string!null) y:8(int!null) z:9(float) + │ │ │ │ ├── key: (7) + │ │ │ │ ├── fd: (7)-->(8,9) + │ │ │ │ ├── prune: (7-9) + │ │ │ │ └── interesting orderings: (+7) + │ │ │ └── filters + │ │ │ └── eq [type=bool, outer=(8), constraints=(/8: [/1 - /1]; tight), fd=()-->(8)] + │ │ │ ├── variable: y [type=int] + │ │ │ └── const: 1 [type=int] + │ │ └── projections + │ │ └── cast: INT [type=int, outer=(9)] + │ │ └── plus [type=float] + │ │ ├── variable: z [type=float] + │ │ └── const: 1.0 [type=float] + │ └── projections + │ ├── const: 10 [type=int] + │ ├── function: unique_rowid [type=int, side-effects] + │ └── null 
[type=unknown] + └── projections + └── plus [type=int, outer=(10,11)] + ├── plus [type=int] + │ ├── variable: int [type=int] + │ └── variable: column11 [type=int] + └── const: 1 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/join b/pkg/sql/opt/memo/testdata/logprops/join index f39e63414357..1a409659d9c4 100644 --- a/pkg/sql/opt/memo/testdata/logprops/join +++ b/pkg/sql/opt/memo/testdata/logprops/join @@ -70,9 +70,10 @@ project ├── columns: column1:7(int) ├── prune: (7) ├── inner-join-apply - │ ├── columns: x:1(int!null) y:2(int) column1:5(int) + │ ├── columns: x:1(int!null) y:2(int) column1:5(int) column1:6(int) │ ├── key: (1) - │ ├── fd: (1)-->(2,5) + │ ├── fd: (1)-->(2,5,6) + │ ├── prune: (6) │ ├── interesting orderings: (+1) │ ├── scan xysd │ │ ├── columns: x:1(int!null) y:2(int) @@ -80,24 +81,41 @@ project │ │ ├── fd: (1)-->(2) │ │ ├── prune: (1,2) │ │ └── interesting orderings: (+1) - │ ├── max1-row - │ │ ├── columns: column1:5(int) + │ ├── inner-join-apply + │ │ ├── columns: column1:5(int) column1:6(int) │ │ ├── outer: (1,2) │ │ ├── cardinality: [1 - 1] │ │ ├── key: () - │ │ ├── fd: ()-->(5) - │ │ └── values - │ │ ├── columns: column1:5(int) - │ │ ├── outer: (1,2) - │ │ ├── cardinality: [2 - 2] - │ │ ├── prune: (5) - │ │ ├── tuple [type=tuple{int}] - │ │ │ └── variable: x [type=int] - │ │ └── tuple [type=tuple{int}] - │ │ └── variable: y [type=int] + │ │ ├── fd: ()-->(5,6) + │ │ ├── prune: (6) + │ │ ├── max1-row + │ │ │ ├── columns: column1:5(int) + │ │ │ ├── outer: (1,2) + │ │ │ ├── cardinality: [1 - 1] + │ │ │ ├── key: () + │ │ │ ├── fd: ()-->(5) + │ │ │ └── values + │ │ │ ├── columns: column1:5(int) + │ │ │ ├── outer: (1,2) + │ │ │ ├── cardinality: [2 - 2] + │ │ │ ├── prune: (5) + │ │ │ ├── tuple [type=tuple{int}] + │ │ │ │ └── variable: x [type=int] + │ │ │ └── tuple [type=tuple{int}] + │ │ │ └── variable: y [type=int] + │ │ ├── values + │ │ │ ├── columns: column1:6(int) + │ │ │ ├── outer: (5) + │ │ │ ├── cardinality: [1 - 1] + │ │ │ ├── key: 
() + │ │ │ ├── fd: ()-->(6) + │ │ │ ├── prune: (6) + │ │ │ └── tuple [type=tuple{int}] + │ │ │ └── variable: column1 [type=int] + │ │ └── filters (true) │ └── filters (true) └── projections - └── variable: column1 [type=int, outer=(5)] + └── variable: column1 [type=int, outer=(6)] # Inner-join-apply nested in inner-join-apply with outer column references to # each parent. diff --git a/pkg/sql/opt/memo/testdata/memo b/pkg/sql/opt/memo/testdata/memo index 1813262f6a00..c795836afc4a 100644 --- a/pkg/sql/opt/memo/testdata/memo +++ b/pkg/sql/opt/memo/testdata/memo @@ -357,25 +357,20 @@ memo (optimized, ~4KB, required=[presentation: field:3]) ├── G1: (distinct-on G2 G3 cols=(3)) │ └── [presentation: field:3] │ ├── best: (distinct-on G2 G3 cols=(3)) - │ └── cost: 0.07 + │ └── cost: 0.04 ├── G2: (explain G4 [presentation: k:1]) │ └── [] │ ├── best: (explain G4="[presentation: k:1]" [presentation: k:1]) - │ └── cost: 0.06 + │ └── cost: 0.03 ├── G3: (aggregations) - ├── G4: (project G5 G6) + ├── G4: (values G5) │ └── [presentation: k:1] - │ ├── best: (project G5 G6) - │ └── cost: 0.05 - ├── G5: (values G7) - │ └── [] - │ ├── best: (values G7) + │ ├── best: (values G5) │ └── cost: 0.02 - ├── G6: (projections G8) - ├── G7: (scalar-list G9) - ├── G8: (const 123) - ├── G9: (tuple G10) - └── G10: (scalar-list) + ├── G5: (scalar-list G6) + ├── G6: (tuple G7) + ├── G7: (scalar-list G8) + └── G8: (const 123) memo SELECT DISTINCT tag FROM [SHOW TRACE FOR SESSION] diff --git a/pkg/sql/opt/memo/testdata/stats/insert b/pkg/sql/opt/memo/testdata/stats/insert new file mode 100644 index 000000000000..3735fd3f5747 --- /dev/null +++ b/pkg/sql/opt/memo/testdata/stats/insert @@ -0,0 +1,112 @@ +exec-ddl +CREATE TABLE abc ( + a INT NOT NULL, + b TEXT DEFAULT ('foo'), + c FLOAT AS (a::float) STORED +) +---- +TABLE abc + ├── a int not null + ├── b string + ├── c float + ├── rowid int not null (hidden) + └── INDEX primary + └── rowid int not null (hidden) + +exec-ddl +ALTER TABLE abc INJECT 
STATISTICS '[ + { + "columns": ["a"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 2000, + "distinct_count": 2000 + }, + { + "columns": ["b"], + "created_at": "2018-01-01 1:30:00.00000+00:00", + "row_count": 2000, + "distinct_count": 10 + } +]' +---- + +exec-ddl +CREATE TABLE xyz ( + x TEXT PRIMARY KEY, + y INT8 NOT NULL, + z FLOAT8 +) +---- +TABLE xyz + ├── x string not null + ├── y int not null + ├── z float + └── INDEX primary + └── x string not null + +# Statistics should be derived from INSERT input columns and transferred to +# RETURNING columns. +build +SELECT * +FROM [INSERT INTO xyz (x, y, z) SELECT b, a, c FROM abc WHERE b='foo' RETURNING *] +WHERE z > 1.0 +---- +select + ├── columns: x:5(string!null) y:4(int!null) z:6(float!null) + ├── side-effects, mutations + ├── stats: [rows=66, distinct(4)=66, null(4)=0, distinct(5)=1, null(5)=0, distinct(6)=60.3661899, null(6)=0] + ├── fd: ()-->(5) + ├── insert xyz + │ ├── columns: a:4(int!null) b:5(string!null) c:6(float) + │ ├── table columns: x:1(string) y:2(int) z:3(float) + │ ├── input columns: b:5(string!null) a:4(int!null) c:6(float) + │ ├── side-effects, mutations + │ ├── stats: [rows=200] + │ ├── fd: ()-->(5) + │ └── project + │ ├── columns: a:4(int!null) b:5(string!null) c:6(float) + │ ├── stats: [rows=200, distinct(4)=200, null(4)=0, distinct(5)=1, null(5)=0, distinct(6)=130.264312, null(6)=2] + │ ├── fd: ()-->(5) + │ └── select + │ ├── columns: a:4(int!null) b:5(string!null) c:6(float) rowid:7(int!null) + │ ├── stats: [rows=200, distinct(4)=200, null(4)=0, distinct(5)=1, null(5)=0, distinct(6)=130.264312, null(6)=2, distinct(7)=200, null(7)=0] + │ ├── key: (7) + │ ├── fd: ()-->(5), (7)-->(4,6) + │ ├── scan abc + │ │ ├── columns: a:4(int!null) b:5(string) c:6(float) rowid:7(int!null) + │ │ ├── stats: [rows=2000, distinct(4)=2000, null(4)=0, distinct(5)=10, null(5)=0, distinct(6)=200, null(6)=20, distinct(7)=2000, null(7)=0] + │ │ ├── key: (7) + │ │ └── fd: (7)-->(4-6) + │ └── filters + 
│ └── b = 'foo' [type=bool, outer=(5), constraints=(/5: [/'foo' - /'foo']; tight), fd=()-->(5)] + └── filters + └── c > 1.0 [type=bool, outer=(6), constraints=(/6: [/1.0000000000000002 - ]; tight)] + +# Cardinality is zero. +build +INSERT INTO xyz (x, y, z) SELECT b, a, c FROM abc WHERE False RETURNING * +---- +insert xyz + ├── columns: x:5(string!null) y:4(int!null) z:6(float) + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: b:5(string!null) a:4(int!null) c:6(float) + ├── cardinality: [0 - 0] + ├── side-effects, mutations + ├── stats: [rows=0] + └── project + ├── columns: a:4(int!null) b:5(string) c:6(float) + ├── cardinality: [0 - 0] + ├── stats: [rows=0] + └── select + ├── columns: a:4(int!null) b:5(string) c:6(float) rowid:7(int!null) + ├── cardinality: [0 - 0] + ├── stats: [rows=0] + ├── key: (7) + ├── fd: (7)-->(4-6) + ├── scan abc + │ ├── columns: a:4(int!null) b:5(string) c:6(float) rowid:7(int!null) + │ ├── stats: [rows=2000] + │ ├── key: (7) + │ └── fd: (7)-->(4-6) + └── filters + └── false [type=bool] diff --git a/pkg/sql/opt/metadata.go b/pkg/sql/opt/metadata.go index 12a1170c6655..6fac79260c4b 100644 --- a/pkg/sql/opt/metadata.go +++ b/pkg/sql/opt/metadata.go @@ -363,6 +363,27 @@ func (md *Metadata) AddTable(tab Table) TableID { return tabID } +// AddTableWithMutations first calls AddTable to add regular columns to the +// metadata. It then appends any columns that are currently undergoing mutation +// (i.e. being added or dropped from the table), and which need to be +// initialized to their default value by INSERT statements. 
See this RFC for +// more details: +// +// cockroachdb/cockroach/docs/RFCS/20151014_online_schema_change.md +// +func (md *Metadata) AddTableWithMutations(tab Table) TableID { + tabID := md.AddTable(tab) + for i, n := 0, tab.MutationColumnCount(); i < n; i++ { + col := tab.MutationColumn(i) + md.cols = append(md.cols, mdColumn{ + tabID: tabID, + label: string(col.ColName()), + typ: col.DatumType(), + }) + } + return tabID +} + // Table looks up the catalog table associated with the given metadata id. The // same table can be associated with multiple metadata ids. func (md *Metadata) Table(tabID TableID) Table { diff --git a/pkg/sql/opt/norm/custom_funcs.go b/pkg/sql/opt/norm/custom_funcs.go index 0d5335b87f21..a86010b085a2 100644 --- a/pkg/sql/opt/norm/custom_funcs.go +++ b/pkg/sql/opt/norm/custom_funcs.go @@ -550,6 +550,46 @@ func (c *CustomFuncs) MergeProjections( return newProjections } +// MergeProjectWithValues merges a Project operator with its input Values +// operator. This is only possible in certain circumstances, which are described +// in the MergeProjectWithValues rule comment. +// +// Values columns that are part of the Project passthrough columns are retained +// in the final Values operator, and Project synthesized columns are added to +// it. Any unreferenced Values columns are discarded. 
For example: +// +// SELECT column1, 3 FROM (VALUES (1, 2)) +// => +// (VALUES (1, 3)) +// +func (c *CustomFuncs) MergeProjectWithValues( + projections memo.ProjectionsExpr, passthrough opt.ColSet, input memo.RelExpr, +) memo.RelExpr { + newExprs := make(memo.ScalarListExpr, 0, len(projections)+passthrough.Len()) + newTypes := make([]types.T, 0, len(newExprs)) + newCols := make(opt.ColList, 0, len(newExprs)) + + values := input.(*memo.ValuesExpr) + tuple := values.Rows[0].(*memo.TupleExpr) + for i, colID := range values.Cols { + if passthrough.Contains(int(colID)) { + newExprs = append(newExprs, tuple.Elems[i]) + newTypes = append(newTypes, tuple.Elems[i].DataType()) + newCols = append(newCols, colID) + } + } + + for i := range projections { + item := &projections[i] + newExprs = append(newExprs, item.Element) + newTypes = append(newTypes, item.Element.DataType()) + newCols = append(newCols, item.Col) + } + + rows := memo.ScalarListExpr{c.f.ConstructTuple(newExprs, types.TTuple{Types: newTypes})} + return c.f.ConstructValues(rows, newCols) +} + // ProjectionCols returns the ids of the columns synthesized by the given // Projections operator. func (c *CustomFuncs) ProjectionCols(projections memo.ProjectionsExpr) opt.ColSet { @@ -570,6 +610,19 @@ func (c *CustomFuncs) ProjectionOuterCols(projections memo.ProjectionsExpr) opt. return colSet } +// AreProjectionsCorrelated returns true if any element in the projections +// references any of the given columns. +func (c *CustomFuncs) AreProjectionsCorrelated( + projections memo.ProjectionsExpr, cols opt.ColSet, +) bool { + for i := range projections { + if projections[i].ScalarProps(c.mem).OuterCols.Intersects(cols) { + return true + } + } + return false +} + // ProjectColMapLeft returns a Projections operator that maps the left side // columns in a SetPrivate to the output columns in it. Useful for replacing set // operations with simpler constructs. 
diff --git a/pkg/sql/opt/norm/ordering.go b/pkg/sql/opt/norm/ordering.go index 2969b52dd00c..208a7c818da8 100644 --- a/pkg/sql/opt/norm/ordering.go +++ b/pkg/sql/opt/norm/ordering.go @@ -52,7 +52,7 @@ func (c *CustomFuncs) CanSimplifyGroupingOrdering( func (c *CustomFuncs) SimplifyGroupingOrdering( in memo.RelExpr, private *memo.GroupingPrivate, ) *memo.GroupingPrivate { - // Copy GroupByDef to stack and replace Ordering field. + // Copy GroupingPrivate to stack and replace Ordering field. copy := *private copy.Ordering = c.simplifyOrdering(in, private.Ordering) return © @@ -73,7 +73,7 @@ func (c *CustomFuncs) CanSimplifyRowNumberOrdering( func (c *CustomFuncs) SimplifyRowNumberOrdering( in memo.RelExpr, private *memo.RowNumberPrivate, ) *memo.RowNumberPrivate { - // Copy RowNumberDef to stack and replace Ordering field. + // Copy RowNumberPrivate to stack and replace Ordering field. copy := *private copy.Ordering = c.simplifyOrdering(in, private.Ordering) return © @@ -94,8 +94,8 @@ func (c *CustomFuncs) CanSimplifyExplainOrdering( func (c *CustomFuncs) SimplifyExplainOrdering( in memo.RelExpr, private *memo.ExplainPrivate, ) *memo.ExplainPrivate { - // Copy ExplainOpDef and its physical properties to stack and replace Ordering - // field in the copied properties. + // Copy ExplainPrivate and its physical properties to stack and replace + // Ordering field in the copied properties. copy := *private copyProps := *private.Props copyProps.Ordering = c.simplifyOrdering(in, private.Props.Ordering) @@ -103,6 +103,25 @@ func (c *CustomFuncs) SimplifyExplainOrdering( return © } +// CanSimplifyInsertOrdering returns true if the ordering required by the +// Insert operator can be made less restrictive, so that the input operator has +// more ordering choices. 
+func (c *CustomFuncs) CanSimplifyInsertOrdering(in memo.RelExpr, private *memo.InsertPrivate) bool { + return c.canSimplifyOrdering(in, private.Ordering) +} + +// SimplifyInsertOrdering makes the ordering required by the Insert operator +// less restrictive by removing optional columns, adding equivalent columns, and +// removing redundant columns. +func (c *CustomFuncs) SimplifyInsertOrdering( + in memo.RelExpr, private *memo.InsertPrivate, +) *memo.InsertPrivate { + // Copy InsertPrivate to stack and replace Ordering field. + copy := *private + copy.Ordering = c.simplifyOrdering(in, private.Ordering) + return © +} + func (c *CustomFuncs) canSimplifyOrdering(in memo.RelExpr, ordering physical.OrderingChoice) bool { // If any ordering is allowed, nothing to simplify. if ordering.Any() { diff --git a/pkg/sql/opt/norm/rules/ordering.opt b/pkg/sql/opt/norm/rules/ordering.opt index 61ae9e9a4f28..193df5e311fa 100644 --- a/pkg/sql/opt/norm/rules/ordering.opt +++ b/pkg/sql/opt/norm/rules/ordering.opt @@ -68,3 +68,13 @@ ) => (Explain $input (SimplifyExplainOrdering $input $explainPrivate)) + +# SimplifyInsertOrdering removes redundant columns from the Insert operator's +# input ordering. +[SimplifyInsertOrdering, Normalize] +(Insert + $input:* + $insertPrivate:* & (CanSimplifyInsertOrdering $input $insertPrivate) +) +=> +(Insert $input (SimplifyInsertOrdering $input $insertPrivate)) diff --git a/pkg/sql/opt/norm/rules/project.opt b/pkg/sql/opt/norm/rules/project.opt index 810992de5ed5..5ac3b2c7224b 100644 --- a/pkg/sql/opt/norm/rules/project.opt +++ b/pkg/sql/opt/norm/rules/project.opt @@ -29,3 +29,23 @@ $input (MergeProjections $projections $innerProjections $passthrough) (DifferenceCols $passthrough (ProjectionCols $innerProjections)) ) + +# MergeProjectWithValues merges an outer Project operator with an inner Values +# operator that has a single row, as long as: +# +# 1. 
The Values operator has a single row (since if not, the projections would +# need to replicated for each row, which is undesirable). +# +# 2. The projections do not reference Values columns, since combined Values +# columns cannot reference one another. +# +# This rule has the side effect of pruning unused columns of the Values +# operator. +[MergeProjectWithValues, Normalize] +(Project + $input:(Values [ * ]) + $projections:* & ^(AreProjectionsCorrelated $projections (OutputCols $input)) + $passthrough:* +) +=> +(MergeProjectWithValues $projections $passthrough $input) diff --git a/pkg/sql/opt/norm/testdata/rules/bool b/pkg/sql/opt/norm/testdata/rules/bool index 105e1d208848..78614a95de7a 100644 --- a/pkg/sql/opt/norm/testdata/rules/bool +++ b/pkg/sql/opt/norm/testdata/rules/bool @@ -298,18 +298,12 @@ project opt expect=(FoldNotTrue,FoldNotFalse) SELECT NOT(1=1), NOT(1=2) ---- -project - ├── columns: "?column?":1(bool!null) "?column?":2(bool!null) +values + ├── columns: "?column?":1(bool) "?column?":2(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1,2) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - ├── false [type=bool] - └── true [type=bool] + └── (false, true) [type=tuple{bool, bool}] # -------------------------------------------------- # NegateComparison diff --git a/pkg/sql/opt/norm/testdata/rules/comp b/pkg/sql/opt/norm/testdata/rules/comp index 8aecbc6468c7..0fd6f16feb7f 100644 --- a/pkg/sql/opt/norm/testdata/rules/comp +++ b/pkg/sql/opt/norm/testdata/rules/comp @@ -308,17 +308,12 @@ values opt expect=FoldIsNull SELECT NULL IS NULL AS r ---- -project - ├── columns: r:1(bool!null) +values + ├── columns: r:1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── true [type=bool] + └── (true,) [type=tuple{bool}] # -------------------------------------------------- # 
FoldNonNullIsNull @@ -326,32 +321,22 @@ project opt expect=FoldNonNullIsNull SELECT 1 IS NULL AS r ---- -project - ├── columns: r:1(bool!null) +values + ├── columns: r:1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── false [type=bool] + └── (false,) [type=tuple{bool}] opt expect=FoldNonNullIsNull SELECT (1, 2, 3) IS NULL AS r ---- -project - ├── columns: r:1(bool!null) +values + ├── columns: r:1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── false [type=bool] + └── (false,) [type=tuple{bool}] # -------------------------------------------------- # FoldIsNotNull @@ -359,18 +344,12 @@ project opt expect=FoldIsNotNull SELECT NULL IS NOT NULL AS r, NULL IS NOT TRUE AS s ---- -project - ├── columns: r:1(bool!null) s:2(bool!null) +values + ├── columns: r:1(bool) s:2(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1,2) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - ├── false [type=bool] - └── true [type=bool] + └── (false, true) [type=tuple{bool, bool}] # -------------------------------------------------- # FoldNonNullIsNotNull @@ -396,17 +375,12 @@ project opt expect=FoldNonNullIsNotNull SELECT (1, 2, 3) IS NOT NULL AS r ---- -project - ├── columns: r:1(bool!null) +values + ├── columns: r:1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── true [type=bool] + └── (true,) [type=tuple{bool}] # -------------------------------------------------- # CommuteNullIs @@ -414,15 +388,9 @@ project opt expect=CommuteNullIs SELECT NULL IS NOT TRUE AS r, NULL IS TRUE AS s ---- -project - ├── columns: r:1(bool!null) s:2(bool!null) +values + ├── columns: r:1(bool) 
s:2(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1,2) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - ├── true [type=bool] - └── false [type=bool] + └── (true, false) [type=tuple{bool, bool}] diff --git a/pkg/sql/opt/norm/testdata/rules/decorrelate b/pkg/sql/opt/norm/testdata/rules/decorrelate index 005e2ddbe139..16c257418938 100644 --- a/pkg/sql/opt/norm/testdata/rules/decorrelate +++ b/pkg/sql/opt/norm/testdata/rules/decorrelate @@ -3499,16 +3499,12 @@ project opt expect=HoistProjectSubquery SELECT EXISTS(SELECT EXISTS(SELECT * FROM xy WHERE y=i) FROM a) ---- -project +values ├── columns: exists:13(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(13) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{bool}] └── exists [type=bool] └── left-join ├── columns: i:2(int) y:7(int) true:9(bool) rownum:12(int!null) @@ -3551,23 +3547,33 @@ SELECT i, y FROM a INNER JOIN xy ON (SELECT k+1) = x ---- project ├── columns: i:2(int) y:7(int) - └── inner-join - ├── columns: i:2(int) x:6(int!null) y:7(int) column9:9(int!null) - ├── fd: (6)-->(7), (6)==(9), (9)==(6) - ├── project - │ ├── columns: column9:9(int) i:2(int) - │ ├── scan a - │ │ ├── columns: k:1(int!null) i:2(int) - │ │ ├── key: (1) - │ │ └── fd: (1)-->(2) - │ └── projections - │ └── k + 1 [type=int, outer=(1)] - ├── scan xy - │ ├── columns: x:6(int!null) y:7(int) + └── inner-join-apply + ├── columns: k:1(int!null) i:2(int) x:6(int!null) y:7(int) "?column?":8(int) + ├── key: (1,6) + ├── fd: (1)-->(2), (1,6)-->(7,8), (6)==(8), (8)==(6) + ├── scan a + │ ├── columns: k:1(int!null) i:2(int) + │ ├── key: (1) + │ └── fd: (1)-->(2) + ├── inner-join + │ ├── columns: x:6(int!null) y:7(int) "?column?":8(int) + │ ├── outer: (1) │ ├── key: (6) - │ └── fd: (6)-->(7) + │ ├── fd: ()-->(8), (6)-->(7) + │ ├── scan xy + │ │ ├── columns: x:6(int!null) y:7(int) + │ │ ├── key: (6) + │ │ └── 
fd: (6)-->(7) + │ ├── values + │ │ ├── columns: "?column?":8(int) + │ │ ├── outer: (1) + │ │ ├── cardinality: [1 - 1] + │ │ ├── key: () + │ │ ├── fd: ()-->(8) + │ │ └── (k + 1,) [type=tuple{int}] + │ └── filters (true) └── filters - └── column9 = x [type=bool, outer=(6,9), constraints=(/6: (/NULL - ]; /9: (/NULL - ]), fd=(6)==(9), (9)==(6)] + └── x = ?column? [type=bool, outer=(6,8), constraints=(/6: (/NULL - ]; /8: (/NULL - ]), fd=(6)==(8), (8)==(6)] # Right join + multiple subqueries. opt expect=HoistJoinSubquery @@ -3576,21 +3582,42 @@ SELECT y FROM a RIGHT JOIN xy ON (SELECT k+1) = (SELECT x+1) project ├── columns: y:7(int) └── right-join-apply - ├── columns: k:1(int) y:7(int) "?column?":8(int) "?column?":9(int) + ├── columns: k:1(int) x:6(int!null) y:7(int) "?column?":8(int) "?column?":9(int) + ├── key: (1,6) + ├── fd: (1,6)-->(7-9) ├── scan a │ ├── columns: k:1(int!null) │ └── key: (1) - ├── project - │ ├── columns: "?column?":9(int) "?column?":8(int) y:7(int) + ├── inner-join-apply + │ ├── columns: x:6(int!null) y:7(int) "?column?":8(int) "?column?":9(int) │ ├── outer: (1) - │ ├── fd: ()-->(8) - │ ├── scan xy - │ │ ├── columns: x:6(int!null) y:7(int) + │ ├── key: (6) + │ ├── fd: ()-->(8), (6)-->(7,9) + │ ├── inner-join + │ │ ├── columns: x:6(int!null) y:7(int) "?column?":8(int) + │ │ ├── outer: (1) │ │ ├── key: (6) - │ │ └── fd: (6)-->(7) - │ └── projections - │ ├── x + 1 [type=int, outer=(6)] - │ └── k + 1 [type=int, outer=(1)] + │ │ ├── fd: ()-->(8), (6)-->(7) + │ │ ├── scan xy + │ │ │ ├── columns: x:6(int!null) y:7(int) + │ │ │ ├── key: (6) + │ │ │ └── fd: (6)-->(7) + │ │ ├── values + │ │ │ ├── columns: "?column?":8(int) + │ │ │ ├── outer: (1) + │ │ │ ├── cardinality: [1 - 1] + │ │ │ ├── key: () + │ │ │ ├── fd: ()-->(8) + │ │ │ └── (k + 1,) [type=tuple{int}] + │ │ └── filters (true) + │ ├── values + │ │ ├── columns: "?column?":9(int) + │ │ ├── outer: (6) + │ │ ├── cardinality: [1 - 1] + │ │ ├── key: () + │ │ ├── fd: ()-->(9) + │ │ └── (x + 1,) 
[type=tuple{int}] + │ └── filters (true) └── filters └── ?column? = ?column? [type=bool, outer=(8,9), constraints=(/8: (/NULL - ]; /9: (/NULL - ]), fd=(8)==(9), (9)==(8)] @@ -3736,19 +3763,27 @@ project │ │ ├── outer: (1,2) │ │ ├── cardinality: [3 - 3] │ │ ├── fd: ()-->(6,7) - │ │ ├── project - │ │ │ ├── columns: s:7(int) r:6(int) + │ │ ├── inner-join + │ │ │ ├── columns: r:6(int) s:7(int) │ │ │ ├── outer: (1,2) │ │ │ ├── cardinality: [1 - 1] │ │ │ ├── key: () │ │ │ ├── fd: ()-->(6,7) │ │ │ ├── values + │ │ │ │ ├── columns: r:6(int) + │ │ │ │ ├── outer: (2) + │ │ │ │ ├── cardinality: [1 - 1] + │ │ │ │ ├── key: () + │ │ │ │ ├── fd: ()-->(6) + │ │ │ │ └── (i + 1,) [type=tuple{int}] + │ │ │ ├── values + │ │ │ │ ├── columns: s:7(int) + │ │ │ │ ├── outer: (1) │ │ │ │ ├── cardinality: [1 - 1] │ │ │ │ ├── key: () - │ │ │ │ └── tuple [type=tuple] - │ │ │ └── projections - │ │ │ ├── k + 1 [type=int, outer=(1)] - │ │ │ └── i + 1 [type=int, outer=(2)] + │ │ │ │ ├── fd: ()-->(7) + │ │ │ │ └── (k + 1,) [type=tuple{int}] + │ │ │ └── filters (true) │ │ ├── values │ │ │ ├── columns: column1:8(int) │ │ │ ├── outer: (6,7) diff --git a/pkg/sql/opt/norm/testdata/rules/fold_constants b/pkg/sql/opt/norm/testdata/rules/fold_constants index c69d38a28456..a1e7a8c2cda0 100644 --- a/pkg/sql/opt/norm/testdata/rules/fold_constants +++ b/pkg/sql/opt/norm/testdata/rules/fold_constants @@ -37,17 +37,12 @@ project opt expect=FoldArray SELECT ARRAY['foo', 'bar'] ---- -project - ├── columns: array:1(string[]!null) +values + ├── columns: array:1(string[]) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: ARRAY['foo','bar'] [type=string[]] + └── (ARRAY['foo','bar'],) [type=tuple{string[]}] # -------------------------------------------------- # FoldBinary @@ -57,274 +52,189 @@ project opt expect=FoldBinary SELECT 1::INT + 2::DECIMAL ---- -project - ├── columns: 
"?column?":1(decimal!null) +values + ├── columns: "?column?":1(decimal) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 3 [type=decimal] + └── (3,) [type=tuple{decimal}] # Don't fold: out of range error. opt expect-not=FoldBinary SELECT 9223372036854775800::INT + 9223372036854775800::INT ---- -project +values ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── 9223372036854775800 + 9223372036854775800 [type=int] + └── (9223372036854775800 + 9223372036854775800,) [type=tuple{int}] # Fold constant. opt expect=FoldBinary SELECT 1::INT - 2::INT ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: -1 [type=int] + └── (-1,) [type=tuple{int}] # Don't fold: out of range error. opt expect-not=FoldBinary SELECT (-9223372036854775800)::INT - 9223372036854775800::INT ---- -project +values ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── -9223372036854775800 - 9223372036854775800 [type=int] + └── (-9223372036854775800 - 9223372036854775800,) [type=tuple{int}] # Fold constant. opt expect=FoldBinary SELECT 4::INT * 2::INT ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 8 [type=int] + └── (8,) [type=tuple{int}] # Don't fold: out of range error. 
opt expect-not=FoldBinary SELECT 9223372036854775800::INT * 9223372036854775800::INT ---- -project +values ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── 9223372036854775800 * 9223372036854775800 [type=int] + └── (9223372036854775800 * 9223372036854775800,) [type=tuple{int}] # Fold constant. opt expect=FoldBinary SELECT 1::FLOAT / 2::FLOAT ---- -project - ├── columns: "?column?":1(float!null) +values + ├── columns: "?column?":1(float) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 0.5 [type=float] + └── (0.5,) [type=tuple{float}] # Don't fold: divide by zero error. opt expect-not=FoldBinary SELECT 1::INT / 0::INT ---- -project +values ├── columns: "?column?":1(decimal) ├── cardinality: [1 - 1] ├── side-effects ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── 1 / 0 [type=decimal, side-effects] + └── (1 / 0,) [type=tuple{decimal}] # Fold constant. opt expect=FoldBinary SELECT B'01' # B'11' ---- -project - ├── columns: "?column?":1(varbit!null) +values + ├── columns: "?column?":1(varbit) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: B'10' [type=varbit] + └── (B'10',) [type=tuple{varbit}] # Don't fold: cannot mix bit array sizes error. 
opt expect-not=FoldBinary SELECT B'01' # B'11001001010101' ---- -project +values ├── columns: "?column?":1(varbit) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── B'01' # B'11001001010101' [type=varbit] + └── (B'01' # B'11001001010101',) [type=tuple{varbit}] # Fold constant. opt expect=FoldBinary SELECT B'01' | B'11' ---- -project - ├── columns: "?column?":1(varbit!null) +values + ├── columns: "?column?":1(varbit) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: B'11' [type=varbit] + └── (B'11',) [type=tuple{varbit}] # Don't fold: cannot mix bit array sizes error. opt expect-not=FoldBinary SELECT B'01' | B'11001001010101' ---- -project +values ├── columns: "?column?":1(varbit) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── B'01' | B'11001001010101' [type=varbit] + └── (B'01' | B'11001001010101',) [type=tuple{varbit}] # Fold constant. opt expect=FoldBinary SELECT '2000-05-05 10:00:00+03':::TIMESTAMP - '2000-05-06 10:00:00+03':::TIMESTAMP ---- -project - ├── columns: "?column?":1(interval!null) +values + ├── columns: "?column?":1(interval) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: '-24:00:00' [type=interval] + └── ('-24:00:00',) [type=tuple{interval}] # Fold constant. 
opt expect=FoldBinary SELECT '2000-05-05 10:00:00+03':::TIMESTAMP - '2000-05-06 10:00:00+03':::TIMESTAMPTZ ---- -project - ├── columns: "?column?":1(interval!null) +values + ├── columns: "?column?":1(interval) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: '-21:00:00' [type=interval] + └── ('-21:00:00',) [type=tuple{interval}] # Fold constant. opt expect=FoldBinary SELECT ARRAY['a','b','c'] || 'd' ---- -project - ├── columns: "?column?":1(string[]!null) +values + ├── columns: "?column?":1(string[]) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: ARRAY['a','b','c','d'] [type=string[]] + └── (ARRAY['a','b','c','d'],) [type=tuple{string[]}] # Fold constant. opt expect=FoldBinary SELECT ARRAY['a','b','c'] || ARRAY['d','e','f'] ---- -project - ├── columns: "?column?":1(string[]!null) +values + ├── columns: "?column?":1(string[]) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: ARRAY['a','b','c','d','e','f'] [type=string[]] + └── (ARRAY['a','b','c','d','e','f'],) [type=tuple{string[]}] # NULL should not be added to the array. 
opt expect=FoldBinary SELECT ARRAY[1,2,3] || NULL ---- -project - ├── columns: "?column?":1(int[]!null) +values + ├── columns: "?column?":1(int[]) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: ARRAY[1,2,3] [type=int[]] + └── (ARRAY[1,2,3],) [type=tuple{int[]}] # -------------------------------------------------- # FoldUnary @@ -332,94 +242,66 @@ project opt expect=FoldUnary SELECT -(1:::int) ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: -1 [type=int] + └── (-1,) [type=tuple{int}] opt expect=FoldUnary SELECT -(1:::float) ---- -project - ├── columns: "?column?":1(float!null) +values + ├── columns: "?column?":1(float) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: -1.0 [type=float] + └── (-1.0,) [type=tuple{float}] # TODO(justin): it would be better if this produced an error in the optimizer # rather than falling back to execution to error. 
opt expect-not=FoldUnary format=show-all SELECT -((-9223372036854775808)::int) ---- -project +values ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) ├── prune: (1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{int}] └── unary-minus [type=int] └── const: -9223372036854775808 [type=int] opt expect=FoldUnary format=show-all SELECT -(1:::decimal) ---- -project - ├── columns: "?column?":1(decimal!null) +values + ├── columns: "?column?":1(decimal) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) ├── prune: (1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{decimal}] └── const: -1 [type=decimal] opt expect=FoldUnary format=show-all SELECT -('-1d'::interval); ---- -project - ├── columns: "?column?":1(interval!null) +values + ├── columns: "?column?":1(interval) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) ├── prune: (1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{interval}] └── const: '1 day' [type=interval] # TODO(justin): this seems incorrect but it's consistent with the existing @@ -427,49 +309,34 @@ project opt expect=FoldUnary SELECT -('-9223372036854775808d'::interval); ---- -project - ├── columns: "?column?":1(interval!null) +values + ├── columns: "?column?":1(interval) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: '-9223372036854775808 days' [type=interval] + └── 
('-9223372036854775808 days',) [type=tuple{interval}] # Fold constant. opt expect=FoldUnary SELECT ~(500::INT) ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: -501 [type=int] + └── (-501,) [type=tuple{int}] # Fold constant. opt expect=FoldUnary SELECT ~('35.231.178.195'::INET) ---- -project - ├── columns: "?column?":1(inet!null) +values + ├── columns: "?column?":1(inet) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: '220.24.77.60' [type=inet] + └── ('220.24.77.60',) [type=tuple{inet}] # -------------------------------------------------- # FoldComparison @@ -479,94 +346,64 @@ project opt expect=FoldComparison SELECT 1::INT < 2::INT ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── true [type=bool] + └── (true,) [type=tuple{bool}] # Fold constant. opt expect=FoldComparison SELECT 2.0::DECIMAL = 2::INT ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── true [type=bool] + └── (true,) [type=tuple{bool}] # Fold constant. 
opt expect=FoldComparison SELECT 100 IS NOT DISTINCT FROM 200 ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── false [type=bool] + └── (false,) [type=tuple{bool}] # Fold constant. opt expect=FoldComparison SELECT 'foo' IN ('a', 'b', 'c') ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── false [type=bool] + └── (false,) [type=tuple{bool}] # Fold constant. opt expect=FoldComparison SELECT '[1, 2]'::JSONB <@ '[1, 2, 3]'::JSONB ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── true [type=bool] + └── (true,) [type=tuple{bool}] # Fold constant. 
opt expect=FoldComparison SELECT ('a', 'b', 'c') = ('d', 'e', 'f') ---- -project - ├── columns: "?column?":1(bool!null) +values + ├── columns: "?column?":1(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── false [type=bool] + └── (false,) [type=tuple{bool}] diff --git a/pkg/sql/opt/norm/testdata/rules/inline b/pkg/sql/opt/norm/testdata/rules/inline index 6a97be194811..c56ddd6455e6 100644 --- a/pkg/sql/opt/norm/testdata/rules/inline +++ b/pkg/sql/opt/norm/testdata/rules/inline @@ -177,29 +177,23 @@ LIMIT 107 ---- project - ├── columns: c0:1(int!null) + ├── columns: c0:1(int) ├── cardinality: [0 - 1] ├── side-effects ├── key: () ├── fd: ()-->(1) └── select - ├── columns: c0:1(int!null) c1:2(int!null) + ├── columns: c0:1(int) c1:2(int) ├── cardinality: [0 - 1] ├── side-effects ├── key: () ├── fd: ()-->(1,2) - ├── project - │ ├── columns: c0:1(int!null) c1:2(int!null) + ├── values + │ ├── columns: c0:1(int) c1:2(int) │ ├── cardinality: [1 - 1] │ ├── key: () │ ├── fd: ()-->(1,2) - │ ├── values - │ │ ├── cardinality: [1 - 1] - │ │ ├── key: () - │ │ └── tuple [type=tuple] - │ └── projections - │ ├── const: 1 [type=int] - │ └── const: 2 [type=int] + │ └── (1, 2) [type=tuple{int, int}] └── filters └── le [type=bool, outer=(1,2), side-effects] ├── case [type=int] diff --git a/pkg/sql/opt/norm/testdata/rules/max1row b/pkg/sql/opt/norm/testdata/rules/max1row index 7325393afb10..a8da29db3162 100644 --- a/pkg/sql/opt/norm/testdata/rules/max1row +++ b/pkg/sql/opt/norm/testdata/rules/max1row @@ -25,16 +25,12 @@ TABLE b opt expect=EliminateMax1Row SELECT (SELECT i FROM a LIMIT 1) > 5 AS r ---- -project +values ├── columns: r:6(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(6) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{bool}] └── gt [type=bool] ├── subquery [type=int] │ └── 
scan a @@ -47,16 +43,12 @@ project opt expect=EliminateMax1Row SELECT (SELECT count(*) FROM a) > 100 AS r ---- -project +values ├── columns: r:7(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(7) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{bool}] └── gt [type=bool] ├── subquery [type=int] │ └── scalar-group-by @@ -72,16 +64,12 @@ project opt expect=EliminateMax1Row SELECT (SELECT i FROM a LIMIT 0) > 5 AS r ---- -project +values ├── columns: r:6(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(6) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{bool}] └── gt [type=bool] ├── subquery [type=int] │ └── values @@ -95,16 +83,12 @@ project opt expect-not=EliminateMax1Row SELECT (SELECT i FROM a) > 5 AS r ---- -project +values ├── columns: r:6(bool) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(6) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{bool}] └── gt [type=bool] ├── subquery [type=int] │ └── max1-row diff --git a/pkg/sql/opt/norm/testdata/rules/ordering b/pkg/sql/opt/norm/testdata/rules/ordering index 56b72bf63adb..996461e2c86e 100644 --- a/pkg/sql/opt/norm/testdata/rules/ordering +++ b/pkg/sql/opt/norm/testdata/rules/ordering @@ -4,7 +4,7 @@ CREATE TABLE abcde ( b INT, c INT, d INT, - e INT, + e INT DEFAULT(10), UNIQUE INDEX bc (b, c) ) ---- @@ -21,6 +21,20 @@ TABLE abcde ├── c int └── a int not null (storing) +exec-ddl +CREATE TABLE xyz ( + x TEXT PRIMARY KEY, + y INT8, + z FLOAT8 +) +---- +TABLE xyz + ├── x string not null + ├── y int + ├── z float + └── INDEX primary + └── x string not null + # -------------------------------------------------- # SimplifyLimitOrdering # -------------------------------------------------- @@ -215,14 +229,41 @@ project ├── columns: field:3(string) └── explain ├── columns: 
tree:2(string) field:3(string) description:4(string) - └── project - ├── columns: k:1(int!null) + └── values + ├── columns: k:1(int) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] + └── (123,) [type=tuple{int}] + +# -------------------------------------------------- +# SimplifyInsertOrdering +# -------------------------------------------------- +opt expect=SimplifyInsertOrdering +INSERT INTO abcde (a, b, c, d) SELECT y, 1, y+1, 2 FROM xyz ORDER BY y, x, z +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) + ├── input columns: y:7(int) "?column?":9(int) "?column?":10(int) "?column?":11(int) column12:12(int) + ├── internal-ordering: +7,+6 opt(9,11,12) + ├── cardinality: [0 - 0] + ├── side-effects, mutations + └── sort + ├── columns: x:6(string!null) y:7(int) z:8(float) "?column?":9(int!null) "?column?":10(int) "?column?":11(int!null) column12:12(int!null) + ├── key: (6) + ├── fd: ()-->(9,11,12), (6)-->(7,8), (7)-->(10) + ├── ordering: +7,+6 opt(9,11,12) [provided: +7,+6] + └── project + ├── columns: column12:12(int!null) "?column?":9(int!null) "?column?":10(int) "?column?":11(int!null) x:6(string!null) y:7(int) z:8(float) + ├── key: (6) + ├── fd: ()-->(9,11,12), (6)-->(7,8), (7)-->(10) + ├── scan xyz + │ ├── columns: x:6(string!null) y:7(int) z:8(float) + │ ├── key: (6) + │ └── fd: (6)-->(7,8) └── projections - └── const: 123 [type=int] + ├── const: 10 [type=int] + ├── const: 1 [type=int] + ├── y + 1 [type=int, outer=(7)] + └── const: 2 [type=int] diff --git a/pkg/sql/opt/norm/testdata/rules/project b/pkg/sql/opt/norm/testdata/rules/project index 1673ab4e825c..39969d32f314 100644 --- a/pkg/sql/opt/norm/testdata/rules/project +++ b/pkg/sql/opt/norm/testdata/rules/project @@ -159,3 +159,74 @@ project ├── scan a └── projections └── const: 1 [type=int] + +# -------------------------------------------------- +# 
MergeProjectWithValues +# -------------------------------------------------- + +opt expect=MergeProjectWithValues +SELECT column1, 3 FROM (VALUES (1, 2)) +---- +values + ├── columns: column1:1(int) "?column?":3(int) + ├── cardinality: [1 - 1] + ├── key: () + ├── fd: ()-->(1,3) + └── (1, 3) [type=tuple{int, int}] + +# Only passthrough columns. +opt expect=MergeProjectWithValues +SELECT column1, column3 FROM (VALUES (1, 2, 3)) +---- +values + ├── columns: column1:1(int) column3:3(int) + ├── cardinality: [1 - 1] + ├── key: () + ├── fd: ()-->(1,3) + └── (1, 3) [type=tuple{int, int}] + +# Only synthesized columns. +opt expect=MergeProjectWithValues +SELECT 4, 5 FROM (VALUES (1, 2, 3)) +---- +values + ├── columns: "?column?":4(int) "?column?":5(int) + ├── cardinality: [1 - 1] + ├── key: () + ├── fd: ()-->(4,5) + └── (4, 5) [type=tuple{int, int}] + +# Don't trigger rule when there is more than one Values row. +opt expect-not=MergeProjectWithValues +SELECT column1, 3 FROM (VALUES (1, 2), (1, 4)) +---- +project + ├── columns: column1:1(int) "?column?":3(int!null) + ├── cardinality: [2 - 2] + ├── fd: ()-->(3) + ├── values + │ ├── columns: column1:1(int) + │ ├── cardinality: [2 - 2] + │ ├── (1,) [type=tuple{int}] + │ └── (1,) [type=tuple{int}] + └── projections + └── const: 3 [type=int] + +# Don't trigger rule when Project column depends on Values column. 
+opt expect-not=MergeProjectWithValues +SELECT column1+1, 3 FROM (VALUES (1, 2)) +---- +project + ├── columns: "?column?":3(int) "?column?":4(int!null) + ├── cardinality: [1 - 1] + ├── key: () + ├── fd: ()-->(3,4) + ├── values + │ ├── columns: column1:1(int) + │ ├── cardinality: [1 - 1] + │ ├── key: () + │ ├── fd: ()-->(1) + │ └── (1,) [type=tuple{int}] + └── projections + ├── column1 + 1 [type=int, outer=(1)] + └── const: 3 [type=int] diff --git a/pkg/sql/opt/norm/testdata/rules/scalar b/pkg/sql/opt/norm/testdata/rules/scalar index c8b068a96763..6a4a696558bf 100644 --- a/pkg/sql/opt/norm/testdata/rules/scalar +++ b/pkg/sql/opt/norm/testdata/rules/scalar @@ -210,21 +210,12 @@ SELECT null::oidvector, null::int2vector ---- -project +values ├── columns: int:1(int) timestamptz:2(timestamptz) char:3(string) oidvector:4(oid[]) int2vector:5(int[]) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1-5) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - ├── null [type=int] - ├── null [type=timestamptz] - ├── null [type=string] - ├── null [type=oid[]] - └── null [type=int[]] + └── (NULL, NULL, NULL, NULL, NULL) [type=tuple{int, timestamptz, string, oid[], int[]}] # -------------------------------------------------- # FoldNullUnary @@ -569,47 +560,32 @@ select opt expect=SimplifyCaseWhenConstValue SELECT CASE 1 WHEN 1 THEN 'one' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'one' [type=string] + └── ('one',) [type=tuple{string}] opt expect=SimplifyCaseWhenConstValue SELECT CASE WHEN 1 = 1 THEN 'one' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── 
tuple [type=tuple] - └── projections - └── const: 'one' [type=string] + └── ('one',) [type=tuple{string}] opt expect=SimplifyCaseWhenConstValue SELECT CASE false WHEN 0 = 1 THEN 'one' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'one' [type=string] + └── ('one',) [type=tuple{string}] # Verify that a true condition does not remove non-constant expressions # proceeding it. @@ -649,17 +625,12 @@ project opt expect=SimplifyCaseWhenConstValue SELECT CASE 1 WHEN 2 THEN 'one' ELSE 'three' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'three' [type=string] + └── ('three',) [type=tuple{string}] opt expect=SimplifyCaseWhenConstValue SELECT @@ -689,17 +660,12 @@ SELECT ELSE 'five' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'three' [type=string] + └── ('three',) [type=tuple{string}] opt expect=SimplifyCaseWhenConstValue SELECT @@ -710,32 +676,22 @@ SELECT ELSE 'four' END ---- -project - ├── columns: case:1(string!null) +values + ├── columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'four' [type=string] + └── ('four',) [type=tuple{string}] opt expect=SimplifyCaseWhenConstValue SELECT CASE WHEN false THEN 'one' WHEN true THEN 'two' END ---- -project - ├── columns: case:1(string!null) +values + ├── 
columns: case:1(string) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'two' [type=string] + └── ('two',) [type=tuple{string}] # -------------------------------------------------- # UnifyComparisonTypes @@ -1117,47 +1073,32 @@ select norm expect=FoldCollate SELECT 'hello' COLLATE en_u_ks_level1 ---- -project - ├── columns: "?column?":1(collatedstring{en_u_ks_level1}!null) +values + ├── columns: "?column?":1(collatedstring{en_u_ks_level1}) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'hello' COLLATE en_u_ks_level1 [type=collatedstring{en_u_ks_level1}] + └── ('hello' COLLATE en_u_ks_level1,) [type=tuple{collatedstring{en_u_ks_level1}}] norm expect=FoldCollate SELECT ('hello' COLLATE en_u_ks_level1) COLLATE en_u_ks_level1 ---- -project - ├── columns: "?column?":1(collatedstring{en_u_ks_level1}!null) +values + ├── columns: "?column?":1(collatedstring{en_u_ks_level1}) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'hello' COLLATE en_u_ks_level1 [type=collatedstring{en_u_ks_level1}] + └── ('hello' COLLATE en_u_ks_level1,) [type=tuple{collatedstring{en_u_ks_level1}}] norm expect=FoldCollate SELECT ('hello' COLLATE en) COLLATE en_u_ks_level1 ---- -project - ├── columns: "?column?":1(collatedstring{en_u_ks_level1}!null) +values + ├── columns: "?column?":1(collatedstring{en_u_ks_level1}) ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 'hello' COLLATE en_u_ks_level1 [type=collatedstring{en_u_ks_level1}] + └── ('hello' COLLATE en_u_ks_level1,) [type=tuple{collatedstring{en_u_ks_level1}}] norm 
expect-not=FoldCollate SELECT s COLLATE en_u_ks_level1 FROM a diff --git a/pkg/sql/opt/operator.go b/pkg/sql/opt/operator.go index 625dc2401236..41e3a648e772 100644 --- a/pkg/sql/opt/operator.go +++ b/pkg/sql/opt/operator.go @@ -27,11 +27,28 @@ import ( type Operator uint16 // String returns the name of the operator as a string. -func (i Operator) String() string { - if i >= Operator(len(opNames)-1) { - return fmt.Sprintf("Operator(%d)", i) +func (op Operator) String() string { + if op >= Operator(len(opNames)-1) { + return fmt.Sprintf("Operator(%d)", op) + } + return opNames[opNameIndexes[op]:opNameIndexes[op+1]] +} + +// SyntaxTag returns the name of the operator using the SQL syntax that most +// closely matches it. +func (op Operator) SyntaxTag() string { + // Handle any special cases where default codegen tag isn't best choice as + // switch cases. + switch op { + default: + // Use default codegen tag, which is mechanically derived from the + // operator name. + if op >= Operator(len(opNames)-1) { + // Use UNKNOWN. + op = 0 + } + return opSyntaxTags[opSyntaxTagIndexes[op]:opSyntaxTagIndexes[op+1]] + } - return opNames[opIndexes[i]:opIndexes[i+1]] } // Expr is a node in an expression tree. It offers methods to traverse and diff --git a/pkg/sql/opt/ops/statement.opt b/pkg/sql/opt/ops/statement.opt new file mode 100644 index 000000000000..8fc454fbed25 --- /dev/null +++ b/pkg/sql/opt/ops/statement.opt @@ -0,0 +1,54 @@ +# statement.opt contains Optgen language definitions for all of Cockroach's +# SQL statement operators, such as Insert and CreateTable. Although many +# of them have no return result, they are still treated as if they were +# expressions with a zero row, zero column result. + + +# Insert evaluates a relational input expression, and inserts values from it +# into a target table.
The input may be an arbitrarily complex expression: +# +# INSERT INTO ab SELECT x, y+1 FROM xy ORDER BY y +# +# It can also be a simple VALUES clause: +# +# INSERT INTO ab VALUES (1, 2) +# +# It may also return rows, which can be further composed: +# +# SELECT a + b FROM [INSERT INTO ab VALUES (1, 2) RETURNING a, b] +# +# The Insert operator is capable of inserting values into computed columns and +# mutation columns, which are not writable (or even visible in the case of +# mutation columns) by SQL users. +[Relational, Mutation] +define Insert { + Input RelExpr + + _ InsertPrivate +} + +[Private] +define InsertPrivate { + # Table identifies the table into which to insert. It is an id that can be + # passed to the Metadata.Table method in order to fetch opt.Table metadata. + Table TableID + + # InputCols are columns from the Input expression that will be inserted into + # the target table. They must be a subset of the Input expression's output + # columns, but otherwise can be in any order. The count and order of columns + # corresponds to the count and order of the target table's columns; column + # values are read from the specified input columns and are then inserted into + # the corresponding table columns. + InputCols ColList + + # Ordering is the ordering required of the input expression. Rows will be + # inserted into the target table in this order. + Ordering OrderingChoice + + # NeedResults is true if the Insert operator returns output rows. One output + # row will be returned for each input row. The output row contains all + # columns in the table, including hidden columns, but not including any + # columns that are undergoing mutation (being added or dropped as part of + # online schema change). 
+ NeedResults bool +} diff --git a/pkg/sql/opt/optbuilder/builder.go b/pkg/sql/opt/optbuilder/builder.go index 13fe835fc386..0af9b5fb0b19 100644 --- a/pkg/sql/opt/optbuilder/builder.go +++ b/pkg/sql/opt/optbuilder/builder.go @@ -180,14 +180,17 @@ func unimplementedf(format string, a ...interface{}) builderError { func (b *Builder) buildStmt(stmt tree.Statement, inScope *scope) (outScope *scope) { // NB: The case statements are sorted lexicographically. switch stmt := stmt.(type) { + case *tree.Explain: + return b.buildExplain(stmt, inScope) + + case *tree.Insert: + return b.buildInsert(stmt, inScope) + case *tree.ParenSelect: - return b.buildSelect(stmt.Select, inScope) + return b.buildSelect(stmt.Select, nil /* desiredTypes */, inScope) case *tree.Select: - return b.buildSelect(stmt, inScope) - - case *tree.Explain: - return b.buildExplain(stmt, inScope) + return b.buildSelect(stmt, nil /* desiredTypes */, inScope) case *tree.ShowTraceForSession: return b.buildShowTrace(stmt, inScope) diff --git a/pkg/sql/opt/optbuilder/groupby.go b/pkg/sql/opt/optbuilder/groupby.go index 6689809f8fe6..9990bbd00ca8 100644 --- a/pkg/sql/opt/optbuilder/groupby.go +++ b/pkg/sql/opt/optbuilder/groupby.go @@ -394,7 +394,7 @@ func (b *Builder) buildGrouping( // Save a representation of the GROUP BY expression for validation of the // SELECT and HAVING expressions. This enables queries such as: // SELECT x+y FROM t GROUP BY x+y - col := b.addColumn(outScope, label, e.ResolvedType(), e) + col := b.addColumn(outScope, label, e) b.buildScalar(e, inScope, outScope, col, nil) inScope.groupby.groupStrs[symbolicExprStr(e)] = col } @@ -444,7 +444,7 @@ func (b *Builder) buildAggregateFunction( // This synthesizes a new tempScope column, unless the argument is a // simple VariableOp. 
texpr := pexpr.(tree.TypedExpr) - col := b.addColumn(tempScope, "" /* label */, texpr.ResolvedType(), texpr) + col := b.addColumn(tempScope, "" /* label */, texpr) b.buildScalar(texpr, inScope, tempScope, col, &info.colRefs) if col.scalar != nil { info.args[i] = col.scalar diff --git a/pkg/sql/opt/optbuilder/insert.go b/pkg/sql/opt/optbuilder/insert.go new file mode 100644 index 000000000000..27685c6a6c1c --- /dev/null +++ b/pkg/sql/opt/optbuilder/insert.go @@ -0,0 +1,783 @@ +// Copyright 2018 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package optbuilder + +import ( + "fmt" + "sort" + + "github.com/pkg/errors" + + "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sem/types" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" +) + +// buildInsert builds a memo group for an InsertOp expression. An input +// expression is constructed which outputs these columns: +// +// 1. Columns explicitly specified by the user in SELECT or VALUES expression. +// +// 2. Columns not specified by the user, but having a default value declared +// in schema (or being nullable). +// +// 3. Computed columns. +// +// 4. 
Mutation columns which are being added or dropped by an online schema +// change. +// +// buildInsert starts by constructing the input expression, and then wraps it +// with Project operators which add default, computed, and mutation columns. The +// final input expression will project values for all columns in the target +// table. For example, if this is the schema and INSERT statement: +// +// CREATE TABLE abcd ( +// a INT PRIMARY KEY, +// b INT, +// c INT DEFAULT(10), +// d INT AS (b+c) STORED +// ) +// INSERT INTO abcd (a) VALUES (1) +// +// Then an input expression equivalent to this would be built: +// +// INSERT INTO abcd (a, b, c, d) +// SELECT aa, bb, cc, bb + cc AS dd +// FROM (VALUES (1, NULL, 10)) AS t(aa, bb, cc) +// +func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope) { + if ins.OnConflict != nil { + panic(unimplementedf("UPSERT is not supported")) + } + + if ins.With != nil { + inScope = b.buildCTE(ins.With.CTEList, inScope) + defer b.checkCTEUsage(inScope) + } + + // INSERT INTO xx AS yy - we want to know about xx (tn) because + // that's what we get the descriptor with, and yy (alias) because + // that's what RETURNING will use. + tn, alias := getAliasedTableName(ins.Table) + + // Find which table we're working on, check the permissions. + tab := b.resolveTable(tn, privilege.INSERT) + + // Table resolution checked the INSERT permission, but if OnConflict is + // defined, then check the UPDATE permission as well. This has the side effect + // of twice adding the table to the metadata dependency list. + if ins.OnConflict != nil && !ins.OnConflict.DoNothing { + b.checkPrivilege(tab, privilege.UPDATE) + } + + var ib insertBuilder + ib.init(b, opt.InsertOp, tab, alias) + + // Compute target columns in two cases: + // + // 1. When explicitly specified by name: + // + // INSERT INTO (, , ...) ... + // + // 2. When implicitly targeted by VALUES expression: + // + // INSERT INTO
VALUES (...) + // + // Target columns for other cases can't be derived until the input expression + // is built, at which time the number of input columns is known. At the same + // time, the input expression cannot be built until DEFAULT expressions are + // replaced and named target columns are known. So this step must come first. + if len(ins.Columns) != 0 { + // Target columns are explicitly specified by name. + ib.addTargetNamedCols(ins.Columns) + } else { + values := ib.extractValuesInput(ins.Rows) + if values != nil { + // Target columns are implicitly targeted by VALUES expression in the + // same order they appear in the target table schema. + ib.addTargetTableCols(len(values.Rows[0])) + } + } + + // Build the input rows expression if one was specified: + // + // INSERT INTO
VALUES ... + // INSERT INTO
SELECT ... FROM ... + // + // or initialize an empty input if inserting default values (default values + // will be added later): + // + // INSERT INTO
DEFAULT VALUES + // + if !ins.DefaultValues() { + // Replace any DEFAULT expressions in the VALUES clause, if a VALUES clause + // exists: + // + // INSERT INTO
VALUES (..., DEFAULT, ...) + // + rows := ib.replaceDefaultExprs(ins.Rows) + + ib.buildInputRows(inScope, rows) + } else { + ib.buildEmptyInput(inScope) + } + + // Add default and computed columns that were not explicitly specified by + // name or implicitly targeted by input columns. This includes any columns + // undergoing write mutations, as they must always have a default or computed + // value. + ib.addDefaultAndComputedCols() + + // Build the final insert statement, including any returned expressions. + if resultsNeeded(ins.Returning) { + ib.buildInsert(*ins.Returning.(*tree.ReturningExprs)) + } else { + ib.buildInsert(nil /* returning */) + } + + return ib.outScope +} + +// insertBuilder is a helper struct that supports building an Insert operator in +// stages. +type insertBuilder struct { + b *Builder + md *opt.Metadata + + // op is InsertOp or UpsertOp. + op opt.Operator + + // tab is the target table. + tab opt.Table + + // tabID is the metadata ID of the table. + tabID opt.TableID + + // alias is the table alias specified in the INSERT statement, or just the + // table name itself if no alias was specified. + alias *tree.TableName + + // targetColList is an ordered list of IDs of the table columns into which + // values will be inserted by the Insert operator. It is incrementally built + // as the operator is built. + targetColList opt.ColList + + // targetColSet contains the same column IDs as targetColList, but as a set. + targetColSet opt.ColSet + + // parsedExprs is a cached set of parsed default and computed expressions + // from the table schema. These are parsed once and cached for reuse. + parsedExprs []tree.Expr + + // outScope contains the current set of columns that are in scope, as well as + // the output expression as it is incrementally built. Once the final Insert + // expression is completed, it will be contained in outScope.expr. 
+ outScope *scope +} + +func (ib *insertBuilder) init(b *Builder, op opt.Operator, tab opt.Table, alias *tree.TableName) { + ib.b = b + ib.md = b.factory.Metadata() + ib.op = op + ib.tab = tab + ib.targetColList = make(opt.ColList, 0, tab.ColumnCount()) + + if alias != nil { + ib.alias = alias + } else { + ib.alias = tab.Name() + } + + // Add the table and its columns to metadata. Include columns undergoing write + // mutations, since default values will need to be inserted into those. + ib.tabID = ib.md.AddTableWithMutations(tab) +} + +// addTargetNamedCols adds a list of user-specified column names to the list of +// table columns that are the target of the Insert operation. +func (ib *insertBuilder) addTargetNamedCols(names tree.NameList) { + if len(ib.targetColList) != 0 { + panic("addTargetNamedCols cannot be called more than once") + } + + for _, name := range names { + found := false + for ord, n := 0, ib.tab.ColumnCount(); ord < n; ord++ { + tabCol := ib.tab.Column(ord) + if tabCol.ColName() == name { + colID := ib.tabID.ColumnID(ord) + + // Computed columns cannot be targeted with input values. + if tabCol.IsComputed() { + panic(builderError{sqlbase.CannotWriteToComputedColError(string(tabCol.ColName()))}) + } + + // Ensure that the name list does not contain duplicates. + if ib.targetColSet.Contains(int(colID)) { + panic(builderError{fmt.Errorf("multiple assignments to the same column %q", &name)}) + } + ib.targetColSet.Add(int(colID)) + + ib.targetColList = append(ib.targetColList, colID) + found = true + break + } + } + if !found { + panic(builderError{sqlbase.NewUndefinedColumnError(string(name))}) + } + } + + // Ensure that primary key columns are in the target column list, or that + // they have default values. + ib.checkPrimaryKey() + + // Ensure that foreign keys columns are in the target column list, or that + // they have default values. 
+ ib.checkForeignKeys() +} + +// checkPrimaryKey ensures that the columns of the primary key are either +// assigned values by the INSERT statement, or else have default/computed +// values. If neither condition is true, checkPrimaryKey raises an error. +func (ib *insertBuilder) checkPrimaryKey() { + primary := ib.tab.Index(opt.PrimaryIndex) + for i, n := 0, primary.KeyColumnCount(); i < n; i++ { + col := primary.Column(i) + if col.Column.HasDefault() || col.Column.IsComputed() { + // The column has a default or computed value. + continue + } + + colID := ib.tabID.ColumnID(col.Ordinal) + if ib.targetColSet.Contains(int(colID)) { + // The column is explicitly specified in the target name list. + continue + } + + panic(builderError{fmt.Errorf( + "missing %q primary key column", col.Column.ColName())}) + } +} + +// checkForeignKeys ensures that all foreign key columns are either assigned +// values by the INSERT statement, or else have default/computed values. +// Alternatively, all columns can be unspecified. If neither condition is true, +// checkForeignKeys raises an error. Here is an example: +// +// CREATE TABLE orders ( +// id INT, +// cust_id INT, +// state STRING, +// FOREIGN KEY (cust_id, state) REFERENCES customers (id, state) +// ) +// +// INSERT INTO orders (cust_id) VALUES (1) +// +// This INSERT statement would trigger a static error, because only cust_id is +// specified in the INSERT statement. Either the state column must be specified +// as well, or else neither column can be specified. +// +// TODO(bram): add MATCH SIMPLE and fix MATCH FULL #30026 +func (ib *insertBuilder) checkForeignKeys() { + for i, n := 0, ib.tab.IndexCount(); i < n; i++ { + idx := ib.tab.Index(i) + fkey, ok := idx.ForeignKey() + if !ok { + continue + } + + var missingCols []string + allMissing := true + for j := 0; j < int(fkey.PrefixLen); j++ { + indexCol := idx.Column(j) + if indexCol.Column.HasDefault() || indexCol.Column.IsComputed() { + // The column has a default value. 
+ allMissing = false + continue + } + + colID := ib.tabID.ColumnID(indexCol.Ordinal) + if ib.targetColSet.Contains(int(colID)) { + // The column is explicitly specified in the target name list. + allMissing = false + continue + } + + missingCols = append(missingCols, string(indexCol.Column.ColName())) + } + if allMissing { + continue + } + + switch len(missingCols) { + case 0: + // Do nothing. + + case 1: + panic(builderError{errors.Errorf( + "missing value for column %q in multi-part foreign key", missingCols[0])}) + default: + sort.Strings(missingCols) + panic(builderError{errors.Errorf( + "missing values for columns %q in multi-part foreign key", missingCols)}) + } + } +} + +// addTargetTableCols adds up to maxCols columns to the list of columns that +// will be set by an INSERT operation. Columns are added from the target table +// in the same order they appear in its schema. This method is used when the +// target columns are not explicitly specified in the INSERT statement: +// +// INSERT INTO t VALUES (1, 2, 3) +// +// In this example, the first three columns of table t would be added as target +// columns. +func (ib *insertBuilder) addTargetTableCols(maxCols int) { + if len(ib.targetColList) != 0 { + panic("addTargetTableCols cannot be called more than once") + } + + numCols := 0 + for i, n := 0, ib.tab.ColumnCount(); i < n && numCols < maxCols; i++ { + tabCol := ib.tab.Column(i) + if tabCol.IsHidden() { + continue + } + + // TODO(justin): this is too restrictive. It should be possible to allow + // INSERT INTO (x) VALUES (DEFAULT) if x is a computed column. See #22434. + if tabCol.IsComputed() { + panic(builderError{sqlbase.CannotWriteToComputedColError(string(tabCol.ColName()))}) + } + + colID := ib.tabID.ColumnID(i) + ib.targetColList = append(ib.targetColList, colID) + ib.targetColSet.Add(int(colID)) + numCols++ + } + + // Ensure that the number of input columns does not exceed the number of + // target columns. 
+ ib.checkNumCols(len(ib.targetColList), maxCols) +} + +// extractValuesInput tests whether the given input is a VALUES clause with no +// WITH, ORDER BY, or LIMIT modifier. If so, it's returned, otherwise nil is +// returned. +func (ib *insertBuilder) extractValuesInput(inputRows *tree.Select) *tree.ValuesClause { + if inputRows == nil { + return nil + } + + // Only extract a simple VALUES clause with no modifiers. + if inputRows.With != nil || inputRows.OrderBy != nil || inputRows.Limit != nil { + return nil + } + + // Discard parentheses. + if parens, ok := inputRows.Select.(*tree.ParenSelect); ok { + return ib.extractValuesInput(parens.Select) + } + + if values, ok := inputRows.Select.(*tree.ValuesClause); ok { + return values + } + + return nil +} + +// replaceDefaultExprs looks for DEFAULT specifiers in input value expressions +// and replaces them with the corresponding default value expression for the +// corresponding column. This is only possible when the input is a VALUES +// clause. For example: +// +// INSERT INTO t (a, b) (VALUES (1, DEFAULT), (DEFAULT, 2)) +// +// Here, the two DEFAULT specifiers are replaced by the default value expression +// for the a and b columns, respectively. +// +// replaceDefaultExprs returns a VALUES expression with replaced DEFAULT values, +// or just the unchanged input expression if there are no DEFAULT values. +func (ib *insertBuilder) replaceDefaultExprs(inRows *tree.Select) (outRows *tree.Select) { + values := ib.extractValuesInput(inRows) + if values == nil { + return inRows + } + + // Ensure that the number of input columns exactly matches the number of + // target columns. + numCols := len(values.Rows[0]) + ib.checkNumCols(len(ib.targetColList), numCols) + + var newRows []tree.Exprs + for irow, tuple := range values.Rows { + if len(tuple) != numCols { + reportValuesLenError(numCols, len(tuple)) + } + + // Scan list of tuples in the VALUES row, looking for DEFAULT specifiers. 
+	var newTuple tree.Exprs
+	for itup, val := range tuple {
+		if _, ok := val.(tree.DefaultVal); ok {
+			// Found DEFAULT, so lazily create new rows and tuple lists.
+			if newRows == nil {
+				newRows = make([]tree.Exprs, irow, len(values.Rows))
+				copy(newRows, values.Rows[:irow])
+			}
+
+			if newTuple == nil {
+				newTuple = make(tree.Exprs, itup, numCols)
+				copy(newTuple, tuple[:itup])
+			}
+
+			val = ib.parseDefaultOrComputedExpr(ib.targetColList[itup])
+		}
+		if newTuple != nil {
+			newTuple = append(newTuple, val)
+		}
+	}
+
+	if newRows != nil {
+		if newTuple != nil {
+			newRows = append(newRows, newTuple)
+		} else {
+			newRows = append(newRows, tuple)
+		}
+	}
+	}
+
+	if newRows != nil {
+		return &tree.Select{Select: &tree.ValuesClause{Rows: newRows}}
+	}
+	return inRows
+}
+
+// buildInputRows constructs the memo group for the input expression and
+// constructs a new output scope containing that expression's output columns.
+func (ib *insertBuilder) buildInputRows(inScope *scope, inputRows *tree.Select) {
+	// If there are already required target columns, then those will provide
+	// desired input types. Otherwise, input columns are mapped to the table's
+	// non-hidden columns by corresponding ordinal position. Exclude hidden
+	// columns to prevent this statement from writing hidden columns:
+	//
+	//   INSERT INTO <table> VALUES (...)
+	//
+	// However, hidden columns can be written if the target columns were
+	// explicitly specified:
+	//
+	//   INSERT INTO <table> (...) VALUES (...)
+	//
+	var desiredTypes []types.T
+	if len(ib.targetColList) != 0 {
+		desiredTypes = make([]types.T, len(ib.targetColList))
+		for i, colID := range ib.targetColList {
+			desiredTypes[i] = ib.md.ColumnType(colID)
+		}
+	} else {
+		desiredTypes = make([]types.T, 0, ib.tab.ColumnCount())
+		for i, n := 0, ib.tab.ColumnCount(); i < n; i++ {
+			tabCol := ib.tab.Column(i)
+			if !tabCol.IsHidden() {
+				desiredTypes = append(desiredTypes, tabCol.DatumType())
+			}
+		}
+	}
+
+	ib.outScope = ib.b.buildSelect(inputRows, desiredTypes, inScope)
+
+	if len(ib.targetColList) != 0 {
+		// Target columns already exist, so ensure that the number of input
+		// columns exactly matches the number of target columns.
+		ib.checkNumCols(len(ib.targetColList), len(ib.outScope.cols))
+	} else {
+		// No target columns have been added by previous steps, so add columns
+		// that are implicitly targeted by the input expression.
+		ib.addTargetTableCols(len(ib.outScope.cols))
+	}
+
+	// Type check input columns.
+	for i := range ib.outScope.cols {
+		inCol := &ib.outScope.cols[i]
+		tabCol := ib.tab.Column(ib.md.ColumnOrdinal(ib.targetColList[i]))
+		checkDatumTypeFitsColumnType(tabCol, inCol.typ)
+	}
+}
+
+// buildEmptyInput constructs a new output scope containing a single row VALUES
+// expression with zero columns.
+func (ib *insertBuilder) buildEmptyInput(inScope *scope) {
+	ib.outScope = inScope.push()
+	ib.outScope.expr = ib.b.factory.ConstructValues(memo.ScalarListWithEmptyTuple, opt.ColList{})
+}
+
+// addDefaultAndComputedCols wraps the input expression with Project operator(s)
+// containing any default (or nullable) and computed columns that are not yet
+// part of the target column list. This includes mutation columns, since they
+// must always have default or computed values.
+//
+// After this call, the input expression will provide values for every one of
+// the target table columns, whether it was explicitly specified or implicitly
+// added.
+func (ib *insertBuilder) addDefaultAndComputedCols() { + // Add any missing default and nullable columns. + ib.addSynthesizedCols(func(tabCol opt.Column) bool { return !tabCol.IsComputed() }) + + // Add any missing computed columns. This must be done after adding default + // columns above, because computed columns can depend on default columns. + ib.addSynthesizedCols(func(tabCol opt.Column) bool { return tabCol.IsComputed() }) +} + +// addSynthesizedCols is a helper method for addDefaultAndComputedCols that +// scans the list of table columns, looking for any that do not yet have values +// provided by the input expression. New columns are synthesized for any missing +// columns, as long as the addCol callback function returns true for that +// column. +func (ib *insertBuilder) addSynthesizedCols(addCol func(tabCol opt.Column) bool) { + var projectionsScope *scope + + for i, n := 0, ib.tab.ColumnCount()+ib.tab.MutationColumnCount(); i < n; i++ { + // Skip columns that are already specified. + tabColID := ib.tabID.ColumnID(i) + if ib.targetColSet.Contains(int(tabColID)) { + continue + } + + // Get column metadata, including any mutation columns. + tabCol := tableColumnByOrdinal(ib.tab, i) + + // Invoke addCol to determine whether column should be added. + if !addCol(tabCol) { + continue + } + + // Construct a new Project operator that will contain the newly synthesized + // column(s). 
+		if projectionsScope == nil {
+			projectionsScope = ib.outScope.replace()
+			projectionsScope.appendColumnsFromScope(ib.outScope)
+			projectionsScope.copyOrdering(ib.outScope)
+		}
+
+		expr := ib.parseDefaultOrComputedExpr(tabColID)
+		texpr := ib.outScope.resolveType(expr, tabCol.DatumType())
+		scopeCol := ib.b.addColumn(projectionsScope, "" /* label */, texpr)
+		ib.b.buildScalar(texpr, ib.outScope, projectionsScope, scopeCol, nil)
+
+		ib.targetColList = append(ib.targetColList, tabColID)
+		ib.targetColSet.Add(int(tabColID))
+	}
+
+	if projectionsScope != nil {
+		ib.b.constructProjectForScope(ib.outScope, projectionsScope)
+		ib.outScope = projectionsScope
+	}
+
+	// Alias output columns using table column names. Computed columns may refer
+	// to other columns in the table by name.
+	for i := range ib.outScope.cols {
+		ib.outScope.cols[i].name = tree.Name(ib.md.ColumnLabel(ib.targetColList[i]))
+	}
+}
+
+// buildInsert constructs an Insert operator, possibly wrapped by a Project
+// operator that corresponds to the given RETURNING clause. Insert always
+// returns columns in the same order and with the same names as the target
+// table.
+func (ib *insertBuilder) buildInsert(returning tree.ReturningExprs) {
+	if len(ib.outScope.cols) != len(ib.targetColList) {
+		panic("expected input column count to match table column count")
+	}
+
+	// Map unordered input columns to order of target table columns.
+	inputCols := make(opt.ColList, len(ib.outScope.cols))
+	for i := range ib.outScope.cols {
+		tabOrd := ib.md.ColumnOrdinal(ib.targetColList[i])
+		inputCols[tabOrd] = ib.outScope.cols[i].id
+	}
+
+	private := memo.InsertPrivate{
+		Table:       ib.tabID,
+		InputCols:   inputCols,
+		NeedResults: returning != nil,
+	}
+	private.Ordering.FromOrdering(ib.outScope.ordering)
+	ib.outScope.expr = ib.b.factory.ConstructInsert(ib.outScope.expr, &private)
+
+	if returning != nil {
+		// 1. Project only non-mutation columns.
+		// 2. Re-order columns so they're in same order as table columns.
+ // 3. Alias columns to use table column names. + // 4. Mark hidden columns. + inScope := ib.outScope.replace() + inScope.expr = ib.outScope.expr + inScope.cols = make([]scopeColumn, ib.tab.ColumnCount()) + for i := range ib.outScope.cols { + targetColID := ib.targetColList[i] + ord := ib.md.ColumnOrdinal(targetColID) + if ord >= ib.tab.ColumnCount() { + // Exclude mutation columns. + continue + } + + outCol := &ib.outScope.cols[i] + inScope.cols[ord] = *outCol + inScope.cols[ord].table = *ib.alias + inScope.cols[ord].name = ib.tab.Column(ord).ColName() + + if ib.tab.Column(ord).IsHidden() { + inScope.cols[ord].hidden = true + } + } + + outScope := inScope.replace() + ib.b.analyzeReturningList(returning, nil /* desiredTypes */, inScope, outScope) + ib.b.buildProjectionList(inScope, outScope) + ib.b.constructProjectForScope(inScope, outScope) + ib.outScope = outScope + } else { + ib.outScope = &scope{builder: ib.b, expr: ib.outScope.expr} + } +} + +// checkNumCols raises an error if the expected number of columns does not match +// the actual number of columns. +func (ib *insertBuilder) checkNumCols(expected, actual int) { + if actual != expected { + more, less := "expressions", "target columns" + if actual < expected { + more, less = less, more + } + + // TODO(andyk): Add UpsertOp case. + kw := "INSERT" + panic(builderError{pgerror.NewErrorf(pgerror.CodeSyntaxError, + "%s has more %s than %s, %d expressions for %d targets", + kw, more, less, actual, expected)}) + } +} + +// parseDefaultOrComputedExpr parses the default (including nullable) or +// computed value expression for the given table column, and caches it for +// reuse. +func (ib *insertBuilder) parseDefaultOrComputedExpr(colID opt.ColumnID) tree.Expr { + if ib.parsedExprs == nil { + ib.parsedExprs = make([]tree.Expr, ib.tab.ColumnCount()+ib.tab.MutationColumnCount()) + } + + // Return expression from cache, if it was already parsed previously. 
+ ord := ib.md.ColumnOrdinal(colID) + if ib.parsedExprs[ord] != nil { + return ib.parsedExprs[ord] + } + + var exprStr string + tabCol := tableColumnByOrdinal(ib.tab, ord) + switch { + case tabCol.IsComputed(): + exprStr = tabCol.ComputedExprStr() + case tabCol.HasDefault(): + exprStr = tabCol.DefaultExprStr() + case tabCol.IsNullable(): + return tree.DNull + default: + panic(builderError{sqlbase.NewNonNullViolationError(string(tabCol.ColName()))}) + } + + expr, err := parser.ParseExpr(exprStr) + if err != nil { + panic(builderError{err}) + } + + ib.parsedExprs[ord] = expr + return ib.parsedExprs[ord] +} + +// resultsNeeded determines whether a statement that might have a RETURNING +// clause needs to provide values for result rows for a downstream plan. +func resultsNeeded(r tree.ReturningClause) bool { + switch t := r.(type) { + case *tree.ReturningExprs: + return true + case *tree.ReturningNothing, *tree.NoReturningClause: + return false + default: + panic(errors.Errorf("unexpected ReturningClause type: %T", t)) + } +} + +// getAliasedTableName returns the underlying table name for a TableExpr that +// could be either an alias or a normal table name. It also returns the original +// table name, which will be equal to the alias name if the input is an alias, +// or identical to the table name if the input is a normal table name. +// +// This is not meant to perform name resolution, but rather simply to extract +// the name indicated after FROM in DELETE/INSERT/UPDATE/UPSERT. +func getAliasedTableName(n tree.TableExpr) (*tree.TableName, *tree.TableName) { + var alias *tree.TableName + if ate, ok := n.(*tree.AliasedTableExpr); ok { + n = ate.Expr + // It's okay to ignore the As columns here, as they're not permitted in + // DML aliases where this function is used. The grammar does not allow + // them, so the parser would have reported an error if they were present. 
+ if ate.As.Alias != "" { + alias = tree.NewUnqualifiedTableName(ate.As.Alias) + } + } + tn, ok := n.(*tree.TableName) + if !ok { + panic(builderError{pgerror.Unimplemented( + "complex table expression in UPDATE/DELETE", + "cannot use a complex table name with DELETE/UPDATE")}) + } + if alias == nil { + alias = tn + } + return tn, alias +} + +// checkDatumTypeFitsColumnType verifies that a given scalar value type is valid +// to be stored in a column of the given column type. +// +// For the purpose of this analysis, column type aliases are not considered to +// be different (eg. TEXT and VARCHAR will fit the same scalar type String). +// +// This is used by the UPDATE, INSERT and UPSERT code. +func checkDatumTypeFitsColumnType(col opt.Column, typ types.T) { + if typ == types.Unknown || typ.Equivalent(col.DatumType()) { + return + } + + colName := string(col.ColName()) + panic(builderError{pgerror.NewErrorf(pgerror.CodeDatatypeMismatchError, + "value type %s doesn't match type %s of column %q", + typ, col.ColTypeStr(), tree.ErrNameString(&colName))}) +} + +// tableColumnByOrdinal returns the table column with the given ordinal +// position, including any mutation columns, as if they were appended to end of +// regular column list. 
+func tableColumnByOrdinal(tab opt.Table, ord int) opt.Column { + if ord < tab.ColumnCount() { + return tab.Column(ord) + } + return tab.MutationColumn(ord - tab.ColumnCount()) +} diff --git a/pkg/sql/opt/optbuilder/orderby.go b/pkg/sql/opt/optbuilder/orderby.go index 8bb2967b0fc0..8931b18140db 100644 --- a/pkg/sql/opt/optbuilder/orderby.go +++ b/pkg/sql/opt/optbuilder/orderby.go @@ -18,9 +18,9 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) // analyzeOrderBy analyzes an Ordering physical property from the ORDER BY @@ -115,11 +115,7 @@ func (b *Builder) addExtraColumn( func (b *Builder) analyzeOrderByIndex( order *tree.Order, inScope, projectionsScope, orderByScope *scope, ) { - tab, ok := b.resolveDataSource(&order.Table).(opt.Table) - if !ok { - panic(builderError{sqlbase.NewWrongObjectTypeError(&order.Table, "table")}) - } - + tab := b.resolveTable(&order.Table, privilege.SELECT) index, err := b.findIndexByName(tab, order.Index) if err != nil { panic(builderError{err}) @@ -143,7 +139,7 @@ func (b *Builder) analyzeOrderByIndex( colItem := tree.NewColumnItem(tab.Name(), col.Column.ColName()) expr := inScope.resolveType(colItem, types.Any) - outCol := b.addColumn(orderByScope, "" /* label */, expr.ResolvedType(), expr) + outCol := b.addColumn(orderByScope, "" /* label */, expr) outCol.descending = desc } } @@ -249,7 +245,7 @@ func (b *Builder) analyzeExtraArgument( for _, e := range exprs { // Ensure we can order on the given column(s). 
ensureColumnOrderable(e) - b.addColumn(extraColsScope, "" /* label */, e.ResolvedType(), e) + b.addColumn(extraColsScope, "" /* label */, e) } } diff --git a/pkg/sql/opt/optbuilder/project.go b/pkg/sql/opt/optbuilder/project.go index be0e9de4e18a..891d338a90e6 100644 --- a/pkg/sql/opt/optbuilder/project.go +++ b/pkg/sql/opt/optbuilder/project.go @@ -22,9 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/types" ) -// constructProjectForScope constructs a projection if it will result in a different -// set of columns than its input. Either way, it updates projectionsScope.group -// with the output memo group ID. +// constructProjectForScope constructs a projection if it will result in a +// different set of columns than its input. Either way, it updates +// projectionsScope.group with the output memo group ID. func (b *Builder) constructProjectForScope(inScope, projectionsScope *scope) { // Don't add an unnecessary "pass through" project. if projectionsScope.hasSameColumns(inScope) { @@ -61,12 +61,12 @@ func (b *Builder) constructProject(input memo.RelExpr, cols []scopeColumn) memo. return b.factory.ConstructProject(input, projections, passthrough) } -// analyzeProjectionList analyzes the given list of select expressions, and -// adds the resulting labels and typed expressions to outScope. -// -// As a side-effect, the appropriate scopes are updated with aggregations -// (scope.groupby.aggs) -func (b *Builder) analyzeProjectionList(selects tree.SelectExprs, inScope, outScope *scope) { +// analyzeProjectionList analyzes the given list of SELECT clause expressions, +// and adds the resulting labels and typed expressions to outScope. See the +// header comment for analyzeSelectList. 
+func (b *Builder) analyzeProjectionList( + selects tree.SelectExprs, desiredTypes []types.T, inScope, outScope *scope, +) { // We need to save and restore the previous values of the replaceSRFs field // and the field in semaCtx in case we are recursively called within a // subquery context. @@ -77,7 +77,38 @@ func (b *Builder) analyzeProjectionList(selects tree.SelectExprs, inScope, outSc inScope.context = "SELECT" inScope.replaceSRFs = true - for _, e := range selects { + b.analyzeSelectList(selects, desiredTypes, inScope, outScope) +} + +// analyzeReturningList analyzes the given list of RETURNING clause expressions, +// and adds the resulting labels and typed expressions to outScope. See the +// header comment for analyzeSelectList. +func (b *Builder) analyzeReturningList( + returning tree.ReturningExprs, desiredTypes []types.T, inScope, outScope *scope, +) { + // We need to save and restore the previous value of the field in + // semaCtx in case we are recursively called within a subquery + // context. + defer b.semaCtx.Properties.Restore(b.semaCtx.Properties) + + // Ensure there are no special functions in the RETURNING clause. + b.semaCtx.Properties.Require("RETURNING", tree.RejectSpecial) + inScope.context = "RETURNING" + + b.analyzeSelectList(tree.SelectExprs(returning), desiredTypes, inScope, outScope) +} + +// analyzeSelectList is a helper function used by analyzeProjectionList and +// analyzeReturningList. It normalizes names, expands wildcards, resolves types, +// and adds resulting columns to outScope. The desiredTypes slice contains +// target type hints for the resulting expressions. +// +// As a side-effect, the appropriate scopes are updated with aggregations +// (scope.groupby.aggs) +func (b *Builder) analyzeSelectList( + selects tree.SelectExprs, desiredTypes []types.T, inScope, outScope *scope, +) { + for i, e := range selects { // Start with fast path, looking for simple column reference. 
texpr := b.resolveColRef(e.Expr, inScope) if texpr == nil { @@ -100,14 +131,19 @@ func (b *Builder) analyzeProjectionList(selects tree.SelectExprs, inScope, outSc if outScope.cols == nil { outScope.cols = make([]scopeColumn, 0, len(selects)+len(exprs)-1) } - for i, e := range exprs { - b.addColumn(outScope, labels[i], e.ResolvedType(), e) + for j, e := range exprs { + b.addColumn(outScope, labels[j], e) } continue } } - texpr = inScope.resolveType(e.Expr, types.Any) + desired := types.Any + if i < len(desiredTypes) { + desired = desiredTypes[i] + } + + texpr = inScope.resolveType(e.Expr, desired) } // Output column names should exactly match the original expression, so we @@ -117,7 +153,7 @@ func (b *Builder) analyzeProjectionList(selects tree.SelectExprs, inScope, outSc outScope.cols = make([]scopeColumn, 0, len(selects)) } label := b.getColName(e) - b.addColumn(outScope, label, texpr.ResolvedType(), texpr) + b.addColumn(outScope, label, texpr) } } diff --git a/pkg/sql/opt/optbuilder/scope.go b/pkg/sql/opt/optbuilder/scope.go index 82217bb98592..9ad6e2e3cd42 100644 --- a/pkg/sql/opt/optbuilder/scope.go +++ b/pkg/sql/opt/optbuilder/scope.go @@ -576,9 +576,6 @@ func (s *scope) FindSourceProvidingColumn( for ; s != nil; s, allowHidden = s.parent, false { for i := range s.cols { col := &s.cols[i] - // TODO(rytaft): Do not return a match if this column is being - // backfilled, or the column expression being resolved is not from - // a selector column expression from an UPDATE/DELETE. 
if col.name == colName { if col.table.TableName == "" && !col.hidden { if candidateFromAnonSource != nil { @@ -868,7 +865,7 @@ func (s *scope) replaceSRF(f *tree.FuncExpr, def *tree.FunctionDefinition) *srf srfScope := s.push() var outCol *scopeColumn if len(def.ReturnLabels) == 1 { - outCol = s.builder.addColumn(srfScope, def.Name, typedFunc.ResolvedType(), typedFunc) + outCol = s.builder.addColumn(srfScope, def.Name, typedFunc) } out := s.builder.buildFunction(typedFunc.(*tree.FuncExpr), s, srfScope, outCol, nil) srf := &srf{ diff --git a/pkg/sql/opt/optbuilder/select.go b/pkg/sql/opt/optbuilder/select.go index 02fbfe323947..141bd7bdd3ef 100644 --- a/pkg/sql/opt/optbuilder/select.go +++ b/pkg/sql/opt/optbuilder/select.go @@ -17,14 +17,16 @@ package optbuilder import ( "fmt" + "github.com/pkg/errors" + "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/pkg/errors" ) // buildDataSource builds a set of memo groups that represent the given table @@ -62,6 +64,7 @@ func (b *Builder) buildDataSource( case *tree.TableName: tn := source + // CTEs take precedence over other data sources. 
if cte := inScope.resolveCTE(tn); cte != nil { if cte.used { @@ -78,7 +81,7 @@ func (b *Builder) buildDataSource( return outScope } - ds := b.resolveDataSource(tn) + ds := b.resolveDataSource(tn, privilege.SELECT) switch t := ds.(type) { case opt.Table: return b.buildScan(t, tn, nil /* ordinals */, indexFlags, inScope) @@ -107,10 +110,14 @@ func (b *Builder) buildDataSource( case *tree.StatementSource: outScope = b.buildStmt(source.Statement, inScope) + if len(outScope.cols) == 0 { + panic(builderError{pgerror.NewErrorf(pgerror.CodeFeatureNotSupportedError, + "statement source \"%v\" does not return any columns", source.Statement)}) + } return outScope case *tree.TableRef: - ds := b.resolveDataSourceRef(source) + ds := b.resolveDataSourceRef(source, privilege.SELECT) switch t := ds.(type) { case opt.Table: outScope = b.buildScanFromTableRef(t, source, indexFlags, inScope) @@ -159,7 +166,7 @@ func (b *Builder) buildView(view opt.View, inScope *scope) (outScope *scope) { defer func() { b.skipSelectPrivilegeChecks = false }() } - outScope = b.buildSelect(sel, &scope{builder: b}) + outScope = b.buildSelect(sel, nil /* desiredTypes */, &scope{builder: b}) // Update data source name to be the name of the view. And if view columns // are specified, then update names of output columns. @@ -387,6 +394,12 @@ func (b *Builder) buildCTE(ctes []*tree.CTE, inScope *scope) (outScope *scope) { cols[j].table = tableName } } + + if len(cols) == 0 { + panic(builderError{pgerror.NewErrorf(pgerror.CodeFeatureNotSupportedError, + "WITH clause %q does not have a RETURNING clause", tree.ErrString(&name))}) + } + outScope.ctes[ctes[i].Name.Alias.String()] = &cteSource{ name: ctes[i].Name, cols: cols, @@ -397,12 +410,25 @@ func (b *Builder) buildCTE(ctes []*tree.CTE, inScope *scope) (outScope *scope) { return outScope } +// checkCTEUsage ensures that a CTE that contains a mutation (like INSERT) is +// used at least once by the query. Otherwise, it might not be executed. 
+func (b *Builder) checkCTEUsage(inScope *scope) { + for alias, source := range inScope.ctes { + if !source.used && source.expr.Relational().CanMutate { + panic(builderError{pgerror.UnimplementedWithIssueErrorf(24307, + "common table expression %q with side effects was not used in query", alias)}) + } + } +} + // buildSelect builds a set of memo groups that represent the given select // statement. // // See Builder.buildStmt for a description of the remaining input and // return values. -func (b *Builder) buildSelect(stmt *tree.Select, inScope *scope) (outScope *scope) { +func (b *Builder) buildSelect( + stmt *tree.Select, desiredTypes []types.T, inScope *scope, +) (outScope *scope) { wrapped := stmt.Select orderBy := stmt.OrderBy limit := stmt.Limit @@ -441,18 +467,19 @@ func (b *Builder) buildSelect(stmt *tree.Select, inScope *scope) (outScope *scop if with != nil { inScope = b.buildCTE(with.CTEList, inScope) + defer b.checkCTEUsage(inScope) } // NB: The case statements are sorted lexicographically. 
switch t := stmt.Select.(type) { case *tree.SelectClause: - outScope = b.buildSelectClause(t, orderBy, inScope) + outScope = b.buildSelectClause(t, orderBy, desiredTypes, inScope) case *tree.UnionClause: - outScope = b.buildUnion(t, inScope) + outScope = b.buildUnion(t, desiredTypes, inScope) case *tree.ValuesClause: - outScope = b.buildValuesClause(t, inScope) + outScope = b.buildValuesClause(t, desiredTypes, inScope) default: panic(fmt.Errorf("unknown select statement: %T", stmt.Select)) @@ -463,7 +490,7 @@ func (b *Builder) buildSelect(stmt *tree.Select, inScope *scope) (outScope *scop projectionsScope.cols = make([]scopeColumn, 0, len(outScope.cols)) for i := range outScope.cols { expr := &outScope.cols[i] - col := b.addColumn(projectionsScope, "" /* label */, expr.ResolvedType(), expr) + col := b.addColumn(projectionsScope, "" /* label */, expr) b.buildScalar(expr, outScope, projectionsScope, col, nil) } orderByScope := b.analyzeOrderBy(orderBy, outScope, projectionsScope) @@ -489,7 +516,7 @@ func (b *Builder) buildSelect(stmt *tree.Select, inScope *scope) (outScope *scop // See Builder.buildStmt for a description of the remaining input and // return values. func (b *Builder) buildSelectClause( - sel *tree.SelectClause, orderBy tree.OrderBy, inScope *scope, + sel *tree.SelectClause, orderBy tree.OrderBy, desiredTypes []types.T, inScope *scope, ) (outScope *scope) { fromScope := b.buildFrom(sel.From, sel.Where, inScope) projectionsScope := fromScope.replace() @@ -498,7 +525,7 @@ func (b *Builder) buildSelectClause( // function that refers to variables in fromScope or an ancestor scope, // buildAggregateFunction is called which adds columns to the appropriate // aggInScope and aggOutScope. - b.analyzeProjectionList(sel.Exprs, fromScope, projectionsScope) + b.analyzeProjectionList(sel.Exprs, desiredTypes, fromScope, projectionsScope) // Any aggregates in the HAVING, ORDER BY and DISTINCT ON clauses (if they // exist) will be added here. 
diff --git a/pkg/sql/opt/optbuilder/srfs.go b/pkg/sql/opt/optbuilder/srfs.go index 166f1939932a..ac281532962e 100644 --- a/pkg/sql/opt/optbuilder/srfs.go +++ b/pkg/sql/opt/optbuilder/srfs.go @@ -104,7 +104,7 @@ func (b *Builder) buildZip(exprs tree.Exprs, inScope *scope) (outScope *scope) { var outCol *scopeColumn startCols := len(outScope.cols) if def == nil || def.Class != tree.GeneratorClass || len(def.ReturnLabels) == 1 { - outCol = b.addColumn(outScope, label, texpr.ResolvedType(), texpr) + outCol = b.addColumn(outScope, label, texpr) } zip[i].Func = b.buildScalar(texpr, inScope, outScope, outCol, nil) zip[i].Cols = make(opt.ColList, len(outScope.cols)-startCols) diff --git a/pkg/sql/opt/optbuilder/testdata/insert b/pkg/sql/opt/optbuilder/testdata/insert new file mode 100644 index 000000000000..76bdbba96d21 --- /dev/null +++ b/pkg/sql/opt/optbuilder/testdata/insert @@ -0,0 +1,950 @@ +exec-ddl +CREATE TABLE abcde ( + a INT NOT NULL, + b INT, + c INT DEFAULT (10), + d INT AS (b + c + 1) STORED, + e INT AS (a) STORED +) +---- +TABLE abcde + ├── a int not null + ├── b int + ├── c int + ├── d int + ├── e int + ├── rowid int not null (hidden) + └── INDEX primary + └── rowid int not null (hidden) + +exec-ddl +CREATE TABLE xyz ( + x TEXT PRIMARY KEY, + y INT8, + z FLOAT8 +) +---- +TABLE xyz + ├── x string not null + ├── y int + ├── z float + └── INDEX primary + └── x string not null + +exec-ddl +CREATE TABLE uv ( + u DECIMAL, + v BYTES +) +---- +TABLE uv + ├── u decimal + ├── v bytes + ├── rowid int not null (hidden) + └── INDEX primary + └── rowid int not null (hidden) + +exec-ddl +CREATE TABLE mutation ( + m INT PRIMARY KEY, + n INT, + "o:mutation" INT DEFAULT(10), + "p:mutation" STRING AS ("o:mutation" + n) STORED +) +---- +TABLE mutation + ├── m int not null + ├── n int + └── INDEX primary + └── m int not null + +# Unknown target table. 
+build +INSERT INTO unknown VALUES (1, 2, 3) +---- +error: no data source matches prefix: "unknown" + +# ------------------------------------------------------------------------------ +# Tests without target column names. +# ------------------------------------------------------------------------------ + +# Specify values for all non-hidden columns. +build +INSERT INTO abcde VALUES (1, 2, 3) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int) column2:8(int) column3:9(int) column11:11(int) column1:7(int) column10:10(int) + └── project + ├── columns: column11:11(int) column1:7(int) column2:8(int) column3:9(int) column10:10(int) + ├── project + │ ├── columns: column10:10(int) column1:7(int) column2:8(int) column3:9(int) + │ ├── values + │ │ ├── columns: column1:7(int) column2:8(int) column3:9(int) + │ │ └── tuple [type=tuple{int, int, int}] + │ │ ├── const: 1 [type=int] + │ │ ├── const: 2 [type=int] + │ │ └── const: 3 [type=int] + │ └── projections + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column3 [type=int] + └── const: 1 [type=int] + +# Don't specify values for null or default columns. +build +INSERT INTO abcde VALUES (1) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int) column8:8(unknown) column9:9(int) column8:8(unknown) column1:7(int) column10:10(int) + └── project + ├── columns: column8:8(unknown) column9:9(int!null) column10:10(int) column1:7(int) + ├── values + │ ├── columns: column1:7(int) + │ └── tuple [type=tuple{int}] + │ └── const: 1 [type=int] + └── projections + ├── null [type=unknown] + ├── const: 10 [type=int] + └── function: unique_rowid [type=int] + +# Ordered input. 
+build +INSERT INTO abcde SELECT y FROM xyz ORDER BY y, z +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: y:8(int) column10:10(unknown) column11:11(int) column10:10(unknown) y:8(int) column12:12(int) + ├── internal-ordering: +8,+9 + └── sort + ├── columns: y:8(int) z:9(float) column10:10(unknown) column11:11(int!null) column12:12(int) + ├── ordering: +8,+9 + └── project + ├── columns: column10:10(unknown) column11:11(int!null) column12:12(int) y:8(int) z:9(float) + ├── project + │ ├── columns: y:8(int) z:9(float) + │ └── scan xyz + │ └── columns: x:7(string!null) y:8(int) z:9(float) + └── projections + ├── null [type=unknown] + ├── const: 10 [type=int] + └── function: unique_rowid [type=int] + +# Use placeholders. +build +INSERT INTO xyz VALUES ($1, $2, $3) +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: column1:4(string) column2:5(int) column3:6(float) + └── values + ├── columns: column1:4(string) column2:5(int) column3:6(float) + └── tuple [type=tuple{string, int, float}] + ├── placeholder: $1 [type=string] + ├── placeholder: $2 [type=int] + └── placeholder: $3 [type=float] + +# Null expressions. 
+build +INSERT INTO abcde VALUES (2, null, null) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int) column2:8(unknown) column3:9(unknown) column11:11(unknown) column1:7(int) column10:10(int) + └── project + ├── columns: column11:11(unknown) column1:7(int) column2:8(unknown) column3:9(unknown) column10:10(int) + ├── project + │ ├── columns: column10:10(int) column1:7(int) column2:8(unknown) column3:9(unknown) + │ ├── values + │ │ ├── columns: column1:7(int) column2:8(unknown) column3:9(unknown) + │ │ └── tuple [type=tuple{int, unknown, unknown}] + │ │ ├── const: 2 [type=int] + │ │ ├── null [type=unknown] + │ │ └── null [type=unknown] + │ └── projections + │ └── function: unique_rowid [type=int] + └── projections + └── null [type=unknown] + +# Duplicate expressions. +build +INSERT INTO abcde SELECT 2, $1 + 1, $1 + 1 +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: "?column?":7(int) "?column?":8(int) "?column?":8(int) column10:10(int) "?column?":7(int) column9:9(int) + └── project + ├── columns: column10:10(int) "?column?":7(int!null) "?column?":8(int) column9:9(int) + ├── project + │ ├── columns: column9:9(int) "?column?":7(int!null) "?column?":8(int) + │ ├── project + │ │ ├── columns: "?column?":7(int!null) "?column?":8(int) + │ │ ├── values + │ │ │ └── tuple [type=tuple] + │ │ └── projections + │ │ ├── const: 2 [type=int] + │ │ └── plus [type=int] + │ │ ├── placeholder: $1 [type=int] + │ │ └── const: 1 [type=int] + │ └── projections + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: ?column? [type=int] + │ └── variable: ?column? [type=int] + └── const: 1 [type=int] + +# Use DEFAULT VALUES. 
+build +INSERT INTO uv DEFAULT VALUES +---- +insert uv + ├── columns: + ├── table columns: u:1(decimal) v:2(bytes) rowid:3(int) + ├── input columns: column4:4(unknown) column4:4(unknown) column5:5(int) + └── project + ├── columns: column4:4(unknown) column5:5(int) + ├── values + │ └── tuple [type=tuple] + └── projections + ├── null [type=unknown] + └── function: unique_rowid [type=int] + +# Use DEFAULT expressions in VALUES expression. +build +INSERT INTO abcde ((VALUES (1, DEFAULT, 2), (2, 3, 4), (3, 2, DEFAULT), (4, DEFAULT, DEFAULT))) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int) column2:8(int) column3:9(int) column11:11(int) column1:7(int) column10:10(int) + └── project + ├── columns: column11:11(int) column1:7(int) column2:8(int) column3:9(int) column10:10(int) + ├── project + │ ├── columns: column10:10(int) column1:7(int) column2:8(int) column3:9(int) + │ ├── values + │ │ ├── columns: column1:7(int) column2:8(int) column3:9(int) + │ │ ├── tuple [type=tuple{int, int, int}] + │ │ │ ├── const: 1 [type=int] + │ │ │ ├── null [type=unknown] + │ │ │ └── const: 2 [type=int] + │ │ ├── tuple [type=tuple{int, int, int}] + │ │ │ ├── const: 2 [type=int] + │ │ │ ├── const: 3 [type=int] + │ │ │ └── const: 4 [type=int] + │ │ ├── tuple [type=tuple{int, int, int}] + │ │ │ ├── const: 3 [type=int] + │ │ │ ├── const: 2 [type=int] + │ │ │ └── const: 10 [type=int] + │ │ └── tuple [type=tuple{int, int, int}] + │ │ ├── const: 4 [type=int] + │ │ ├── null [type=unknown] + │ │ └── const: 10 [type=int] + │ └── projections + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column3 [type=int] + └── const: 1 [type=int] + +# Use DEFAULT expressions in VALUES expression wrapped by WITH clause (error). 
+build +INSERT INTO abcde WITH a AS (SELECT 1) VALUES (1, DEFAULT, 2) +---- +error (42601): DEFAULT can only appear in a VALUES list within INSERT or on the right side of a SET + +# Too many values. +build +INSERT INTO xyz VALUES ('foo', 2, 3, 4) +---- +error (42601): INSERT has more expressions than target columns, 4 expressions for 3 targets + +# Return values from insert. +build +INSERT INTO abcde SELECT 1 RETURNING * +---- +project + ├── columns: a:7(int!null) b:8(unknown) c:9(int!null) d:8(unknown) e:7(int!null) + └── insert abcde + ├── columns: "?column?":7(int!null) column8:8(unknown) column9:9(int!null) column10:10(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: "?column?":7(int!null) column8:8(unknown) column9:9(int!null) column8:8(unknown) "?column?":7(int!null) column10:10(int!null) + └── project + ├── columns: column8:8(unknown) column9:9(int!null) column10:10(int) "?column?":7(int!null) + ├── project + │ ├── columns: "?column?":7(int!null) + │ ├── values + │ │ └── tuple [type=tuple] + │ └── projections + │ └── const: 1 [type=int] + └── projections + ├── null [type=unknown] + ├── const: 10 [type=int] + └── function: unique_rowid [type=int] + +# Return values from aliased table. 
+build +INSERT INTO abcde AS foo SELECT 1 RETURNING foo.a + 1, foo.b * foo.c +---- +project + ├── columns: "?column?":11(int) "?column?":12(unknown) + ├── insert abcde + │ ├── columns: "?column?":7(int!null) column8:8(unknown) column9:9(int!null) column10:10(int!null) + │ ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + │ ├── input columns: "?column?":7(int!null) column8:8(unknown) column9:9(int!null) column8:8(unknown) "?column?":7(int!null) column10:10(int!null) + │ └── project + │ ├── columns: column8:8(unknown) column9:9(int!null) column10:10(int) "?column?":7(int!null) + │ ├── project + │ │ ├── columns: "?column?":7(int!null) + │ │ ├── values + │ │ │ └── tuple [type=tuple] + │ │ └── projections + │ │ └── const: 1 [type=int] + │ └── projections + │ ├── null [type=unknown] + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + ├── plus [type=int] + │ ├── variable: ?column? [type=int] + │ └── const: 1 [type=int] + └── null [type=unknown] + +# Use returning INSERT as a FROM expression. +build +SELECT * FROM [INSERT INTO abcde VALUES (1) RETURNING *] +---- +project + ├── columns: a:7(int!null) b:8(unknown) c:9(int!null) d:8(unknown) e:7(int!null) + └── insert abcde + ├── columns: column1:7(int!null) column8:8(unknown) column9:9(int!null) column10:10(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int!null) column8:8(unknown) column9:9(int!null) column8:8(unknown) column1:7(int!null) column10:10(int!null) + └── project + ├── columns: column8:8(unknown) column9:9(int!null) column10:10(int) column1:7(int) + ├── values + │ ├── columns: column1:7(int) + │ └── tuple [type=tuple{int}] + │ └── const: 1 [type=int] + └── projections + ├── null [type=unknown] + ├── const: 10 [type=int] + └── function: unique_rowid [type=int] + +# Try to use aggregate function in RETURNING clause. 
+build +INSERT INTO abcde VALUES (1) RETURNING sum(a) +---- +error: sum(): aggregate functions are not allowed in RETURNING + +# Try to use SRF in RETURNING clause. +build +INSERT INTO abcde VALUES (1) RETURNING generate_series(1, 10) +---- +error: generate_series(): generator functions are not allowed in RETURNING + +# Try to use non-returning INSERT as expression. +build +SELECT * FROM [INSERT INTO abcde VALUES (1)] +---- +error (0A000): statement source "INSERT INTO abcde VALUES (1)" does not return any columns + +# Use CTE. +build +WITH a AS (SELECT y, y+1 FROM xyz) INSERT INTO abcde SELECT * FROM a +---- +insert abcde + ├── columns: + ├── table columns: a:5(int) b:6(int) c:7(int) d:8(int) e:9(int) rowid:10(int) + ├── input columns: y:2(int) "?column?":4(int) column11:11(int) column13:13(int) y:2(int) column12:12(int) + └── project + ├── columns: column13:13(int) y:2(int) "?column?":4(int) column11:11(int!null) column12:12(int) + ├── project + │ ├── columns: column11:11(int!null) column12:12(int) y:2(int) "?column?":4(int) + │ ├── project + │ │ ├── columns: "?column?":4(int) y:2(int) + │ │ ├── scan xyz + │ │ │ └── columns: x:1(string!null) y:2(int) z:3(float) + │ │ └── projections + │ │ └── plus [type=int] + │ │ ├── variable: y [type=int] + │ │ └── const: 1 [type=int] + │ └── projections + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: ?column? [type=int] + │ └── variable: column11 [type=int] + └── const: 1 [type=int] + +# Use CTE. 
+build +WITH a AS (SELECT y, y+1 FROM xyz), b AS (SELECT y+1, y FROM xyz) +INSERT INTO abcde TABLE a UNION TABLE b +---- +insert abcde + ├── columns: + ├── table columns: a:9(int) b:10(int) c:11(int) d:12(int) e:13(int) rowid:14(int) + ├── input columns: y:15(int) "?column?":16(int) column17:17(int) column19:19(int) y:15(int) column18:18(int) + └── project + ├── columns: column19:19(int) y:15(int) "?column?":16(int) column17:17(int!null) column18:18(int) + ├── project + │ ├── columns: column17:17(int!null) column18:18(int) y:15(int) "?column?":16(int) + │ ├── union + │ │ ├── columns: y:15(int) "?column?":16(int) + │ │ ├── left columns: xyz.y:2(int) "?column?":4(int) + │ │ ├── right columns: "?column?":8(int) xyz.y:6(int) + │ │ ├── project + │ │ │ ├── columns: "?column?":4(int) xyz.y:2(int) + │ │ │ ├── scan xyz + │ │ │ │ └── columns: xyz.x:1(string!null) xyz.y:2(int) xyz.z:3(float) + │ │ │ └── projections + │ │ │ └── plus [type=int] + │ │ │ ├── variable: xyz.y [type=int] + │ │ │ └── const: 1 [type=int] + │ │ └── project + │ │ ├── columns: "?column?":8(int) xyz.y:6(int) + │ │ ├── scan xyz + │ │ │ └── columns: xyz.x:5(string!null) xyz.y:6(int) xyz.z:7(float) + │ │ └── projections + │ │ └── plus [type=int] + │ │ ├── variable: xyz.y [type=int] + │ │ └── const: 1 [type=int] + │ └── projections + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: ?column? [type=int] + │ └── variable: column17 [type=int] + └── const: 1 [type=int] + +# Non-referenced CTE with mutation. +build +WITH cte AS (SELECT b FROM [INSERT INTO abcde VALUES (1) RETURNING *]) INSERT INTO abcde VALUES (1) +---- +error (0A000): unimplemented: common table expression "cte" with side effects was not used in query + +# Insert CTE that returns no columns. 
+build +WITH cte AS (INSERT INTO abcde VALUES (1)) SELECT * FROM cte +---- +error (0A000): WITH clause "cte" does not have a RETURNING clause + +# Use SRF in RETURNING clause. +build +INSERT INTO abcde VALUES (1) RETURNING generate_series(1, 100) +---- +error: generate_series(): generator functions are not allowed in RETURNING + +# Correlated subquery. +build +SELECT * FROM xyz WHERE EXISTS (SELECT * FROM [INSERT INTO abcde VALUES (y, y+1) RETURNING *]) +---- +select + ├── columns: x:1(string!null) y:2(int) z:3(float) + ├── scan xyz + │ └── columns: x:1(string!null) y:2(int) z:3(float) + └── filters + └── exists [type=bool] + └── project + ├── columns: column1:10(int!null) column2:11(int) column12:12(int!null) column14:14(int) + └── insert abcde + ├── columns: column1:10(int!null) column2:11(int) column12:12(int!null) column13:13(int!null) column14:14(int) + ├── table columns: a:4(int) b:5(int) c:6(int) d:7(int) e:8(int) rowid:9(int) + ├── input columns: column1:10(int!null) column2:11(int) column12:12(int!null) column14:14(int) column1:10(int!null) column13:13(int!null) + └── project + ├── columns: column14:14(int) column1:10(int) column2:11(int) column12:12(int!null) column13:13(int) + ├── project + │ ├── columns: column12:12(int!null) column13:13(int) column1:10(int) column2:11(int) + │ ├── values + │ │ ├── columns: column1:10(int) column2:11(int) + │ │ └── tuple [type=tuple{int, int}] + │ │ ├── variable: y [type=int] + │ │ └── plus [type=int] + │ │ ├── variable: y [type=int] + │ │ └── const: 1 [type=int] + │ └── projections + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column12 [type=int] + └── const: 1 [type=int] + +# ------------------------------------------------------------------------------ +# Tests with target column names. 
+# ------------------------------------------------------------------------------ + +# Specify values for all non-computed columns. +build +INSERT INTO abcde (c, b, a) VALUES (1, 2, 3) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column3:9(int) column2:8(int) column1:7(int) column11:11(int) column3:9(int) column10:10(int) + └── project + ├── columns: column11:11(int) column1:7(int) column2:8(int) column3:9(int) column10:10(int) + ├── project + │ ├── columns: column10:10(int) column1:7(int) column2:8(int) column3:9(int) + │ ├── values + │ │ ├── columns: column1:7(int) column2:8(int) column3:9(int) + │ │ └── tuple [type=tuple{int, int, int}] + │ │ ├── const: 1 [type=int] + │ │ ├── const: 2 [type=int] + │ │ └── const: 3 [type=int] + │ └── projections + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column1 [type=int] + └── const: 1 [type=int] + +# Don't specify values for null or default columns. +build +INSERT INTO abcde (a) VALUES (1) +---- +insert abcde + ├── columns: + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int) column8:8(unknown) column9:9(int) column8:8(unknown) column1:7(int) column10:10(int) + └── project + ├── columns: column8:8(unknown) column9:9(int!null) column10:10(int) column1:7(int) + ├── values + │ ├── columns: column1:7(int) + │ └── tuple [type=tuple{int}] + │ └── const: 1 [type=int] + └── projections + ├── null [type=unknown] + ├── const: 10 [type=int] + └── function: unique_rowid [type=int] + +# Insert value into hidden rowid column. 
+build +INSERT INTO abcde (a, rowid) VALUES (1, 2) RETURNING * +---- +project + ├── columns: a:7(int!null) b:9(unknown) c:10(int!null) d:9(unknown) e:7(int!null) + └── insert abcde + ├── columns: column1:7(int!null) column2:8(int!null) column9:9(unknown) column10:10(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column1:7(int!null) column9:9(unknown) column10:10(int!null) column9:9(unknown) column1:7(int!null) column2:8(int!null) + └── project + ├── columns: column9:9(unknown) column10:10(int!null) column1:7(int) column2:8(int) + ├── values + │ ├── columns: column1:7(int) column2:8(int) + │ └── tuple [type=tuple{int, int}] + │ ├── const: 1 [type=int] + │ └── const: 2 [type=int] + └── projections + ├── null [type=unknown] + └── const: 10 [type=int] + +# Use DEFAULT expressions in VALUES expression. +build +INSERT INTO abcde (c, b, a, rowid) +VALUES (DEFAULT, DEFAULT, 1, DEFAULT), (3, 2, 1, DEFAULT), (DEFAULT, DEFAULT, 2, 100) +RETURNING *, rowid +---- +insert abcde + ├── columns: a:9(int!null) b:8(int) c:7(int) d:11(int) e:9(int!null) rowid:10(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column3:9(int!null) column2:8(int) column1:7(int) column11:11(int) column3:9(int!null) column4:10(int!null) + └── project + ├── columns: column11:11(int) column1:7(int) column2:8(int) column3:9(int) column4:10(int) + ├── values + │ ├── columns: column1:7(int) column2:8(int) column3:9(int) column4:10(int) + │ ├── tuple [type=tuple{int, int, int, int}] + │ │ ├── const: 10 [type=int] + │ │ ├── null [type=unknown] + │ │ ├── const: 1 [type=int] + │ │ └── function: unique_rowid [type=int] + │ ├── tuple [type=tuple{int, int, int, int}] + │ │ ├── const: 3 [type=int] + │ │ ├── const: 2 [type=int] + │ │ ├── const: 1 [type=int] + │ │ └── function: unique_rowid [type=int] + │ └── tuple [type=tuple{int, int, int, int}] + │ ├── const: 10 [type=int] + │ ├── null 
[type=unknown] + │ ├── const: 2 [type=int] + │ └── const: 100 [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: column2 [type=int] + │ └── variable: column1 [type=int] + └── const: 1 [type=int] + +# Mismatched type. +build +INSERT INTO xyz (x) VALUES (10) +---- +error (42804): value type int doesn't match type STRING of column "x" + +# Try to insert into computed column. +build +INSERT INTO abcde (a, b, c, d) VALUES (1, 2, 3, 4) +---- +error (55000): cannot write directly to computed column "d" + +# Try to insert DEFAULT expression into computed column. +build +INSERT INTO abcde (a, d) VALUES (1, DEFAULT) +---- +error (55000): cannot write directly to computed column "d" + +# Too many values. +build +INSERT INTO abcde (a, b) VALUES (1, 2, 3) +---- +error (42601): INSERT has more expressions than target columns, 3 expressions for 2 targets + +# Too few values. +build +INSERT INTO abcde (a, b) VALUES (1) +---- +error (42601): INSERT has more target columns than expressions, 1 expressions for 2 targets + +# Duplicate column name. +build +INSERT INTO abcde (a, b, a) VALUES (1, 2, 3) +---- +error: multiple assignments to the same column "a" + +# Undefined column name. +build +INSERT INTO abcde (a, unk) VALUES (1, 2) +---- +error (42703): column "unk" does not exist + +# Cannot insert null into non-null column. +build +INSERT INTO abcde (b, c) VALUES (1, 2) +---- +error (23502): null value in column "a" violates not-null constraint + +# Return values from insert. 
+build +INSERT INTO abcde (b, a) SELECT x::int, y FROM xyz RETURNING * +---- +project + ├── columns: a:8(int!null) b:10(int) c:11(int!null) d:13(int) e:8(int!null) + └── insert abcde + ├── columns: y:8(int!null) x:10(int) column11:11(int!null) column12:12(int!null) column13:13(int) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: y:8(int!null) x:10(int) column11:11(int!null) column13:13(int) y:8(int!null) column12:12(int!null) + └── project + ├── columns: column13:13(int) y:8(int) x:10(int) column11:11(int!null) column12:12(int) + ├── project + │ ├── columns: column11:11(int!null) column12:12(int) y:8(int) x:10(int) + │ ├── project + │ │ ├── columns: x:10(int) y:8(int) + │ │ ├── scan xyz + │ │ │ └── columns: xyz.x:7(string!null) y:8(int) z:9(float) + │ │ └── projections + │ │ └── cast: INT [type=int] + │ │ └── variable: xyz.x [type=string] + │ └── projections + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: x [type=int] + │ └── variable: column11 [type=int] + └── const: 1 [type=int] + +# Return hidden column. +build +INSERT INTO abcde (rowid, a) VALUES (1, 2) RETURNING *, rowid +---- +insert abcde + ├── columns: a:8(int!null) b:9(unknown) c:10(int!null) d:9(unknown) e:8(int!null) rowid:7(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: column2:8(int!null) column9:9(unknown) column10:10(int!null) column9:9(unknown) column2:8(int!null) column1:7(int!null) + └── project + ├── columns: column9:9(unknown) column10:10(int!null) column1:7(int) column2:8(int) + ├── values + │ ├── columns: column1:7(int) column2:8(int) + │ └── tuple [type=tuple{int, int}] + │ ├── const: 1 [type=int] + │ └── const: 2 [type=int] + └── projections + ├── null [type=unknown] + └── const: 10 [type=int] + +# Use returning INSERT as a FROM expression. 
+build +SELECT * FROM [INSERT INTO abcde (a, b) SELECT y+1, y FROM xyz RETURNING *] +---- +project + ├── columns: a:10(int!null) b:8(int) c:11(int!null) d:13(int) e:10(int!null) + └── insert abcde + ├── columns: y:8(int) "?column?":10(int!null) column11:11(int!null) column12:12(int!null) column13:13(int) + ├── table columns: a:1(int) b:2(int) c:3(int) d:4(int) e:5(int) rowid:6(int) + ├── input columns: "?column?":10(int!null) y:8(int) column11:11(int!null) column13:13(int) "?column?":10(int!null) column12:12(int!null) + └── project + ├── columns: column13:13(int) y:8(int) "?column?":10(int) column11:11(int!null) column12:12(int) + ├── project + │ ├── columns: column11:11(int!null) column12:12(int) y:8(int) "?column?":10(int) + │ ├── project + │ │ ├── columns: "?column?":10(int) y:8(int) + │ │ ├── scan xyz + │ │ │ └── columns: x:7(string!null) y:8(int) z:9(float) + │ │ └── projections + │ │ └── plus [type=int] + │ │ ├── variable: y [type=int] + │ │ └── const: 1 [type=int] + │ └── projections + │ ├── const: 10 [type=int] + │ └── function: unique_rowid [type=int] + └── projections + └── plus [type=int] + ├── plus [type=int] + │ ├── variable: y [type=int] + │ └── variable: column11 [type=int] + └── const: 1 [type=int] + +# ------------------------------------------------------------------------------ +# Propagate desired INSERT types. +# ------------------------------------------------------------------------------ + +# Propagate types to VALUES. 
+build +INSERT INTO xyz VALUES ($1, $2 + 1, $3 + 1) +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: column1:4(string) column2:5(int) column3:6(float) + └── values + ├── columns: column1:4(string) column2:5(int) column3:6(float) + └── tuple [type=tuple{string, int, float}] + ├── placeholder: $1 [type=string] + ├── plus [type=int] + │ ├── placeholder: $2 [type=int] + │ └── const: 1 [type=int] + └── plus [type=float] + ├── placeholder: $3 [type=float] + └── const: 1.0 [type=float] + +# Propagate types to VALUES (named columns). +build +INSERT INTO xyz (z, y, x) VALUES ($1 + 1, $2 + 1, $3) +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: column3:6(string) column2:5(int) column1:4(float) + └── values + ├── columns: column1:4(float) column2:5(int) column3:6(string) + └── tuple [type=tuple{float, int, string}] + ├── plus [type=float] + │ ├── placeholder: $1 [type=float] + │ └── const: 1.0 [type=float] + ├── plus [type=int] + │ ├── placeholder: $2 [type=int] + │ └── const: 1 [type=int] + └── placeholder: $3 [type=string] + +# Propagate types to projection list. +build +INSERT INTO xyz ((SELECT $1, $2 + 1, $3 + 1)) +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + └── project + ├── columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + ├── values + │ └── tuple [type=tuple] + └── projections + ├── placeholder: $1 [type=string] + ├── plus [type=int] + │ ├── placeholder: $2 [type=int] + │ └── const: 1 [type=int] + └── plus [type=float] + ├── placeholder: $3 [type=float] + └── const: 1.0 [type=float] + +# Propagate types to projection list (named columns). 
+build +INSERT INTO xyz (x, y, z) SELECT $1, $2 + 1, $3 + 1 +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + └── project + ├── columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + ├── values + │ └── tuple [type=tuple] + └── projections + ├── placeholder: $1 [type=string] + ├── plus [type=int] + │ ├── placeholder: $2 [type=int] + │ └── const: 1 [type=int] + └── plus [type=float] + ├── placeholder: $3 [type=float] + └── const: 1.0 [type=float] + +# Propagate types to UNION. +build +INSERT INTO xyz (SELECT $1, $2 + 1, $3 + 1) UNION ALL (SELECT $1, $2 + 1, $3 + 1) +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: "?column?":10(string) "?column?":11(int) "?column?":12(float) + └── union-all + ├── columns: "?column?":10(string) "?column?":11(int) "?column?":12(float) + ├── left columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + ├── right columns: "?column?":7(string) "?column?":8(int) "?column?":9(float) + ├── project + │ ├── columns: "?column?":4(string) "?column?":5(int) "?column?":6(float) + │ ├── values + │ │ └── tuple [type=tuple] + │ └── projections + │ ├── placeholder: $1 [type=string] + │ ├── plus [type=int] + │ │ ├── placeholder: $2 [type=int] + │ │ └── const: 1 [type=int] + │ └── plus [type=float] + │ ├── placeholder: $3 [type=float] + │ └── const: 1.0 [type=float] + └── project + ├── columns: "?column?":7(string) "?column?":8(int) "?column?":9(float) + ├── values + │ └── tuple [type=tuple] + └── projections + ├── placeholder: $1 [type=string] + ├── plus [type=int] + │ ├── placeholder: $2 [type=int] + │ └── const: 1 [type=int] + └── plus [type=float] + ├── placeholder: $3 [type=float] + └── const: 1.0 [type=float] + +# Propagate types to UNION (named columns). 
+build +INSERT INTO xyz (x, z, y) SELECT $1, $2 + 1, $3 + 1 UNION ALL SELECT $1, $2 + 1, $3 + 1 +---- +insert xyz + ├── columns: + ├── table columns: x:1(string) y:2(int) z:3(float) + ├── input columns: "?column?":10(string) "?column?":12(int) "?column?":11(float) + └── union-all + ├── columns: "?column?":10(string) "?column?":11(float) "?column?":12(int) + ├── left columns: "?column?":4(string) "?column?":5(float) "?column?":6(int) + ├── right columns: "?column?":7(string) "?column?":8(float) "?column?":9(int) + ├── project + │ ├── columns: "?column?":4(string) "?column?":5(float) "?column?":6(int) + │ ├── values + │ │ └── tuple [type=tuple] + │ └── projections + │ ├── placeholder: $1 [type=string] + │ ├── plus [type=float] + │ │ ├── placeholder: $2 [type=float] + │ │ └── const: 1.0 [type=float] + │ └── plus [type=int] + │ ├── placeholder: $3 [type=int] + │ └── const: 1 [type=int] + └── project + ├── columns: "?column?":7(string) "?column?":8(float) "?column?":9(int) + ├── values + │ └── tuple [type=tuple] + └── projections + ├── placeholder: $1 [type=string] + ├── plus [type=float] + │ ├── placeholder: $2 [type=float] + │ └── const: 1.0 [type=float] + └── plus [type=int] + ├── placeholder: $3 [type=int] + └── const: 1 [type=int] + +# ------------------------------------------------------------------------------ +# Tests with mutations. +# ------------------------------------------------------------------------------ + +# Test mutation columns with default and computed values. 
+build +INSERT INTO mutation (m, n) VALUES (1, 2) +---- +insert mutation + ├── columns: + ├── table columns: m:1(int) n:2(int) "o:mutation":3(int) "p:mutation":4(string) + ├── input columns: column1:5(int) column2:6(int) column7:7(int) column8:8(int) + └── project + ├── columns: column8:8(int) column1:5(int) column2:6(int) column7:7(int!null) + ├── project + │ ├── columns: column7:7(int!null) column1:5(int) column2:6(int) + │ ├── values + │ │ ├── columns: column1:5(int) column2:6(int) + │ │ └── tuple [type=tuple{int, int}] + │ │ ├── const: 1 [type=int] + │ │ └── const: 2 [type=int] + │ └── projections + │ └── const: 10 [type=int] + └── projections + └── plus [type=int] + ├── variable: column7 [type=int] + └── variable: column2 [type=int] + +# Use RETURNING clause and ensure that mutation columns aren't projected. +build +INSERT INTO mutation (m, n) VALUES (1, 2) RETURNING * +---- +insert mutation + ├── columns: m:5(int!null) n:6(int) + ├── table columns: m:1(int) n:2(int) "o:mutation":3(int) "p:mutation":4(string) + ├── input columns: column1:5(int!null) column2:6(int) column7:7(int) column8:8(int) + └── project + ├── columns: column8:8(int) column1:5(int) column2:6(int) column7:7(int!null) + ├── project + │ ├── columns: column7:7(int!null) column1:5(int) column2:6(int) + │ ├── values + │ │ ├── columns: column1:5(int) column2:6(int) + │ │ └── tuple [type=tuple{int, int}] + │ │ ├── const: 1 [type=int] + │ │ └── const: 2 [type=int] + │ └── projections + │ └── const: 10 [type=int] + └── projections + └── plus [type=int] + ├── variable: column7 [type=int] + └── variable: column2 [type=int] + +# Try to reference mutation column in RETURNING clause. +build +INSERT INTO mutation (m, n) VALUES (1, 2) RETURNING "o:mutation" +---- +error (42703): column "o:mutation" does not exist + +# Try to insert into mutation column. 
+build +INSERT INTO mutation (m, n, "o:mutation") VALUES (1, 2, 3) +---- +error (42703): column "o:mutation" does not exist diff --git a/pkg/sql/opt/optbuilder/testdata/scalar b/pkg/sql/opt/optbuilder/testdata/scalar index 312330c7339d..e7a149a82494 100644 --- a/pkg/sql/opt/optbuilder/testdata/scalar +++ b/pkg/sql/opt/optbuilder/testdata/scalar @@ -766,11 +766,9 @@ error: arrays of jsonb not allowed opt SELECT -((-9223372036854775808):::int) ---- -project +values ├── columns: "?column?":1(int) - ├── values - │ └── tuple [type=tuple] - └── projections + └── tuple [type=tuple{int}] └── unary-minus [type=int] └── const: -9223372036854775808 [type=int] diff --git a/pkg/sql/opt/optbuilder/testdata/select b/pkg/sql/opt/optbuilder/testdata/select index e9b84366368f..d2b0ef63c207 100644 --- a/pkg/sql/opt/optbuilder/testdata/select +++ b/pkg/sql/opt/optbuilder/testdata/select @@ -1242,3 +1242,9 @@ SELECT * FROM [54(3) as t] project └── scan num_ref_hidden └── columns: rowid:3(int!null) + +# Non-referenced CTE with mutation. +build +WITH cte AS (SELECT b FROM [INSERT INTO abc VALUES (1) RETURNING *] LIMIT 1) SELECT * FROM abc +---- +error (0A000): unimplemented: common table expression "cte" with side effects was not used in query diff --git a/pkg/sql/opt/optbuilder/union.go b/pkg/sql/opt/optbuilder/union.go index f9271734c3b1..9f117ad83537 100644 --- a/pkg/sql/opt/optbuilder/union.go +++ b/pkg/sql/opt/optbuilder/union.go @@ -29,9 +29,11 @@ import ( // // See Builder.buildStmt for a description of the remaining input and // return values. 
-func (b *Builder) buildUnion(clause *tree.UnionClause, inScope *scope) (outScope *scope) { - leftScope := b.buildSelect(clause.Left, inScope) - rightScope := b.buildSelect(clause.Right, inScope) +func (b *Builder) buildUnion( + clause *tree.UnionClause, desiredTypes []types.T, inScope *scope, +) (outScope *scope) { + leftScope := b.buildSelect(clause.Left, desiredTypes, inScope) + rightScope := b.buildSelect(clause.Right, desiredTypes, inScope) // Remove any hidden columns, as they are not included in the Union. leftScope.removeHiddenCols() diff --git a/pkg/sql/opt/optbuilder/util.go b/pkg/sql/opt/optbuilder/util.go index a1a0887c9842..49888f92ae6b 100644 --- a/pkg/sql/opt/optbuilder/util.go +++ b/pkg/sql/opt/optbuilder/util.go @@ -201,13 +201,11 @@ func (b *Builder) projectColumn(dst *scopeColumn, src *scopeColumn) { // addColumn adds a column to scope with the given label, type, and // expression. It returns a pointer to the new column. The column ID and group // are left empty so they can be filled in later. -func (b *Builder) addColumn( - scope *scope, label string, typ types.T, expr tree.TypedExpr, -) *scopeColumn { +func (b *Builder) addColumn(scope *scope, label string, expr tree.TypedExpr) *scopeColumn { name := tree.Name(label) scope.cols = append(scope.cols, scopeColumn{ name: name, - typ: typ, + typ: expr.ResolvedType(), expr: expr, }) return &scope.cols[len(scope.cols)-1] @@ -381,28 +379,39 @@ func (b *Builder) assertNoAggregationOrWindowing(expr tree.Expr, op string) { } } +// resolveTable returns the data source in the catalog with the given name. If +// the name does not resolve to a table, or if the current user does not have +// the given privilege, then resolveTable raises an error. 
+func (b *Builder) resolveTable(tn *tree.TableName, priv privilege.Kind) opt.Table { + tab, ok := b.resolveDataSource(tn, priv).(opt.Table) + if !ok { + panic(builderError{sqlbase.NewWrongObjectTypeError(tn, "table")}) + } + return tab +} + // resolveDataSource returns the data source in the catalog with the given name. // If the name does not resolve to a table, or if the current user does not have -// the right privileges, then resolveDataSource raises an error. -func (b *Builder) resolveDataSource(tn *tree.TableName) opt.DataSource { +// the given privilege, then resolveDataSource raises an error. +func (b *Builder) resolveDataSource(tn *tree.TableName, priv privilege.Kind) opt.DataSource { ds, err := b.catalog.ResolveDataSource(b.ctx, tn) if err != nil { panic(builderError{err}) } - b.checkPrivilege(ds) + b.checkPrivilege(ds, priv) return ds } // resolveDataSourceFromRef returns the data source in the catalog that matches // the given TableRef spec. If no data source matches, or if the current user -// does not have the right privileges, then resolveDataSourceFromRef raises an +// does not have the given privilege, then resolveDataSourceFromRef raises an // error. -func (b *Builder) resolveDataSourceRef(ref *tree.TableRef) opt.DataSource { +func (b *Builder) resolveDataSourceRef(ref *tree.TableRef, priv privilege.Kind) opt.DataSource { ds, err := b.catalog.ResolveDataSourceByID(b.ctx, ref.TableID) if err != nil { panic(builderError{errors.Wrapf(err, "%s", tree.ErrString(ref))}) } - b.checkPrivilege(ds) + b.checkPrivilege(ds, priv) return ds } @@ -410,17 +419,15 @@ func (b *Builder) resolveDataSourceRef(ref *tree.TableRef) opt.DataSource { // access the given data source in the catalog. If not, then checkPrivilege // raises an error. It also adds the data source as a dependency to the // metadata, so that the privileges can be re-checked on reuse of the memo. 
-// -// TODO(andyk): Add privilegeKind field to Builder when privileges other than -// SELECT are needed. -func (b *Builder) checkPrivilege(ds opt.DataSource) { - var priv privilege.Kind - if !b.skipSelectPrivilegeChecks { - priv = privilege.SELECT +func (b *Builder) checkPrivilege(ds opt.DataSource, priv privilege.Kind) { + if priv != privilege.SELECT || !b.skipSelectPrivilegeChecks { err := b.catalog.CheckPrivilege(b.ctx, ds, priv) if err != nil { panic(builderError{err}) } + } else { + // The check is skipped, so don't recheck when dependencies are checked. + priv = 0 } // Add dependency on this data source to the metadata, so that the metadata diff --git a/pkg/sql/opt/optbuilder/values.go b/pkg/sql/opt/optbuilder/values.go index 65e1dd41a188..88b1886abbf2 100644 --- a/pkg/sql/opt/optbuilder/values.go +++ b/pkg/sql/opt/optbuilder/values.go @@ -28,7 +28,9 @@ import ( // // See Builder.buildStmt for a description of the remaining input and // return values. -func (b *Builder) buildValuesClause(values *tree.ValuesClause, inScope *scope) (outScope *scope) { +func (b *Builder) buildValuesClause( + values *tree.ValuesClause, desiredTypes []types.T, inScope *scope, +) (outScope *scope) { var numCols int if len(values.Rows) > 0 { numCols = len(values.Rows[0]) @@ -51,15 +53,17 @@ func (b *Builder) buildValuesClause(values *tree.ValuesClause, inScope *scope) ( for _, tuple := range values.Rows { if numCols != len(tuple) { - panic(builderError{pgerror.NewErrorf( - pgerror.CodeSyntaxError, - "VALUES lists must all be the same length, expected %d columns, found %d", - numCols, len(tuple))}) + reportValuesLenError(numCols, len(tuple)) } elems := make(memo.ScalarListExpr, numCols) for i, expr := range tuple { - texpr := inScope.resolveType(expr, types.Any) + desired := types.Any + if i < len(desiredTypes) { + desired = desiredTypes[i] + } + + texpr := inScope.resolveType(expr, desired) typ := texpr.ResolvedType() elems[i] = b.buildScalar(texpr, inScope, nil, nil, nil) @@ -86,3 
+90,10 @@ func (b *Builder) buildValuesClause(values *tree.ValuesClause, inScope *scope) ( outScope.expr = b.factory.ConstructValues(rows, colList) return outScope } + +func reportValuesLenError(expected, actual int) { + panic(builderError{pgerror.NewErrorf( + pgerror.CodeSyntaxError, + "VALUES lists must all be the same length, expected %d columns, found %d", + expected, actual)}) +} diff --git a/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go b/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go index 336f9470d920..c23d5ce3d587 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/ops_gen.go @@ -40,6 +40,7 @@ func (g *opsGen) generate(compiled *lang.CompiledExpr, w io.Writer) { g.genOperatorEnum() g.genOperatorNames() + g.genOperatorSyntaxTags() g.genOperatorsByTag() } @@ -70,7 +71,24 @@ func (g *opsGen) genOperatorNames() { fmt.Fprintf(g.w, "const opNames = \"%s\"\n\n", names.String()) - fmt.Fprintf(g.w, "var opIndexes = [...]uint32{%s%d}\n\n", indexes.String(), names.Len()) + fmt.Fprintf(g.w, "var opNameIndexes = [...]uint32{%s%d}\n\n", indexes.String(), names.Len()) +} + +func (g *opsGen) genOperatorSyntaxTags() { + var names bytes.Buffer + var indexes bytes.Buffer + + fmt.Fprint(&names, "UNKNOWN") + fmt.Fprint(&indexes, "0, ") + + for _, define := range g.sorted { + fmt.Fprintf(&indexes, "%d, ", names.Len()) + fmt.Fprint(&names, syntaxCase(string(define.Name))) + } + + fmt.Fprintf(g.w, "const opSyntaxTags = \"%s\"\n\n", names.String()) + + fmt.Fprintf(g.w, "var opSyntaxTagIndexes = [...]uint32{%s%d}\n\n", indexes.String(), names.Len()) } func (g *opsGen) genOperatorsByTag() { @@ -119,7 +137,6 @@ func sortDefines(defines lang.DefineSetExpr) lang.DefineSetExpr { // InnerJoinApply => inner-join-apply func dashCase(s string) string { var buf bytes.Buffer - for i, ch := range s { if unicode.IsUpper(ch) { if i != 0 { @@ -131,6 +148,20 @@ func dashCase(s string) string { buf.WriteRune(ch) } } + return buf.String() +} +// syntaxCase converts 
camel-case identifiers into "syntax case", where +// uppercase letters in the middle of the identifier are interpreted as new +// words and separated by a space from the previous word. Example: +// InnerJoinApply => INNER JOIN APPLY +func syntaxCase(s string) string { + var buf bytes.Buffer + for i, ch := range s { + if unicode.IsUpper(ch) && i != 0 { + buf.WriteByte(' ') + } + buf.WriteRune(unicode.ToUpper(ch)) + } return buf.String() } diff --git a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go index b21e3e74b62a..94651ab5a8ff 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/rule_gen.go @@ -303,6 +303,7 @@ func (g *newRuleGen) genMatchList(match *lang.ListExpr, context string, noMatch return } + var item string switch { case isFirst && isLast: // Match single item. @@ -311,20 +312,19 @@ func (g *newRuleGen) genMatchList(match *lang.ListExpr, context string, noMatch panic("noMatch is not yet fully supported by the list match single op") } g.w.nestIndent("if len(%s) != 1 {\n", context) - } else { - g.w.nestIndent("if len(%s) == 1 {\n", context) - g.w.writeIndent("_item := %s\n", g.makeListItemRef(context+"[0]", listItemTyp)) - g.genMatch(matchItem, "_item", noMatch) + return } + g.w.nestIndent("if len(%s) == 1 {\n", context) + item = g.makeListItemRef(context+"[0]", listItemTyp) + case isFirst && !isLast: // Match first item in list. if noMatch { panic("noMatch is not yet supported by the list match first op") } g.w.nestIndent("if len(%s) > 0 {\n", context) - g.w.writeIndent("_item := %s\n", g.makeListItemRef(context+"[0]", listItemTyp)) - g.genMatch(matchItem, "_item", noMatch) + item = g.makeListItemRef(context+"[0]", listItemTyp) case !isFirst && isLast: // Match last item in list. 
@@ -332,9 +332,7 @@ func (g *newRuleGen) genMatchList(match *lang.ListExpr, context string, noMatch panic("noMatch is not yet supported by the list match last op") } g.w.nestIndent("if len(%s) > 0 {\n", context) - itemRef := g.makeListItemRef(fmt.Sprintf("%s[len(%s)-1]", context, context), listItemTyp) - g.w.writeIndent("_item := %s\n", itemRef) - g.genMatch(matchItem, "_item", noMatch) + item = g.makeListItemRef(fmt.Sprintf("%s[len(%s)-1]", context, context), listItemTyp) case !isFirst && !isLast: // Match any item in list. @@ -342,7 +340,21 @@ func (g *newRuleGen) genMatchList(match *lang.ListExpr, context string, noMatch panic("noMatch is not yet supported by the list match any op") } g.w.nestIndent("for i := range %s {\n", context) - g.w.writeIndent("_item := %s\n", g.makeListItemRef(context+"[i]", listItemTyp)) + item = g.makeListItemRef(context+"[i]", listItemTyp) + } + + // Store the expression in a variable, since it may be expensive to evaluate + // multiple times. If already binding the item, use that variable, else use + // a temporary _item variable. + switch matchItem.(type) { + case *lang.BindExpr: + g.genMatch(matchItem, item, noMatch) + + case *lang.AnyExpr: + // Don't need to bind item in case of matching [ * ], [ ... * ... ], etc. 
+ + default: + g.w.writeIndent("_item := %s\n", item) g.genMatch(matchItem, "_item", noMatch) } } diff --git a/pkg/sql/opt/optgen/cmd/optgen/testdata/factory b/pkg/sql/opt/optgen/cmd/optgen/testdata/factory index 58946314f2c1..b9aeafc6a5e2 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/testdata/factory +++ b/pkg/sql/opt/optgen/cmd/optgen/testdata/factory @@ -87,8 +87,7 @@ func (_f *Factory) ConstructSelect( right := input.Child(1).(memo.RelExpr) on := *input.Child(2).(*memo.FiltersExpr) for i := range filters { - _item := &filters[i] - item := _item + item := &filters[i] leftCols := _f.funcs.OutputCols(left) if _f.funcs.IsBoundBy(item, leftCols) { if _f.matchedRule == nil || _f.matchedRule(opt.PushSelectIntoJoinLeft) { diff --git a/pkg/sql/opt/optgen/cmd/optgen/testdata/ops b/pkg/sql/opt/optgen/cmd/optgen/testdata/ops index de0fca400ede..29c81712900d 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/testdata/ops +++ b/pkg/sql/opt/optgen/cmd/optgen/testdata/ops @@ -52,7 +52,11 @@ const ( const opNames = "unknowncol-privateprojectprojectionsprojections-itemsort" -var opIndexes = [...]uint32{0, 7, 18, 25, 36, 52, 56} +var opNameIndexes = [...]uint32{0, 7, 18, 25, 36, 52, 56} + +const opSyntaxTags = "UNKNOWNCOL PRIVATEPROJECTPROJECTIONSPROJECTIONS ITEMSORT" + +var opSyntaxTagIndexes = [...]uint32{0, 7, 18, 25, 36, 52, 56} var RelationalOperators = [...]Operator{ ProjectOp, diff --git a/pkg/sql/opt/ordering/insert.go b/pkg/sql/opt/ordering/insert.go new file mode 100644 index 000000000000..7e4c2d864605 --- /dev/null +++ b/pkg/sql/opt/ordering/insert.go @@ -0,0 +1,51 @@ +// Copyright 2018 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ordering + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" +) + +func insertCanProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool { + // Insert requires a certain ordering of its input, but can also pass through + // a stronger ordering. For example: + // + // SELECT * FROM [INSERT INTO t1 SELECT * FROM t2 ORDER BY x] ORDER BY x,y + // + // In this case the internal ordering is x+, but we can pass through x+,y+ + // to satisfy both orderings. + return required.Intersects(&expr.(*memo.InsertExpr).Ordering) +} + +func insertBuildChildReqOrdering( + parent memo.RelExpr, required *physical.OrderingChoice, childIdx int, +) physical.OrderingChoice { + return required.Intersection(&parent.(*memo.InsertExpr).Ordering) +} + +func insertBuildProvided(expr memo.RelExpr, required *physical.OrderingChoice) opt.Ordering { + insert := expr.(*memo.InsertExpr) + provided := insert.Input.ProvidedPhysical().Ordering + inputFDs := &insert.Input.Relational().FuncDeps + + // Ensure that provided ordering only uses projected columns. + provided = remapProvided(provided, inputFDs, insert.Relational().OutputCols) + + // The child's provided ordering satisfies both and the Insert + // internal ordering; it may need to be trimmed. 
+ return trimProvided(provided, required, &expr.Relational().FuncDeps) +} diff --git a/pkg/sql/opt/ordering/limit.go b/pkg/sql/opt/ordering/limit.go index d178ffef24bc..b9bb490044e6 100644 --- a/pkg/sql/opt/ordering/limit.go +++ b/pkg/sql/opt/ordering/limit.go @@ -23,7 +23,9 @@ import ( func limitOrOffsetCanProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool { // Limit/Offset require a certain ordering of their input, but can also pass // through a stronger ordering. For example: + // // SELECT * FROM (SELECT x, y FROM t ORDER BY x LIMIT 10) ORDER BY x,y + // // In this case the internal ordering is x+, but we can pass through x+,y+ // to satisfy both orderings. return required.Intersects(expr.Private().(*physical.OrderingChoice)) diff --git a/pkg/sql/opt/ordering/ordering.go b/pkg/sql/opt/ordering/ordering.go index 03b6b04c4d75..a149916ad0f6 100644 --- a/pkg/sql/opt/ordering/ordering.go +++ b/pkg/sql/opt/ordering/ordering.go @@ -171,6 +171,11 @@ func init() { buildChildReqOrdering: noChildReqOrdering, buildProvidedOrdering: sortBuildProvided, } + funcMap[opt.InsertOp] = funcs{ + canProvideOrdering: insertCanProvideOrdering, + buildChildReqOrdering: insertBuildChildReqOrdering, + buildProvidedOrdering: insertBuildProvided, + } } func canNeverProvideOrdering(expr memo.RelExpr, required *physical.OrderingChoice) bool { diff --git a/pkg/sql/opt/props/logical.go b/pkg/sql/opt/props/logical.go index 8404c122117b..c389745c44f5 100644 --- a/pkg/sql/opt/props/logical.go +++ b/pkg/sql/opt/props/logical.go @@ -154,6 +154,11 @@ type Shared struct { // CanHaveSideEffects bool + // CanMutate is true if the subtree rooted at this expression contains at + // least one operator that modifies schema (like CreateTable) or writes or + // deletes rows (like Insert). + CanMutate bool + // HasPlaceholder is true if the subtree rooted at this expression contains // at least one Placeholder operator. 
HasPlaceholder bool diff --git a/pkg/sql/opt/testutils/testcat/create_table.go b/pkg/sql/opt/testutils/testcat/create_table.go index 37daec84407d..ec9b1e904c21 100644 --- a/pkg/sql/opt/testutils/testcat/create_table.go +++ b/pkg/sql/opt/testutils/testcat/create_table.go @@ -44,6 +44,8 @@ const ( nonKeyCol ) +var uniqueRowIDString = "unique_rowid()" + // CreateTable creates a test table from a parsed DDL statement and adds it to // the catalog. This is intended for testing, and is not a complete (and // probably not fully correct) implementation. It just has to be "good enough". @@ -61,7 +63,7 @@ func (tc *Catalog) CreateTable(stmt *tree.CreateTable) *Table { tab.IsVirtual = true } - // Add the columns. + // Add columns. for _, def := range stmt.Defs { switch def := def.(type) { case *tree.ColumnTableDef: @@ -87,7 +89,12 @@ func (tc *Catalog) CreateTable(stmt *tree.CreateTable) *Table { // If there is no primary index, add the hidden rowid column. if len(tab.Indexes) == 0 && !tab.IsVirtual { - rowid := &Column{Name: "rowid", Type: types.Int, Hidden: true} + rowid := &Column{ + Name: "rowid", + Type: types.Int, + Hidden: true, + DefaultExpr: &uniqueRowIDString, + } tab.Columns = append(tab.Columns, rowid) tab.addPrimaryColumnIndex(rowid.Name) } @@ -119,6 +126,7 @@ func (tc *Catalog) CreateTable(stmt *tree.CreateTable) *Table { // number derived from how CRDB internally stores tables. The first user table // is 53. This magic number is used to have tests look consistent. tab.tableID = sqlbase.ID(len(tc.dataSources) + 53) + // Add the new table to the catalog. 
tc.AddTable(tab) @@ -225,7 +233,23 @@ func (tt *Table) addColumn(def *tree.ColumnTableDef) { nullable := !def.PrimaryKey && def.Nullable.Nullability != tree.NotNull typ := coltypes.CastTargetToDatumType(def.Type) col := &Column{Name: string(def.Name), Type: typ, Nullable: nullable} - tt.Columns = append(tt.Columns, col) + + if def.DefaultExpr.Expr != nil { + s := tree.Serialize(def.DefaultExpr.Expr) + col.DefaultExpr = &s + } + + if def.Computed.Expr != nil { + s := tree.Serialize(def.Computed.Expr) + col.ComputedExpr = &s + } + + // Add mutation columns to the Mutations list. + if col.IsMutation() { + tt.Mutations = append(tt.Mutations, col) + } else { + tt.Columns = append(tt.Columns, col) + } } func (tt *Table) addIndex(def *tree.IndexTableDef, typ indexType) *Index { diff --git a/pkg/sql/opt/testutils/testcat/test_catalog.go b/pkg/sql/opt/testutils/testcat/test_catalog.go index 7a2c424e0be2..c4e564f809db 100644 --- a/pkg/sql/opt/testutils/testcat/test_catalog.go +++ b/pkg/sql/opt/testutils/testcat/test_catalog.go @@ -17,8 +17,10 @@ package testcat import ( "context" "fmt" + "strings" "time" + "github.com/cockroachdb/cockroach/pkg/sql/coltypes" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -313,6 +315,7 @@ type Table struct { Stats TableStats IsVirtual bool Catalog opt.Catalog + Mutations []*Column // If Revoked is true, then the user has had privileges on the table revoked. Revoked bool @@ -387,6 +390,16 @@ func (tt *Table) Statistic(i int) opt.TableStatistic { return tt.Stats[i] } +// MutationColumnCount is part of the opt.Table interface. +func (tt *Table) MutationColumnCount() int { + return len(tt.Mutations) +} + +// MutationColumn is part of the opt.Table interface. +func (tt *Table) MutationColumn(i int) opt.Column { + return tt.Mutations[i] +} + // FindOrdinal returns the ordinal of the column with the given name. 
func (tt *Table) FindOrdinal(name string) int { for i, col := range tt.Columns { @@ -477,10 +490,12 @@ func (ti *Index) ForeignKey() (opt.ForeignKeyReference, bool) { // Column implements the opt.Column interface for testing purposes. type Column struct { - Hidden bool - Nullable bool - Name string - Type types.T + Hidden bool + Nullable bool + Name string + Type types.T + DefaultExpr *string + ComputedExpr *string } var _ opt.Column = &Column{} @@ -500,11 +515,47 @@ func (tc *Column) DatumType() types.T { return tc.Type } +// ColTypeStr is part of the opt.Column interface. +func (tc *Column) ColTypeStr() string { + t, err := coltypes.DatumTypeToColumnType(tc.Type) + if err != nil { + panic(err) + } + return t.String() +} + // IsHidden is part of the opt.Column interface. func (tc *Column) IsHidden() bool { return tc.Hidden } +// HasDefault is part of the opt.Column interface. +func (tc *Column) HasDefault() bool { + return tc.DefaultExpr != nil +} + +// IsComputed is part of the opt.Column interface. +func (tc *Column) IsComputed() bool { + return tc.ComputedExpr != nil +} + +// DefaultExprStr is part of the opt.Column interface. +func (tc *Column) DefaultExprStr() string { + return *tc.DefaultExpr +} + +// ComputedExprStr is part of the opt.Column interface. +func (tc *Column) ComputedExprStr() string { + return *tc.ComputedExpr +} + +// IsMutation is true if the column should be treated as if it were recently +// added via a schema change, and is still being back-filled. Any INSERT ops +// must fill in its default value. +func (tc *Column) IsMutation() bool { + return strings.HasSuffix(tc.Name, ":mutation") +} + // TableStat implements the opt.TableStatistic interface for testing purposes. 
type TableStat struct { js stats.JSONStatistic diff --git a/pkg/sql/opt/xform/testdata/coster/perturb-cost b/pkg/sql/opt/xform/testdata/coster/perturb-cost index e9d5ab038a06..f65d018bb3d4 100644 --- a/pkg/sql/opt/xform/testdata/coster/perturb-cost +++ b/pkg/sql/opt/xform/testdata/coster/perturb-cost @@ -58,37 +58,23 @@ sort opt perturb-cost=(0.9) SELECT 1 ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 1 [type=int] + └── (1,) [type=tuple{int}] opt perturb-cost=(2.5) SELECT 1 ---- -project - ├── columns: "?column?":1(int!null) +values + ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] ├── stats: [rows=1] - ├── cost: 0.05 + ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) - ├── values - │ ├── cardinality: [1 - 1] - │ ├── stats: [rows=1] - │ ├── cost: 0.02 - │ ├── key: () - │ └── tuple [type=tuple] - └── projections - └── const: 1 [type=int] + └── (1,) [type=tuple{int}] diff --git a/pkg/sql/opt/xform/testdata/external/nova b/pkg/sql/opt/xform/testdata/external/nova index 94127863c3ab..8b293a20aec9 100644 --- a/pkg/sql/opt/xform/testdata/external/nova +++ b/pkg/sql/opt/xform/testdata/external/nova @@ -276,23 +276,20 @@ left-join (merge) ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:29(timestamp) 
flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) ├── left ordering: +1 ├── right ordering: +28 - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── ordering: +1 ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── ordering: +1 │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) │ ├── internal-ordering: +1 - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── ordering: +1 @@ -456,15 +453,13 @@ order by anon_1.flavors_flavorid asc, anon_1.flavors_id asc ---- sort ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) 
anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:38(timestamp) flavor_extra_specs_1_updated_at:39(timestamp) flavor_extra_specs_1_id:34(int) flavor_extra_specs_1_key:35(string) flavor_extra_specs_1_value:36(string) flavor_extra_specs_1_flavor_id:37(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,34) ├── fd: ()-->(11), (1)-->(2-10,12,14,15), (7)-->(1-6,8-10,12,14,15), (2)-->(1,3-10,12,14,15), (34)-->(35-39), (35,37)-->(34,36,38,39) ├── ordering: +7 opt(11) [provided: +7] └── right-join ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) flavor_extra_specs.id:34(int) key:35(string) value:36(string) flavor_extra_specs.flavor_id:37(int) flavor_extra_specs.created_at:38(timestamp) flavor_extra_specs.updated_at:39(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,34) ├── fd: ()-->(11), (1)-->(2-10,12,14,15), (7)-->(1-6,8-10,12,14,15), (2)-->(1,3-10,12,14,15), (34)-->(35-39), (35,37)-->(34,36,38,39) ├── scan flavor_extra_specs @@ -473,15 +468,13 @@ sort │ └── fd: (34)-->(35-39), (35,37)-->(34,36,38,39) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: ()-->(11), (1)-->(2-10,12,14,15), (7)-->(1-6,8-10,12,14,15), (2)-->(1,3-10,12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) 
memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:32(bool) │ ├── internal-ordering: +7 opt(11) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: ()-->(11), (1)-->(2-10,12,14,15,32), (7)-->(1-6,8-10,12,14,15), (2)-->(1,3-10,12,14,15) │ ├── offset @@ -721,15 +714,13 @@ order by anon_1.instance_types_flavorid asc, ---- sort ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── ordering: +7,+1 └── right-join ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) 
flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) value:30(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) instance_type_extra_specs.deleted_at:33(timestamp) instance_type_extra_specs.created_at:34(timestamp) instance_type_extra_specs.updated_at:35(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── select @@ -745,15 +736,13 @@ sort │ └── instance_type_extra_specs.deleted = $7 [type=bool, outer=(32), constraints=(/32: (/NULL - ])] ├── project │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ └── limit │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) │ ├── internal-ordering: +7,+1 - │ ├── side-effects - │ ├── has-placeholder + │ ├── 
side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-16,26), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ ├── offset @@ -1077,27 +1066,23 @@ from (select instance_types.created_at as instance_types_created_at, left-join (lookup instance_type_extra_specs) ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) ├── key columns: [28] = [28] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)-->(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) 
instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) │ ├── key columns: [1] = [31] - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,28) │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)-->(1,3-12,14-16), (28)-->(29,31,32), (29,31,32)~~>(28) │ ├── project │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)-->(1,3-12,14-16) │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16,26), (7,13)~~>(1-6,8-12,14-16), (2,13)-->(1,3-12,14-16) │ │ ├── offset @@ -1259,27 +1244,23 @@ from (select instance_types.created_at as instance_types_created_at, left-join (lookup instance_type_extra_specs) ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) 
anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) ├── key columns: [28] = [28] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) │ ├── key columns: [1] = [31] - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,28) │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29,31,32), (29,31,32)~~>(28) │ ├── project │ │ ├── columns: instance_types.id:1(int!null) 
name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16,26), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── offset @@ -1432,8 +1413,7 @@ from (select flavors.created_at as flavors_created_at, ---- right-join ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── 
side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -1442,14 +1422,12 @@ right-join │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── offset @@ -1594,8 +1572,7 @@ from (select flavors.created_at as flavors_created_at, ---- right-join ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) 
flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -1604,14 +1581,12 @@ right-join │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── offset @@ -1760,15 +1735,13 @@ order by anon_1.flavors_flavorid asc, anon_1.flavors_id asc ---- sort ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) 
flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── ordering: +7 └── right-join ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) flavor_extra_specs.id:25(int) key:26(string) value:27(string) flavor_extra_specs.flavor_id:28(int) flavor_extra_specs.created_at:29(timestamp) flavor_extra_specs.updated_at:30(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -1777,15 +1750,13 @@ sort │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) 
flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) │ ├── internal-ordering: +7 - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── offset @@ -1949,15 +1920,13 @@ order by anon_1.instance_types_flavorid asc, ---- sort ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── ordering: +7,+1 └── right-join ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) 
instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) value:30(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) instance_type_extra_specs.deleted_at:33(timestamp) instance_type_extra_specs.created_at:34(timestamp) instance_type_extra_specs.updated_at:35(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── select @@ -1973,15 +1942,13 @@ sort │ └── instance_type_extra_specs.deleted = $6 [type=bool, outer=(32), constraints=(/32: (/NULL - ])] ├── project │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ └── limit │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) │ ├── internal-ordering: +7,+1 - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-16,26), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ ├── offset @@ -2149,27 +2116,23 @@ from (select 
instance_types.created_at as instance_types_created_at, left-join (lookup instance_type_extra_specs) ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) ├── key columns: [28] = [28] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) │ ├── key columns: [1] 
= [31] - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,28) │ ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29,31,32), (29,31,32)~~>(28) │ ├── project │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16,26), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── offset @@ -2464,31 +2427,27 @@ order by anon_1.instance_types_flavorid asc, left-join (lookup instance_type_extra_specs) ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) 
anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) ├── key columns: [28] = [28] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── ordering: +7,+1 ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) │ ├── key columns: [1] = [31] - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,28) │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29,31,32), (29,31,32)~~>(28) │ ├── ordering: +7,+1 │ ├── project │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) 
instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── ordering: +7,+1 │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) │ │ ├── internal-ordering: +7,+1 - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16,26), (7,13)~~>(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── ordering: +7,+1 @@ -2650,15 +2609,13 @@ order by anon_1.flavors_flavorid asc, anon_1.flavors_id asc ---- sort ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), 
(25)-->(26-30), (26,28)-->(25,27,29,30) ├── ordering: +7 └── right-join ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) flavor_extra_specs.id:25(int) key:26(string) value:27(string) flavor_extra_specs.flavor_id:28(int) flavor_extra_specs.created_at:29(timestamp) flavor_extra_specs.updated_at:30(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -2667,15 +2624,13 @@ sort │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) │ ├── internal-ordering: +7 - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── offset @@ -2840,31 +2795,27 @@ order by anon_1.instance_types_flavorid asc, left-join (lookup instance_type_extra_specs) ├── 
columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:45(timestamp) instance_type_extra_specs_1_updated_at:46(timestamp) instance_type_extra_specs_1_deleted_at:44(timestamp) instance_type_extra_specs_1_deleted:43(bool) instance_type_extra_specs_1_id:39(int) instance_type_extra_specs_1_key:40(string) instance_type_extra_specs_1_value:41(string) instance_type_extra_specs_1_instance_type_id:42(int) ├── key columns: [39] = [39] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,39) ├── fd: ()-->(11), (1)-->(2-10,12-16), (7,13)~~>(1-6,8-10,12,14-16), (2,13)~~>(1,3-10,12,14-16), (39)-->(40-46), (40,42,43)~~>(39,41,44-46) ├── ordering: +7,+1 opt(11) [provided: +7,+1] ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:39(int) key:40(string) instance_type_extra_specs.instance_type_id:42(int) instance_type_extra_specs.deleted:43(bool) │ ├── key columns: [1] = [42] - │ ├── side-effects - │ ├── 
has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,39) │ ├── fd: ()-->(11), (1)-->(2-10,12-16), (7,13)~~>(1-6,8-10,12,14-16), (2,13)~~>(1,3-10,12,14-16), (39)-->(40,42,43), (40,42,43)~~>(39) │ ├── ordering: +7,+1 opt(11) [provided: +7,+1] │ ├── project │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: ()-->(11), (1)-->(2-10,12-16), (7,13)~~>(1-6,8-10,12,14-16), (2,13)~~>(1,3-10,12,14-16) │ │ ├── ordering: +7,+1 opt(11) [provided: +7,+1] │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:37(bool) │ │ ├── internal-ordering: +7,+1 opt(11) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: ()-->(11), (1)-->(2-10,12-16,37), (7,13)~~>(1-6,8-10,12,14-16), (2,13)~~>(1,3-10,12,14-16) │ │ ├── ordering: +7,+1 opt(11) [provided: +7,+1] @@ -3120,31 +3071,27 @@ order by anon_1.instance_types_deleted asc, left-join (lookup instance_type_extra_specs) ├── columns: anon_1_instance_types_created_at:15(timestamp) anon_1_instance_types_updated_at:16(timestamp) anon_1_instance_types_deleted_at:14(timestamp) anon_1_instance_types_deleted:13(bool) anon_1_instance_types_id:1(int!null) 
anon_1_instance_types_name:2(string) anon_1_instance_types_memory_mb:3(int) anon_1_instance_types_vcpus:4(int) anon_1_instance_types_root_gb:5(int) anon_1_instance_types_ephemeral_gb:6(int) anon_1_instance_types_flavorid:7(string) anon_1_instance_types_swap:8(int) anon_1_instance_types_rxtx_factor:9(float) anon_1_instance_types_vcpu_weight:10(int) anon_1_instance_types_disabled:11(bool) anon_1_instance_types_is_public:12(bool) instance_type_extra_specs_1_created_at:34(timestamp) instance_type_extra_specs_1_updated_at:35(timestamp) instance_type_extra_specs_1_deleted_at:33(timestamp) instance_type_extra_specs_1_deleted:32(bool) instance_type_extra_specs_1_id:28(int) instance_type_extra_specs_1_key:29(string) instance_type_extra_specs_1_value:30(string) instance_type_extra_specs_1_instance_type_id:31(int) ├── key columns: [28] = [28] - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,28) ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29-35), (29,31,32)~~>(28,30,33-35) ├── ordering: +13,+1 ├── left-join (lookup instance_type_extra_specs@secondary) │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) instance_type_extra_specs.id:28(int) key:29(string) instance_type_extra_specs.instance_type_id:31(int) instance_type_extra_specs.deleted:32(bool) │ ├── key columns: [1] = [31] - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1,28) │ ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16), (28)-->(29,31,32), (29,31,32)~~>(28) │ ├── ordering: +13,+1 │ ├── project │ │ ├── columns: instance_types.id:1(int!null) 
name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── ordering: +13,+1 │ │ └── limit │ │ ├── columns: instance_types.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) instance_types.deleted:13(bool) instance_types.deleted_at:14(timestamp) instance_types.created_at:15(timestamp) instance_types.updated_at:16(timestamp) true_agg:26(bool) │ │ ├── internal-ordering: +13,+1 - │ │ ├── side-effects - │ │ ├── has-placeholder + │ │ ├── side-effects, has-placeholder │ │ ├── key: (1) │ │ ├── fd: (1)-->(2-16,26), (7,13)-->(1-6,8-12,14-16), (2,13)~~>(1,3-12,14-16) │ │ ├── ordering: +13,+1 @@ -3305,8 +3252,7 @@ from (select flavors.created_at as flavors_created_at, ---- right-join ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) 
flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -3315,14 +3261,12 @@ right-join │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-12,14,15,23), (7)-->(1-6,8-12,14,15), (2)-->(1,3-12,14,15) │ ├── offset @@ -3470,15 +3414,13 @@ order by anon_1.flavors_flavorid asc, anon_1.flavors_id asc ---- sort ├── columns: anon_1_flavors_created_at:14(timestamp) anon_1_flavors_updated_at:15(timestamp) anon_1_flavors_id:1(int!null) anon_1_flavors_name:2(string) anon_1_flavors_memory_mb:3(int) anon_1_flavors_vcpus:4(int) anon_1_flavors_root_gb:5(int) anon_1_flavors_ephemeral_gb:6(int) anon_1_flavors_flavorid:7(string) anon_1_flavors_swap:8(int) anon_1_flavors_rxtx_factor:9(float) anon_1_flavors_vcpu_weight:10(int) anon_1_flavors_disabled:11(bool) anon_1_flavors_is_public:12(bool) anon_1_flavors_description:13(string) 
flavor_extra_specs_1_created_at:29(timestamp) flavor_extra_specs_1_updated_at:30(timestamp) flavor_extra_specs_1_id:25(int) flavor_extra_specs_1_key:26(string) flavor_extra_specs_1_value:27(string) flavor_extra_specs_1_flavor_id:28(int) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-15), (7)-->(1-6,8-15), (2)-->(1,3-15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── ordering: +7 └── right-join ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) description:13(string) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) flavor_extra_specs.id:25(int) key:26(string) value:27(string) flavor_extra_specs.flavor_id:28(int) flavor_extra_specs.created_at:29(timestamp) flavor_extra_specs.updated_at:30(timestamp) - ├── side-effects - ├── has-placeholder + ├── side-effects, has-placeholder ├── key: (1,25) ├── fd: (1)-->(2-15), (7)-->(1-6,8-15), (2)-->(1,3-15), (25)-->(26-30), (26,28)-->(25,27,29,30) ├── scan flavor_extra_specs @@ -3487,15 +3429,13 @@ sort │ └── fd: (25)-->(26-30), (26,28)-->(25,27,29,30) ├── project │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) description:13(string) flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-15), (7)-->(1-6,8-15), (2)-->(1,3-15) │ └── limit │ ├── columns: flavors.id:1(int!null) name:2(string) memory_mb:3(int) vcpus:4(int) root_gb:5(int) ephemeral_gb:6(int) flavorid:7(string) swap:8(int) rxtx_factor:9(float) vcpu_weight:10(int) disabled:11(bool) is_public:12(bool) description:13(string) 
flavors.created_at:14(timestamp) flavors.updated_at:15(timestamp) true_agg:23(bool) │ ├── internal-ordering: +7 - │ ├── side-effects - │ ├── has-placeholder + │ ├── side-effects, has-placeholder │ ├── key: (1) │ ├── fd: (1)-->(2-15,23), (7)-->(1-6,8-15), (2)-->(1,3-15) │ ├── sort diff --git a/pkg/sql/opt/xform/testdata/physprops/ordering b/pkg/sql/opt/xform/testdata/physprops/ordering index a86a28729ac9..69ba5fdbe6f4 100644 --- a/pkg/sql/opt/xform/testdata/physprops/ordering +++ b/pkg/sql/opt/xform/testdata/physprops/ordering @@ -1185,3 +1185,75 @@ sort │ └── variable: b [type=int] └── first-agg [type=int] └── variable: c [type=int] + +# -------------------------------------------------- +# Insert operator. +# -------------------------------------------------- + +# Verify that the internal ordering is required of the input. +opt +INSERT INTO a SELECT a, b::float, c::decimal, 'foo' FROM abc ORDER BY b +---- +insert a + ├── columns: + ├── table columns: x:1(int) y:2(float) z:3(decimal) s:4(string) + ├── input columns: a:5(int) b:8(float) c:9(decimal) "?column?":10(string) + ├── internal-ordering: +8 opt(10) + └── sort + ├── columns: a:5(int!null) b:8(float) c:9(decimal) "?column?":10(string!null) + ├── ordering: +8 opt(10) + └── project + ├── columns: b:8(float) c:9(decimal) "?column?":10(string!null) a:5(int!null) + ├── scan abc + │ └── columns: a:5(int!null) abc.b:6(int!null) abc.c:7(int!null) + └── projections + ├── abc.b::FLOAT8 [type=float] + ├── abc.c::DECIMAL [type=decimal] + └── const: 'foo' [type=string] + +# Verify that the external and internal orderings are intersected. 
+opt +SELECT * FROM [INSERT INTO abc SELECT * FROM xyz ORDER BY y, z RETURNING *] ORDER BY b +---- +insert abc + ├── columns: a:4(int!null) b:5(int!null) c:6(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) + ├── input columns: x:4(int!null) y:5(int!null) z:6(int!null) + ├── internal-ordering: +5,+6 + ├── ordering: +5 + └── sort + ├── columns: x:4(int!null) y:5(int!null) z:6(int!null) + ├── ordering: +5,+6 + └── scan xyz + └── columns: x:4(int!null) y:5(int!null) z:6(int!null) + +# Verify that the external and internal orderings are intersected and simplified +# according to FDs. +opt format=(hide-qual,hide-cost,hide-stats,hide-constraints,hide-scalars) +SELECT * FROM [INSERT INTO abc SELECT * FROM xyz WHERE y=z ORDER BY z RETURNING *] ORDER BY b, c +---- +insert abc + ├── columns: a:4(int!null) b:5(int!null) c:6(int!null) + ├── table columns: a:1(int) b:2(int) c:3(int) + ├── input columns: x:4(int!null) y:5(int!null) z:6(int!null) + ├── internal-ordering: +(5|6) + ├── side-effects, mutations + ├── key: (4,6) + ├── fd: (5)==(6), (6)==(5) + ├── ordering: +(5|6) [provided: +5] + └── sort + ├── columns: x:4(int!null) y:5(int!null) z:6(int!null) + ├── key: (4,6) + ├── fd: (5)==(6), (6)==(5) + ├── ordering: +(5|6) [provided: +5] + └── select + ├── columns: x:4(int!null) y:5(int!null) z:6(int!null) + ├── key: (4,6) + ├── fd: (5)==(6), (6)==(5) + ├── scan xyz + │ ├── columns: x:4(int!null) y:5(int!null) z:6(int!null) + │ ├── key: (4-6) + │ ├── prune: (4-6) + │ └── interesting orderings: (+4,+5,+6) + └── filters + └── y = z [type=bool, outer=(5,6), fd=(5)==(6), (6)==(5)] diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index 9b8b6dc04dc8..baa22cb47f75 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -249,6 +249,11 @@ type optTable struct { // wrappers is a cache of index wrappers that's used to satisfy repeated // calls to the SecondaryIndex method for the same index. 
 	wrappers map[*sqlbase.IndexDescriptor]*optIndex
+
+	// mutations is a list of the DELETE_AND_WRITE_ONLY mutation column
+	// descriptors. These are present when the table is undergoing an online
+	// schema change where one or more columns are being added or dropped.
+	mutations []*sqlbase.ColumnDescriptor
 }
 
 var _ opt.Table = &optTable{}
 
@@ -349,6 +354,33 @@ func (ot *optTable) Statistic(i int) opt.TableStatistic {
 	return &ot.stats[i]
 }
 
+// MutationColumnCount is part of the opt.Table interface.
+func (ot *optTable) MutationColumnCount() int {
+	ot.ensureMutations()
+	return len(ot.mutations)
+}
+
+// MutationColumn is part of the opt.Table interface. NOTE(review): it indexes ot.mutations directly and relies on a prior MutationColumnCount call to have run ensureMutations.
+func (ot *optTable) MutationColumn(i int) opt.Column {
+	return ot.mutations[i]
+}
+
+// ensureMutations adds any DELETE_AND_WRITE_ONLY column mutations to the table
+// wrapper's list of mutations, to be returned by the MutationColumn method.
+func (ot *optTable) ensureMutations() {
+	if ot.mutations == nil && len(ot.desc.Mutations) != 0 {
+		ot.mutations = make([]*sqlbase.ColumnDescriptor, 0, len(ot.desc.Mutations))
+		for i := range ot.desc.Mutations {
+			m := &ot.desc.Mutations[i]
+			if m.State == sqlbase.DescriptorMutation_DELETE_AND_WRITE_ONLY {
+				if c := m.GetColumn(); c != nil {
+					ot.mutations = append(ot.mutations, c)
+				}
+			}
+		}
+	}
+}
+
 func (ot *optTable) ensureColMap() {
 	if ot.colMap == nil {
 		ot.colMap = make(map[sqlbase.ColumnID]int, len(ot.desc.Columns))
diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go
index 639336136e22..6bc45e9a45b2 100644
--- a/pkg/sql/opt_exec_factory.go
+++ b/pkg/sql/opt_exec_factory.go
@@ -26,6 +26,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint"
 	"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
+	"github.com/cockroachdb/cockroach/pkg/sql/row"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sem/types" @@ -160,6 +161,13 @@ func (ef *execFactory) ConstructFilter( f.ivarHelper = tree.MakeIndexedVarHelper(f, len(src.info.SourceColumns)) f.filter = f.ivarHelper.Rebind(filter, true /* alsoReset */, false /* normalizeToNonNil */) f.props.ordering = sqlbase.ColumnOrdering(reqOrdering) + + // If there's a spool, pull it up. + if spool, ok := f.source.plan.(*spoolNode); ok { + f.source.plan = spool.source + spool.source = f + return spool, nil + } return f, nil } @@ -190,26 +198,18 @@ func (ef *execFactory) ConstructSimpleProject( // We will need the names of the input columns. inputCols = planColumns(n.(planNode)) } - src := asDataSource(n) - r := &renderNode{ - source: src, - sourceInfo: sqlbase.MultiSourceInfo{src.info}, - render: make([]tree.TypedExpr, len(cols)), - columns: make([]sqlbase.ResultColumn, len(cols)), - } - r.ivarHelper = tree.MakeIndexedVarHelper(r, len(src.info.SourceColumns)) + + var rb renderBuilder + rb.init(n, reqOrdering, len(cols)) for i, col := range cols { - v := r.ivarHelper.IndexedVar(int(col)) - r.render[i] = v + v := rb.r.ivarHelper.IndexedVar(int(col)) if colNames == nil { - r.columns[i].Name = inputCols[col].Name + rb.addExpr(v, inputCols[col].Name) } else { - r.columns[i].Name = colNames[i] + rb.addExpr(v, colNames[i]) } - r.columns[i].Typ = v.ResolvedType() } - r.props.ordering = sqlbase.ColumnOrdering(reqOrdering) - return r, nil + return rb.res, nil } func hasDuplicates(cols []exec.ColumnOrdinal) bool { @@ -227,21 +227,13 @@ func hasDuplicates(cols []exec.ColumnOrdinal) bool { func (ef *execFactory) ConstructRender( n exec.Node, exprs tree.TypedExprs, colNames []string, reqOrdering exec.OutputOrdering, ) (exec.Node, error) { - src := asDataSource(n) - r := &renderNode{ - source: src, - sourceInfo: sqlbase.MultiSourceInfo{src.info}, - render: make([]tree.TypedExpr, len(exprs)), - columns: make([]sqlbase.ResultColumn, len(exprs)), - } - r.ivarHelper = tree.MakeIndexedVarHelper(r, 
len(src.info.SourceColumns)) + var rb renderBuilder + rb.init(n, reqOrdering, len(exprs)) for i, expr := range exprs { - expr = r.ivarHelper.Rebind(expr, false /* alsoReset */, true /* normalizeToNonNil */) - r.render[i] = expr - r.columns[i] = sqlbase.ResultColumn{Name: colNames[i], Typ: expr.ResolvedType()} + expr = rb.r.ivarHelper.Rebind(expr, false /* alsoReset */, true /* normalizeToNonNil */) + rb.addExpr(expr, colNames[i]) } - r.props.ordering = sqlbase.ColumnOrdering(reqOrdering) - return r, nil + return rb.res, nil } // RenameColumns is part of the exec.Factory interface. @@ -742,6 +734,12 @@ func (ef *execFactory) ConstructLimit( l.countExpr = limit return l, nil } + // If the input plan is a spoolNode, then propagate any constant limit to it. + if spool, ok := plan.(*spoolNode); ok { + if val, ok := limit.(*tree.DInt); ok { + spool.hardLimit = int64(*val) + } + } return &limitNode{ plan: plan, countExpr: limit, @@ -784,6 +782,16 @@ func (ef *execFactory) ConstructProjectSet( func (ef *execFactory) ConstructPlan( root exec.Node, subqueries []exec.Subquery, ) (exec.Plan, error) { + // Enable auto-commit if the planner setting allows it. + if ef.planner.autoCommit { + if ac, ok := root.(autoCommitNode); ok { + ac.enableAutoCommit() + } + } + // No need to spool at the root. + if spool, ok := root.(*spoolNode); ok { + root = spool.source + } res := &planTop{ plan: root.(planNode), auditEvents: ef.planner.curPlan.auditEvents, @@ -870,3 +878,110 @@ func (ef *execFactory) ConstructShowTrace(typ tree.ShowTraceType, compact bool) } return node, nil } + +func (ef *execFactory) ConstructInsert( + input exec.Node, table opt.Table, rowsNeeded bool, +) (exec.Node, error) { + // Derive insert table and column descriptors. + tabDesc := table.(*optTable).desc + colCount := len(tabDesc.Columns) + colDescs := make([]sqlbase.ColumnDescriptor, colCount+table.MutationColumnCount()) + copy(colDescs, tabDesc.Columns) + + // Append any mutation columns. 
+	for i := colCount; i < len(colDescs); i++ {
+		colDescs[i] = *table.MutationColumn(i - colCount).(*sqlbase.ColumnDescriptor)
+	}
+
+	// Determine the foreign key tables involved in the insert.
+	fkTables, err := row.TablesNeededForFKs(
+		ef.planner.extendedEvalCtx.Context,
+		*tabDesc,
+		row.CheckInserts,
+		ef.planner.LookupTableByID,
+		ef.planner.CheckPrivilege,
+		ef.planner.analyzeExpr,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the table insert, which does the bulk of the work.
+	ri, err := row.MakeInserter(ef.planner.txn, tabDesc, fkTables, colDescs,
+		row.CheckFKs, &ef.planner.alloc)
+	if err != nil {
+		return nil, err
+	}
+
+	// Determine the relational type of the generated insert node.
+	// If rows are not needed, no columns are returned.
+	var returnCols sqlbase.ResultColumns
+	if rowsNeeded {
+		// Insert always returns all non-mutation columns, in the same order they
+		// are defined in the table. Note that the columns and order can be
+		// different than tabCols.
+		returnCols = sqlbase.ResultColumnsFromColDescs(tabDesc.Columns)
+	}
+
+	// Regular path for INSERT.
+	ins := insertNodePool.Get().(*insertNode)
+	*ins = insertNode{
+		source:  input.(planNode),
+		columns: returnCols,
+		run: insertRun{
+			ti:          tableInserter{ri: ri},
+			checkHelper: fkTables[tabDesc.ID].CheckHelper,
+			rowsNeeded:  rowsNeeded,
+			iVarContainerForComputedCols: sqlbase.RowIndexedVarContainer{
+				Cols:    tabDesc.Columns,
+				Mapping: ri.InsertColIDtoRowIndex,
+			},
+			insertCols: ri.InsertCols,
+		},
+	}
+
+	// Serialize the data-modifying plan to ensure that no data is
+	// observed that hasn't been validated first. See the comments
+	// on BatchedNext() in plan_batch.go.
+	if rowsNeeded {
+		return &spoolNode{source: &serializeNode{source: ins}}, nil
+	}
+
+	// We could use serializeNode here, but using rowCountNode is an
+	// optimization that saves on calls to Next() by the caller.
+ return &rowCountNode{source: ins}, nil +} + +// renderBuilder encapsulates the code to build a renderNode. +type renderBuilder struct { + r *renderNode + res planNode +} + +// init initializes the renderNode with render expressions. +func (rb *renderBuilder) init(n exec.Node, reqOrdering exec.OutputOrdering, cap int) { + src := asDataSource(n) + rb.r = &renderNode{ + source: src, + sourceInfo: sqlbase.MultiSourceInfo{src.info}, + render: make([]tree.TypedExpr, 0, cap), + columns: make([]sqlbase.ResultColumn, 0, cap), + } + rb.r.ivarHelper = tree.MakeIndexedVarHelper(rb.r, len(src.info.SourceColumns)) + rb.r.props.ordering = sqlbase.ColumnOrdering(reqOrdering) + + // If there's a spool, pull it up. + if spool, ok := rb.r.source.plan.(*spoolNode); ok { + rb.r.source.plan = spool.source + spool.source = rb.r + rb.res = spool + } else { + rb.res = rb.r + } +} + +// addExpr adds a new render expression with the given name. +func (rb *renderBuilder) addExpr(expr tree.TypedExpr, colName string) { + rb.r.render = append(rb.r.render, expr) + rb.r.columns = append(rb.r.columns, sqlbase.ResultColumn{Name: colName, Typ: expr.ResolvedType()}) +} diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index 9c373e9b1352..2c9bf39c14ac 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -904,7 +904,7 @@ func TestPGPreparedQuery(t *testing.T) { }}, // #14238 {"EXPLAIN SELECT 1", []preparedQueryTest{ - baseTest.SetArgs().Results("render", "", "").Results(" └── emptyrow", "", ""), + baseTest.SetArgs().Results("values", "", "").Results("", "size", "1 column, 1 row"), }}, // #14245 {"SELECT 1::oid = $1", []preparedQueryTest{ diff --git a/pkg/sql/plan.go b/pkg/sql/plan.go index 79a753f79fb1..23868e462c53 100644 --- a/pkg/sql/plan.go +++ b/pkg/sql/plan.go @@ -367,7 +367,8 @@ func (p *planner) makeOptimizerPlan(ctx context.Context, stmt Statement) error { // Start with fast check to see if top-level statement is supported. 
switch stmt.AST.(type) { case *tree.ParenSelect, *tree.Select, *tree.SelectClause, - *tree.UnionClause, *tree.ValuesClause, *tree.Explain: + *tree.UnionClause, *tree.ValuesClause, *tree.Explain, + *tree.Insert: default: return pgerror.Unimplemented("statement", fmt.Sprintf("unsupported statement: %T", stmt.AST)) diff --git a/pkg/sql/sqlbase/computed_exprs.go b/pkg/sql/sqlbase/computed_exprs.go index 99473ccebccf..671ff74e6bea 100644 --- a/pkg/sql/sqlbase/computed_exprs.go +++ b/pkg/sql/sqlbase/computed_exprs.go @@ -77,9 +77,9 @@ func (*descContainer) IndexedVarNodeFormatter(idx int) tree.NodeFormatter { } // CannotWriteToComputedColError constructs a write error for a computed column. -func CannotWriteToComputedColError(col *ColumnDescriptor) error { +func CannotWriteToComputedColError(colName string) error { return pgerror.NewErrorf(pgerror.CodeObjectNotInPrerequisiteStateError, - "cannot write directly to computed column %q", tree.ErrNameString(&col.Name)) + "cannot write directly to computed column %q", tree.ErrNameString(&colName)) } // ProcessComputedColumns adds columns which are computed to the set of columns diff --git a/pkg/sql/sqlbase/structured.go b/pkg/sql/sqlbase/structured.go index 4e63852c6f61..56a53c20210a 100644 --- a/pkg/sql/sqlbase/structured.go +++ b/pkg/sql/sqlbase/structured.go @@ -2453,16 +2453,36 @@ func (desc *ColumnDescriptor) DatumType() types.T { return desc.Type.ToDatumType() } +// ColTypeStr is part of the opt.Column interface. +func (desc *ColumnDescriptor) ColTypeStr() string { + return desc.Type.SQLString() +} + // IsHidden is part of the opt.Column interface. func (desc *ColumnDescriptor) IsHidden() bool { return desc.Hidden } +// HasDefault is part of the opt.Column interface. +func (desc *ColumnDescriptor) HasDefault() bool { + return desc.DefaultExpr != nil +} + // IsComputed returns whether the given column is computed. 
func (desc *ColumnDescriptor) IsComputed() bool {
 	return desc.ComputeExpr != nil
 }
 
+// DefaultExprStr is part of the opt.Column interface. It must only be called when HasDefault is true: it dereferences DefaultExpr without a nil check.
+func (desc *ColumnDescriptor) DefaultExprStr() string {
+	return *desc.DefaultExpr
+}
+
+// ComputedExprStr is part of the opt.Column interface. It must only be called when IsComputed is true: it dereferences ComputeExpr without a nil check.
+func (desc *ColumnDescriptor) ComputedExprStr() string {
+	return *desc.ComputeExpr
+}
+
 // CheckCanBeFKRef returns whether the given column is computed.
 func (desc *ColumnDescriptor) CheckCanBeFKRef() error {
 	if desc.IsComputed() {
diff --git a/pkg/sql/update.go b/pkg/sql/update.go
index 31496d3b8e2f..1b0ea5c9e2db 100644
--- a/pkg/sql/update.go
+++ b/pkg/sql/update.go
@@ -876,7 +876,7 @@ func fillDefault(expr tree.Expr, index int, defaultExprs []tree.TypedExpr) tree.
 func checkHasNoComputedCols(cols []sqlbase.ColumnDescriptor) error {
 	for i := range cols {
 		if cols[i].IsComputed() {
-			return sqlbase.CannotWriteToComputedColError(&cols[i])
+			return sqlbase.CannotWriteToComputedColError(cols[i].Name)
 		}
 	}
 	return nil