# Scan runs a scan of a specified index of a table, possibly with an index
# constraint and/or a hard limit.
define Scan {
Table cat.Table
Index cat.Index
Params exec.ScanParams
ReqOrdering exec.OutputOrdering
}
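# Values returns the given rows, expressed as typed expressions, as a result
# set with the given result columns.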
define Values {
Rows [][]tree.TypedExpr
Columns colinfo.ResultColumns
}
# Filter applies a filter on the results of the given input node.
define Filter {
Input exec.Node
Filter tree.TypedExpr
ReqOrdering exec.OutputOrdering
}
# InvertedFilter applies a span expression on the results of the given input
# node.
define InvertedFilter {
Input exec.Node
InvFilter *inverted.SpanExpression
PreFiltererExpr tree.TypedExpr
PreFiltererType *types.T
InvColumn exec.NodeColumnOrdinal
}
# SimpleProject applies a "simple" projection on the results of the given input
# node. A simple projection is one that does not involve new expressions; it's
# just a reshuffling of columns. This is a more efficient version of
# ConstructRender. The colNames argument is optional; if it is nil, the names
# of the corresponding input columns are kept.
define SimpleProject {
Input exec.Node
Cols []exec.NodeColumnOrdinal
ReqOrdering exec.OutputOrdering
}
# SerializingProject is similar to SimpleProject, but it allows renaming of
# columns and forces distributed execution to serialize (merge) any parallel
# result streams into a single stream before the projection. This allows any
# required output ordering of the input node to be "materialized", which is
# important for cases where the projection no longer contains the ordering
# columns (e.g. SELECT a FROM t ORDER BY b).
#
# Typically used as the "root" (top-level) operator to ensure the correct
# ordering and naming of columns.
define SerializingProject {
Input exec.Node
Cols []exec.NodeColumnOrdinal
ColNames []string
}
# Render applies a projection on the results of the given input node. The
# projection can contain new expressions. The input expression slice will be
# modified.
define Render {
Input exec.Node
Columns colinfo.ResultColumns
Exprs tree.TypedExprs
ReqOrdering exec.OutputOrdering
}
# ApplyJoin runs an apply join between an input node (the left side of the join)
# and a RelExpr that has outer columns (the right side of the join) by replacing
# the outer columns of the right side RelExpr with data from each row of the
# left side of the join. The apply join can be any kind of join except for
# right outer and full outer.
#
# To plan the right-hand side, planRightSideFn must be called for each left
# row. This function generates a plan (using the same factory) that produces
# the rightColumns (in order).
#
# onCond is the join condition.
define ApplyJoin {
JoinType descpb.JoinType
Left exec.Node
RightColumns colinfo.ResultColumns
OnCond tree.TypedExpr
PlanRightSideFn exec.ApplyJoinPlanRightSideFn
}
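# For example (hypothetical schema), a correlated lateral subquery such as
#
#   SELECT * FROM t, LATERAL (SELECT * FROM u WHERE u.a = t.a LIMIT 1) AS sub
#
# may be planned as an ApplyJoin when the optimizer cannot decorrelate it:
# PlanRightSideFn re-plans the right side for every row of t, substituting that
# row's values for the outer columns.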
# HashJoin runs a hash-join between the results of two input nodes.
#
# The leftEqColsAreKey/rightEqColsAreKey flags, if set, indicate that the
# equality columns form a key in the left/right input.
#
# The extraOnCond expression can refer to columns from both inputs using
# IndexedVars (first the left columns, then the right columns).
define HashJoin {
JoinType descpb.JoinType
Left exec.Node
Right exec.Node
LeftEqCols []exec.NodeColumnOrdinal
RightEqCols []exec.NodeColumnOrdinal
LeftEqColsAreKey bool
RightEqColsAreKey bool
ExtraOnCond tree.TypedExpr
}
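# To illustrate the IndexedVar numbering in ExtraOnCond: if the left input
# produces columns (a, b) and the right input produces columns (c, d), then
# @1 and @2 refer to a and b while @3 and @4 refer to c and d, so a condition
# such as a < d is expressed as @1 < @4.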
# MergeJoin runs a merge join.
# The ON expression can refer to columns from both inputs using IndexedVars
# (first the left columns, then the right columns). In addition, the i-th
# column in leftOrdering is constrained to equal the i-th column in
# rightOrdering. The directions must match between the two orderings.
define MergeJoin {
JoinType descpb.JoinType
Left exec.Node
Right exec.Node
OnCond tree.TypedExpr
LeftOrdering colinfo.ColumnOrdering
RightOrdering colinfo.ColumnOrdering
ReqOrdering exec.OutputOrdering
LeftEqColsAreKey bool
RightEqColsAreKey bool
}
# GroupBy runs an aggregation. A set of aggregations is performed for each group
# of values on the groupCols.
# A row is produced for each set of distinct values on the group columns. The
# row contains the values of the grouping columns, followed by one value for
# each aggregation.
define GroupBy {
Input exec.Node
GroupCols []exec.NodeColumnOrdinal
# If set, the input is guaranteed to have this ordering and a "streaming"
# aggregation is performed (i.e. aggregation happens separately for each
# distinct set of values on the set of columns in the ordering).
GroupColOrdering colinfo.ColumnOrdering
Aggregations []exec.AggInfo
ReqOrdering exec.OutputOrdering
}
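# For example (hypothetical schema), for
#
#   SELECT a, sum(b) FROM t GROUP BY a
#
# GroupCols is {a} and Aggregations contains sum(b); if the input happens to be
# ordered on a, GroupColOrdering can be set to (a ASC) and the aggregation runs
# in a streaming fashion, one consecutive group at a time.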
# ScalarGroupBy runs a scalar aggregation, i.e. one which performs a set of
# aggregations on all the input rows (as a single group) and has exactly one
# result row (even when there are no input rows). The output row has one value
# for each aggregation.
define ScalarGroupBy {
Input exec.Node
Aggregations []exec.AggInfo
}
# Distinct filters out rows such that only the first row is kept for each set of
# values along the distinct columns. The orderedCols are a subset of
# distinctCols; the input is required to be ordered along these columns (i.e.
# all rows with the same values on these columns are a contiguous part of the
# input). reqOrdering specifies the required output ordering, and if not empty,
# the input is already ordered according to it.
define Distinct {
Input exec.Node
DistinctCols exec.NodeColumnOrdinalSet
OrderedCols exec.NodeColumnOrdinalSet
ReqOrdering exec.OutputOrdering
NullsAreDistinct bool
ErrorOnDup string
}
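# For example (hypothetical schema), for
#
#   SELECT DISTINCT a, b FROM t
#
# DistinctCols is {a, b}; if the input is already ordered on a, OrderedCols can
# be set to {a}, since all rows with equal values of a arrive contiguously.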
# SetOp performs a UNION / INTERSECT / EXCEPT operation (either the ALL or the
# DISTINCT version). The left and right nodes must have the same number of
# columns.
#
# HardLimit can only be set for UNION ALL operations. It is used to implement
# locality optimized search, and instructs the execution engine that it should
# execute the left node to completion and possibly short-circuit if the limit is
# reached before executing the right node. The limit is guaranteed but the
# short-circuit behavior is not.
define SetOp {
Typ tree.UnionType
All bool
Left exec.Node
Right exec.Node
HardLimit uint64
}
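# As an illustrative sketch of locality optimized search: a point lookup such as
#
#   SELECT v FROM regional_table WHERE pk = 10
#
# may be planned as a UNION ALL of a scan over the local region's rows and a
# scan over the remote regions' rows, with HardLimit = 1 so the remote (right)
# branch can be skipped once the local branch has found the row.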
# Sort performs a resorting of the rows produced by the input node.
#
# When the input is partially sorted we can execute a "segmented" sort. In
# this case alreadyOrderedPrefix is non-zero and the input is ordered by
# ordering[:alreadyOrderedPrefix].
define Sort {
Input exec.Node
Ordering exec.OutputOrdering
AlreadyOrderedPrefix int
}
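# For example, if the required ordering is (a ASC, b ASC) and the input is
# already ordered on (a ASC), AlreadyOrderedPrefix is 1 and the sort only has
# to reorder rows within each group of equal a values.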
# Ordinality appends an ordinality column to each row in the input node.
define Ordinality {
Input exec.Node
ColName string
}
# IndexJoin performs an index join. The input contains the primary key (on the
# columns identified as keyCols).
#
# The index join produces the given table columns (in ordinal order).
define IndexJoin {
Input exec.Node
Table cat.Table
KeyCols []exec.NodeColumnOrdinal
TableCols exec.TableColumnOrdinalSet
ReqOrdering exec.OutputOrdering
}
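# For example (hypothetical schema), given t (k INT PRIMARY KEY, a INT, b INT)
# with a secondary index on a that does not store b,
#
#   SELECT b FROM t WHERE a = 10
#
# may be planned as a scan of the secondary index (producing k) followed by an
# IndexJoin with KeyCols = {k} that fetches b from the primary index.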
# LookupJoin performs a lookup join.
#
# The eqCols are columns from the input used as keys for the columns of the
# index (or a prefix of them); eqColsAreKey is set to true if the eqCols form a
# key in the table (and thus each input row matches with at most one index row);
# lookupExpr is used instead of eqCols when the lookup condition is more
# complicated than a simple equality between input columns and index columns
# (eqColsAreKey will be true in this case if the columns that are part of the
# simple equality join conditions form a key in the table); lookupCols are
# ordinals for the table columns we are retrieving.
#
# The node produces the columns in the input and (unless join type is
# LeftSemiJoin or LeftAntiJoin) the lookupCols, ordered by ordinal. The ON
# condition can refer to these using IndexedVars.
#
# If LocalityOptimized is true, we are performing a locality optimized search.
# In order for this to work correctly, the execution engine must create a local
# DistSQL plan for the main query (subqueries and postqueries need not be local).
define LookupJoin {
JoinType descpb.JoinType
Input exec.Node
Table cat.Table
Index cat.Index
EqCols []exec.NodeColumnOrdinal
EqColsAreKey bool
LookupExpr tree.TypedExpr
LookupCols exec.TableColumnOrdinalSet
OnCond tree.TypedExpr
IsSecondJoinInPairedJoiner bool
ReqOrdering exec.OutputOrdering
Locking *tree.LockingItem
LocalityOptimized bool
}
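# For example (hypothetical schema), a join such as
#
#   SELECT * FROM small INNER JOIN big ON small.id = big.id
#
# where big.id is big's primary key may be planned as a LookupJoin into big's
# primary index, with EqCols referencing small.id and EqColsAreKey = true, so
# each input row matches at most one looked-up row.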
# InvertedJoin performs a lookup join into an inverted index.
#
# invertedExpr is used to find the keys to look up in the index; prefixEqCols
# are columns from the input used as keys for the non-inverted prefix columns,
# if the index is a multi-column inverted index; lookupCols are ordinals for the
# table columns we are retrieving.
#
# The node produces the columns in the input and (unless join type is
# LeftSemiJoin or LeftAntiJoin) the lookupCols, ordered by ordinal. The ON
# condition can refer to these using IndexedVars. Note that lookupCols
# includes the inverted column.
define InvertedJoin {
JoinType descpb.JoinType
InvertedExpr tree.TypedExpr
Input exec.Node
Table cat.Table
Index cat.Index
PrefixEqCols []exec.NodeColumnOrdinal
LookupCols exec.TableColumnOrdinalSet
OnCond tree.TypedExpr
IsFirstJoinInPairedJoiner bool
ReqOrdering exec.OutputOrdering
}
# ZigzagJoin performs a zigzag join.
#
# Each side of the join has two kinds of columns: fixed columns and equal
# columns. The fixed columns correspond 1-to-1 to a prefix of the index columns.
# The fixed columns and the equal columns together also form a prefix of the
# index columns.
define ZigzagJoin {
# Left table and index.
LeftTable cat.Table
LeftIndex cat.Index
# LeftCols are the columns that are scanned from the left index.
LeftCols exec.TableColumnOrdinalSet
# LeftFixedVals contains values for the fixed columns (a prefix of the
# index columns).
LeftFixedVals []tree.TypedExpr
# LeftEqCols are the left table columns that have equality constraints,
# corresponding 1-1 to RightEqCols.
LeftEqCols []exec.TableColumnOrdinal
# Right table and index.
RightTable cat.Table
RightIndex cat.Index
# RightCols are the columns that are scanned from the right index.
RightCols exec.TableColumnOrdinalSet
# RightFixedVals contains values for the fixed columns (a prefix of the
# index columns).
RightFixedVals []tree.TypedExpr
# RightEqCols are the right table columns that have equality constraints,
# corresponding 1-1 to LeftEqCols.
RightEqCols []exec.TableColumnOrdinal
# OnCond is an extra filter that is evaluated on the results.
# TODO(radu): remove this (it can be a separate Select).
OnCond tree.TypedExpr
ReqOrdering exec.OutputOrdering
}
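# For example (hypothetical schema), given t (k INT PRIMARY KEY, a INT, b INT)
# with separate secondary indexes on a and on b,
#
#   SELECT k FROM t WHERE a = 5 AND b = 7
#
# may be planned as a ZigzagJoin in which each side fixes one index's leading
# column (LeftFixedVals = [5], RightFixedVals = [7]) and the sides are joined
# on the primary key column (LeftEqCols = RightEqCols = {k}).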
# Limit implements LIMIT and/or OFFSET on the results of the given node. If one
# or the other is not needed, then it is set to nil.
define Limit {
Input exec.Node
Limit tree.TypedExpr
Offset tree.TypedExpr
}
# Max1Row permits at most one row from the given input node, causing an error
# with the given text at runtime if the node tries to return more than one row.
define Max1Row {
Input exec.Node
ErrorText string
}
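# For example, a scalar subquery such as
#
#   SELECT (SELECT v FROM t) AS single
#
# is wrapped in Max1Row so that an error along the lines of "more than one row
# returned by a subquery used as an expression" is raised at runtime if t
# contains more than one row.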
# ProjectSet performs a lateral cross join between the output of the given node
# and the functional zip of the given expressions.
define ProjectSet {
Input exec.Node
Exprs tree.TypedExprs
ZipCols colinfo.ResultColumns
NumColsPerGen []int
}
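# For example, in
#
#   SELECT generate_series(1, 3), unnest(ARRAY['a', 'b']) FROM t
#
# the two generators are zipped row by row (the shorter one padded with NULLs),
# and NumColsPerGen would be [1, 1] since each generator emits one column.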
# Window executes a window function over the given node.
define Window {
Input exec.Node
Window exec.WindowInfo
}
# ExplainOpt implements EXPLAIN (OPT), showing information about the given plan.
define ExplainOpt {
Plan string
EnvOpts exec.ExplainEnvData
}
# Explain implements EXPLAIN, showing information about the given plan.
#
# When the operator is created, it creates an ExplainFactory and calls BuildFn
# to construct the plan against that factory.
define Explain {
Options *tree.ExplainOptions
StmtType tree.StatementType
BuildFn exec.BuildPlanForExplainFn
}
# ShowTrace implements a SHOW TRACE FOR SESSION statement.
define ShowTrace {
Typ tree.ShowTraceType
Compact bool
}
# Insert implements an INSERT statement (including ON CONFLICT DO NOTHING, but
# not other ON CONFLICT clauses).
#
# The input columns are inserted into a subset of columns in the table, in the
# same order they're defined. The insertCols set contains the ordinal positions
# of columns in the table into which values are inserted. All columns are
# expected to be present except delete-only mutation columns, since those do not
# need to participate in an insert operation.
define Insert {
Input exec.Node
Table cat.Table
ArbiterIndexes cat.IndexOrdinals
ArbiterConstraints cat.UniqueOrdinals
InsertCols exec.TableColumnOrdinalSet
ReturnCols exec.TableColumnOrdinalSet
CheckCols exec.CheckOrdinalSet
# If set, the operator will commit the transaction as part of its execution.
# This is false when executing inside an explicit transaction, or there are
# multiple mutations in a statement, or the output of the mutation is
# processed through side-effecting expressions.
AutoCommit bool
}
# InsertFastPath implements a special (but very common) case of insert,
# satisfying the following conditions:
# - the input is Values with at most mutations.MaxBatchSize rows, and there are
# no subqueries;
# - there are no other mutations in the statement, and the output of the
# insert is not processed through side-effecting expressions;
# - there are no self-referencing foreign keys;
# - all FK checks can be performed using direct lookups into unique indexes.
#
# In this case, the foreign-key checks can run before (or even concurrently
# with) the insert. If they are run before, the insert is allowed to
# auto-commit.
define InsertFastPath {
Rows [][]tree.TypedExpr
Table cat.Table
InsertCols exec.TableColumnOrdinalSet
ReturnCols exec.TableColumnOrdinalSet
CheckCols exec.CheckOrdinalSet
FkChecks []exec.InsertFastPathFKCheck
# If set, the operator will commit the transaction as part of its execution.
# This is false when executing inside an explicit transaction.
AutoCommit bool
}
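# For example (hypothetical schema), with child (id INT PRIMARY KEY, parent_id
# INT REFERENCES parent (id)), a statement such as
#
#   INSERT INTO child VALUES (1, 10)
#
# may take the fast path: FkChecks describes a direct lookup of the value 10 in
# parent's primary index instead of a separate FK-check query plan.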
# Update implements an UPDATE statement. The input contains columns that were
# fetched from the target table, and that provide existing values that can be
# used to formulate the new encoded value that will be written back to the table
# (updating any column in a family requires having the values of all other
# columns). The input also contains computed columns that provide new values for
# any updated columns.
#
# The fetchCols and updateCols sets contain the ordinal positions of the
# fetch and update columns in the target table. The input must contain those
# columns in the same order as they appear in the table schema, with the
# fetch columns first and the update columns second.
#
# The passthrough parameter contains all the result columns that are part of
# the input node that the update node needs to return (passing through from
# the input). The passthrough columns are used to return any columns from the
# FROM tables that are referenced in the RETURNING clause.
#
# If allowAutoCommit is set, the operator is allowed to commit the
# transaction (if appropriate, i.e. if it is in an implicit transaction).
# This is false if there are multiple mutations in a statement, or the output
# of the mutation is processed through side-effecting expressions.
define Update {
Input exec.Node
Table cat.Table
FetchCols exec.TableColumnOrdinalSet
UpdateCols exec.TableColumnOrdinalSet
ReturnCols exec.TableColumnOrdinalSet
Checks exec.CheckOrdinalSet
Passthrough colinfo.ResultColumns
# If set, the operator will commit the transaction as part of its execution.
AutoCommit bool
}
# Upsert implements an INSERT..ON CONFLICT DO UPDATE or UPSERT statement.
#
# For each input row, Upsert will test the canaryCol. If it is null, then it
# will insert a new row. If it is not null, then Upsert will update an existing
# row.
# The input is expected to contain the columns to be inserted, followed by the
# columns containing existing values, and finally the columns containing new
# values.
#
# The length of each group of input columns can be up to the number of
# columns in the given table. The insertCols, fetchCols, and updateCols sets
# contain the ordinal positions of the table columns that are involved in
# the Upsert. For example:
#
# CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT)
# INSERT INTO abc VALUES (10, 20, 30) ON CONFLICT (a) DO UPDATE SET b=25
#
# insertCols = {0, 1, 2}
# fetchCols = {0, 1, 2}
# updateCols = {1}
#
# The input is expected to first have 3 columns that will be inserted into
# columns {0, 1, 2} of the table. The next 3 columns contain the existing
# values of columns {0, 1, 2} of the table. The last column contains the
# new value for column {1} of the table.
define Upsert {
Input exec.Node
Table cat.Table
ArbiterIndexes cat.IndexOrdinals
ArbiterConstraints cat.UniqueOrdinals
CanaryCol exec.NodeColumnOrdinal
InsertCols exec.TableColumnOrdinalSet
FetchCols exec.TableColumnOrdinalSet
UpdateCols exec.TableColumnOrdinalSet
ReturnCols exec.TableColumnOrdinalSet
Checks exec.CheckOrdinalSet
# If set, the operator will commit the transaction as part of its execution.
# This is false when executing inside an explicit transaction, or there are
# multiple mutations in a statement, or the output of the mutation is
# processed through side-effecting expressions.
AutoCommit bool
}
# Delete implements a DELETE statement. The input contains columns that were
# fetched from the target table, and that will be deleted.
#
# The fetchCols set contains the ordinal positions of the fetch columns in
# the target table. The input must contain those columns in the same order
# as they appear in the table schema.
define Delete {
Input exec.Node
Table cat.Table
FetchCols exec.TableColumnOrdinalSet
ReturnCols exec.TableColumnOrdinalSet
# If set, the operator will commit the transaction as part of its execution.
# This is false when executing inside an explicit transaction, or there are
# multiple mutations in a statement, or the output of the mutation is
# processed through side-effecting expressions.
AutoCommit bool
}
# DeleteRange efficiently deletes contiguous rows stored in the given table's
# primary index. This fast path is only possible when certain conditions hold
# true:
# - there are no secondary indexes;
# - the input to the delete is a scan (without limits);
# - the table is not involved in interleaving, or it is at the root of an
# interleaving hierarchy with cascading FKs such that a delete of a row
# cascades and deletes all interleaved rows corresponding to that row;
# - there are no inbound FKs to the table (other than within the
# interleaving as described above).
#
# See the comment for ConstructScan for descriptions of the needed and
# indexConstraint parameters, since DeleteRange combines Delete + Scan into a
# single operator.
#
# If any interleavedTables are passed, they are all the descendant tables in
# an interleaving hierarchy we are deleting from.
define DeleteRange {
Table cat.Table
Needed exec.TableColumnOrdinalSet
IndexConstraint *constraint.Constraint
InterleavedTables []cat.Table
# If set, the operator will commit the transaction as part of its execution.
# This is false when executing inside an explicit transaction, or there are
# multiple mutations in a statement, or the output of the mutation is
# processed through side-effecting expressions, or the operation might
# process too many rows.
AutoCommit bool
}
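# For example (hypothetical schema), given t (k INT PRIMARY KEY, v INT) with no
# secondary indexes and no inbound foreign keys,
#
#   DELETE FROM t WHERE k >= 10 AND k < 20
#
# may be executed as a single DeleteRange over the corresponding span of the
# primary index, rather than as a Scan followed by a Delete.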
# CreateTable implements a CREATE TABLE statement.
define CreateTable {
Schema cat.Schema
Ct *tree.CreateTable
}
# CreateTableAs implements a CREATE TABLE AS statement.
define CreateTableAs {
Input exec.Node
Schema cat.Schema
Ct *tree.CreateTable
}
# CreateView implements a CREATE VIEW statement.
define CreateView {
Schema cat.Schema
ViewName *cat.DataSourceName
IfNotExists bool
Replace bool
Persistence tree.Persistence
Materialized bool
ViewQuery string
Columns colinfo.ResultColumns
deps opt.ViewDeps
typeDeps opt.ViewTypeDeps
}
# SequenceSelect implements a scan of a sequence as a data source.
define SequenceSelect {
Sequence cat.Sequence
}
# SaveTable passes through all the input rows unchanged, but also creates a
# table and inserts all the rows into it.
define SaveTable {
Input exec.Node
Table *cat.DataSourceName
ColNames []string
}
# ErrorIfRows returns no results, but causes an execution error if the input
# returns any rows.
define ErrorIfRows {
Input exec.Node
# MkErr is used to create the error; it is passed an input row.
MkErr exec.MkErrFn
}
# Opaque implements operators that have no relational inputs and which require
# no specific treatment by the optimizer.
define Opaque {
Metadata opt.OpaqueMetadata
}
# AlterTableSplit implements ALTER TABLE/INDEX SPLIT AT.
define AlterTableSplit {
Index cat.Index
Input exec.Node
Expiration tree.TypedExpr
}
# AlterTableUnsplit implements ALTER TABLE/INDEX UNSPLIT AT.
define AlterTableUnsplit {
Index cat.Index
Input exec.Node
}
# AlterTableUnsplitAll implements ALTER TABLE/INDEX UNSPLIT ALL.
define AlterTableUnsplitAll {
Index cat.Index
}
# AlterTableRelocate implements ALTER TABLE/INDEX EXPERIMENTAL_RELOCATE.
define AlterTableRelocate {
Index cat.Index
input exec.Node
relocateLease bool
relocateNonVoters bool
}
# Buffer passes through the input rows but also saves them in a buffer, which
# can be referenced from elsewhere in the query (using ScanBuffer).
define Buffer {
Input exec.Node
Label string
}
# ScanBuffer refers to a node constructed by Buffer or passed to
# RecursiveCTEIterationFn.
define ScanBuffer {
Ref exec.Node
Label string
}
# RecursiveCTE executes a recursive CTE:
# * the initial plan is run first; the results are emitted and also saved
# in a buffer.
# * so long as the last buffer is not empty:
# - the RecursiveCTEIterationFn is used to create a plan for the
# recursive side; a reference to the last buffer is passed to this
# function. The returned plan uses this reference with a
# ConstructScanBuffer call.
# - the plan is executed; the results are emitted and also saved in a new
# buffer for the next iteration.
define RecursiveCTE {
Initial exec.Node
Fn exec.RecursiveCTEIterationFn
Label string
}
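# For example,
#
#   WITH RECURSIVE r (n) AS (
#     SELECT 1
#     UNION ALL
#     SELECT n + 1 FROM r WHERE n < 5
#   )
#   SELECT n FROM r
#
# runs the initial plan (SELECT 1) first, then repeatedly uses Fn to plan the
# recursive branch over the rows buffered by the previous iteration, until an
# iteration produces no rows.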
# ControlJobs implements PAUSE/CANCEL/RESUME JOBS.
define ControlJobs {
Command tree.JobCommand
input exec.Node
}
# ControlSchedules implements PAUSE/CANCEL/DROP SCHEDULES.
define ControlSchedules {
Command tree.ScheduleCommand
input exec.Node
}
# CancelQueries implements CANCEL QUERIES.
define CancelQueries {
Input exec.Node
IfExists bool
}
# CancelSessions implements CANCEL SESSIONS.
define CancelSessions {
Input exec.Node
IfExists bool
}
# CreateStatistics implements CREATE STATISTICS.
define CreateStatistics {
Cs *tree.CreateStats
}
# Export implements EXPORT.
define Export {
Input exec.Node
FileName tree.TypedExpr
FileFormat string
Options []exec.KVOption
}