-
Notifications
You must be signed in to change notification settings - Fork 3.8k
/
exec_util.go
2435 lines (2154 loc) · 84.1 KB
/
exec_util.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"bytes"
"context"
"fmt"
"net"
"net/url"
"reflect"
"regexp"
"sort"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/featureflag"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/migration"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/status/statuspb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/accessors"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/hydratedtables"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/colexec"
"github.com/cockroachdb/cockroach/pkg/sql/contention"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/execstats"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/bitarray"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// ClusterOrganization is the organization name.
// It is a public, string-valued cluster setting; an empty value means the
// organization has not been configured.
var ClusterOrganization = settings.RegisterStringSetting(
	"cluster.organization",
	"organization name",
	"", // default: unset
).WithPublic()
// ClusterIsInternal returns true if the cluster organization contains
// "Cockroach Labs", indicating an internal cluster.
func ClusterIsInternal(sv *settings.Values) bool {
	org := ClusterOrganization.Get(sv)
	return strings.Contains(org, "Cockroach Labs")
}
// ClusterSecret is a cluster specific secret. This setting is
// non-reportable.
var ClusterSecret = func() *settings.StringSetting {
	secret := settings.RegisterStringSetting(
		"cluster.secret",
		"cluster specific secret",
		"",
	)
	// String settings are non-reportable by default, but mark this one
	// explicitly so a future change to the default cannot leak it.
	secret.SetReportable(false)
	return secret
}()
// defaultIntSize controls how a "naked" INT type will be parsed.
// TODO(bob): Change this to 4 in v2.3; https://github.com/cockroachdb/cockroach/issues/32534
// TODO(bob): Remove or no-op this in v2.4: https://github.com/cockroachdb/cockroach/issues/32844
var defaultIntSize = func() *settings.IntSetting {
	s := settings.RegisterIntSetting(
		"sql.defaults.default_int_size",
		"the size, in bytes, of an INT type", 8, func(i int64) error {
			// Only the INT4 and INT8 widths are representable.
			if i != 4 && i != 8 {
				return errors.New("only 4 or 8 are valid values")
			}
			return nil
		})
	s.SetVisibility(settings.Public)
	return s
}()
// allowCrossDatabaseFKsSetting is the name of the cluster setting gating
// cross-database foreign key creation.
const allowCrossDatabaseFKsSetting = "sql.cross_db_fks.enabled"

var allowCrossDatabaseFKs = settings.RegisterBoolSetting(
	allowCrossDatabaseFKsSetting,
	"if true, creating foreign key references across databases is allowed",
	false, // disallowed by default
).WithPublic()

// allowCrossDatabaseViewsSetting is the name of the cluster setting gating
// creation of views that reference other databases.
const allowCrossDatabaseViewsSetting = "sql.cross_db_views.enabled"

var allowCrossDatabaseViews = settings.RegisterBoolSetting(
	allowCrossDatabaseViewsSetting,
	"if true, creating views that refer to other databases is allowed",
	false, // disallowed by default
).WithPublic()

// allowCrossDatabaseSeqOwnerSetting is the name of the cluster setting gating
// sequences owned by tables in other databases.
const allowCrossDatabaseSeqOwnerSetting = "sql.cross_db_sequence_owners.enabled"

var allowCrossDatabaseSeqOwner = settings.RegisterBoolSetting(
	allowCrossDatabaseSeqOwnerSetting,
	"if true, creating sequences owned by tables from other databases is allowed",
	false, // disallowed by default
).WithPublic()
// traceTxnThreshold can be used to log SQL transactions that take
// longer than duration to complete. For example, traceTxnThreshold=1s
// will log the trace for any transaction that takes 1s or longer. To
// log traces for all transactions use traceTxnThreshold=1ns. Note
// that any positive duration will enable tracing and will slow down
// all execution because traces are gathered for all transactions even
// if they are not output.
var traceTxnThreshold = settings.RegisterDurationSetting(
	"sql.trace.txn.enable_threshold",
	// NB: the trailing space before the next concatenated fragment is
	// required for the help text to render correctly.
	"duration beyond which all transactions are traced (set to 0 to "+
		"disable). This setting is coarser grained than "+
		"sql.trace.stmt.enable_threshold because it applies to all statements "+
		"within a transaction as well as client communication (e.g. retries).", 0,
).WithPublic()
// traceStmtThreshold is identical to traceTxnThreshold except it applies to
// individual statements in a transaction. The motivation for this setting is
// to be able to reduce the noise associated with a larger transaction (e.g.
// round trips to client).
var traceStmtThreshold = settings.RegisterDurationSetting(
	"sql.trace.stmt.enable_threshold",
	"duration beyond which all statements are traced (set to 0 to disable). "+
		"This applies to individual statements within a transaction and is therefore "+
		"finer-grained than sql.trace.txn.enable_threshold.",
	0, // disabled by default
).WithPublic()
// traceSessionEventLogEnabled can be used to enable the event log
// that is normally kept for every SQL connection. The event log has a
// non-trivial performance impact and also reveals SQL statements
// which may be a privacy concern.
var traceSessionEventLogEnabled = settings.RegisterBoolSetting(
	"sql.trace.session_eventlog.enabled",
	"set to true to enable session tracing. "+
		"Note that enabling this may have a non-trivial negative performance impact.",
	false, // disabled by default due to performance/privacy impact
).WithPublic()
// ReorderJoinsLimitClusterSettingName is the name of the cluster setting for
// the maximum number of joins to reorder.
const ReorderJoinsLimitClusterSettingName = "sql.defaults.reorder_joins_limit"

// ReorderJoinsLimitClusterValue controls the cluster default for the maximum
// number of joins reordered.
var ReorderJoinsLimitClusterValue = settings.RegisterIntSetting(
	ReorderJoinsLimitClusterSettingName,
	"default number of joins to reorder",
	opt.DefaultJoinOrderLimit,
	// Validate that the limit stays within [0, opt.MaxReorderJoinsLimit].
	func(limit int64) error {
		if limit < 0 || limit > opt.MaxReorderJoinsLimit {
			return pgerror.Newf(pgcode.InvalidParameterValue,
				"cannot set %s to a value less than 0 or greater than %v",
				ReorderJoinsLimitClusterSettingName,
				opt.MaxReorderJoinsLimit,
			)
		}
		return nil
	},
)
// requireExplicitPrimaryKeysClusterMode is the cluster default for the
// require_explicit_primary_keys session setting.
var requireExplicitPrimaryKeysClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.require_explicit_primary_keys.enabled",
	"default value for requiring explicit primary keys in CREATE TABLE statements",
	false,
)

// temporaryTablesEnabledClusterMode is the cluster default for the
// experimental_enable_temp_tables session setting.
var temporaryTablesEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_temporary_tables.enabled",
	"default value for experimental_enable_temp_tables; allows for use of temporary tables by default",
	false,
)
// implicitColumnPartitioningEnabledClusterMode is the cluster default for the
// experimental_enable_implicit_column_partitioning session setting.
var implicitColumnPartitioningEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_implicit_column_partitioning.enabled",
	// NB: the description previously referenced experimental_enable_temp_tables
	// due to a copy-paste error; it now names the correct session variable.
	"default value for experimental_enable_implicit_column_partitioning; "+
		"allows for the use of implicit column partitioning",
	false,
)
// dropEnumValueEnabledClusterMode is the cluster default for the
// enable_drop_enum_value session setting.
var dropEnumValueEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.drop_enum_value.enabled",
	"default value for enable_drop_enum_value; allows for dropping enum values",
	false,
)

// hashShardedIndexesEnabledClusterMode is the cluster default for the
// experimental_enable_hash_sharded_indexes session setting.
var hashShardedIndexesEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_hash_sharded_indexes.enabled",
	"default value for experimental_enable_hash_sharded_indexes; allows for creation of hash sharded indexes by default",
	false,
)

// zigzagJoinClusterMode is the cluster default for the enable_zigzag_join
// session setting.
var zigzagJoinClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.zigzag_join.enabled",
	"default value for enable_zigzag_join session setting; allows use of zig-zag join by default",
	true,
)

// optDrivenFKCascadesClusterLimit is the cluster default for the
// foreign_key_cascades_limit session setting.
var optDrivenFKCascadesClusterLimit = settings.RegisterIntSetting(
	"sql.defaults.foreign_key_cascades_limit",
	"default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query",
	10000,
	settings.NonNegativeInt,
)

// preferLookupJoinsForFKs is the cluster default for the
// prefer_lookup_joins_for_fks session setting.
var preferLookupJoinsForFKs = settings.RegisterBoolSetting(
	"sql.defaults.prefer_lookup_joins_for_fks.enabled",
	"default value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible",
	false,
)
// InterleavedTablesEnabled is the setting that controls whether it's possible
// to create interleaved indexes or tables.
var InterleavedTablesEnabled = settings.RegisterBoolSetting(
	"sql.defaults.interleaved_tables.enabled",
	"allows creation of interleaved tables or indexes",
	false, // disallowed by default
)
// optUseHistogramsClusterMode controls the cluster default for whether
// histograms are used by the optimizer for cardinality estimation.
// Note that it does not control histogram collection; regardless of the
// value of this setting, the optimizer cannot use histograms if they
// haven't been collected. Collection of histograms is controlled by the
// cluster setting sql.stats.histogram_collection.enabled.
var optUseHistogramsClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.optimizer_use_histograms.enabled",
	"default value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default",
	true,
)
// optUseMultiColStatsClusterMode controls the cluster default for whether
// multi-column stats are used by the optimizer for cardinality estimation.
// Note that it does not control collection of multi-column stats; regardless
// of the value of this setting, the optimizer cannot use multi-column stats
// if they haven't been collected. Collection of multi-column stats is
// controlled by the cluster setting sql.stats.multi_column_collection.enabled.
var optUseMultiColStatsClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.optimizer_use_multicol_stats.enabled",
	"default value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default",
	true,
)
// localityOptimizedSearchMode controls the cluster default for the use of
// locality optimized search. If enabled, the optimizer will try to plan scans
// and lookup joins in which local nodes (i.e., nodes in the gateway region) are
// searched for matching rows before remote nodes, in the hope that the
// execution engine can avoid visiting remote nodes.
var localityOptimizedSearchMode = settings.RegisterBoolSetting(
	"sql.defaults.locality_optimized_partitioned_index_scan.enabled",
	"default value for locality_optimized_partitioned_index_scan session setting; "+
		"enables searching for rows in the current region before searching remote regions",
	true,
)
// implicitSelectForUpdateClusterMode is the cluster default for the
// enable_implicit_select_for_update session setting.
var implicitSelectForUpdateClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.implicit_select_for_update.enabled",
	"default value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements",
	true,
)

// insertFastPathClusterMode is the cluster default for the
// enable_insert_fast_path session setting.
var insertFastPathClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.insert_fast_path.enabled",
	"default value for enable_insert_fast_path session setting; enables a specialized insert path",
	true,
)

// experimentalAlterColumnTypeGeneralMode is the cluster default for the
// experimental_alter_column_type session setting.
var experimentalAlterColumnTypeGeneralMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_alter_column_type.enabled",
	"default value for experimental_alter_column_type session setting; "+
		"enables the use of ALTER COLUMN TYPE for general conversions",
	false,
)

// clusterIdleInSessionTimeout is the cluster default for the
// idle_in_session_timeout session setting; 0 disables the timeout.
var clusterIdleInSessionTimeout = settings.RegisterDurationSetting(
	"sql.defaults.idle_in_session_timeout",
	"default value for the idle_in_session_timeout; "+
		"enables automatically killing sessions that exceed the "+
		"idle_in_session_timeout threshold",
	0, // disabled by default
	settings.NonNegativeDuration,
)
// TODO(rytaft): remove this once unique without index constraints are fully
// supported.
//
// experimentalUniqueWithoutIndexConstraintsMode is the cluster default for the
// experimental_enable_unique_without_index_constraints session setting.
var experimentalUniqueWithoutIndexConstraintsMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_enable_unique_without_index_constraints.enabled",
	// NB: a space follows the ";" so the two concatenated fragments do not
	// run together in the rendered help text.
	"default value for experimental_enable_unique_without_index_constraints session setting; "+
		"disables unique without index constraints by default",
	false,
)
// experimentalUseNewSchemaChanger is the cluster default for the
// experimental_use_new_schema_changer session setting.
// (The previous comment here described DistSQLClusterExecMode, a copy-paste
// error; that comment belongs on the DistSQLClusterExecMode setting below.)
var experimentalUseNewSchemaChanger = settings.RegisterEnumSetting(
	"sql.defaults.experimental_new_schema_changer.enabled",
	// NB: a space follows the ";" so the two concatenated fragments do not
	// run together in the rendered help text.
	"default value for experimental_use_new_schema_changer session setting; "+
		"disables new schema changer by default",
	"off",
	map[int64]string{
		int64(sessiondata.UseNewSchemaChangerOff):          "off",
		int64(sessiondata.UseNewSchemaChangerOn):           "on",
		int64(sessiondata.UseNewSchemaChangerUnsafeAlways): "unsafe_always",
	},
)
// experimentalStreamReplicationEnabled is the cluster default for the
// experimental_stream_replication session setting.
var experimentalStreamReplicationEnabled = settings.RegisterBoolSetting(
	"sql.defaults.experimental_stream_replication.enabled",
	// NB: a space follows the ";" so the two concatenated fragments do not
	// run together in the rendered help text.
	"default value for experimental_stream_replication session setting; "+
		"enables the ability to setup a replication stream",
	false,
)
// ExperimentalDistSQLPlanningClusterSettingName is the name for the cluster
// setting that controls experimentalDistSQLPlanningClusterMode below.
const ExperimentalDistSQLPlanningClusterSettingName = "sql.defaults.experimental_distsql_planning"

// experimentalDistSQLPlanningClusterMode can be used to enable
// optimizer-driven DistSQL planning that sidesteps intermediate planNode
// transition when going from opt.Expr to DistSQL processor specs.
var experimentalDistSQLPlanningClusterMode = settings.RegisterEnumSetting(
	ExperimentalDistSQLPlanningClusterSettingName,
	"default experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning",
	"off", // disabled by default
	map[int64]string{
		int64(sessiondata.ExperimentalDistSQLPlanningOff): "off",
		int64(sessiondata.ExperimentalDistSQLPlanningOn):  "on",
	},
)
// VectorizeClusterSettingName is the name for the cluster setting that controls
// the VectorizeClusterMode below.
const VectorizeClusterSettingName = "sql.defaults.vectorize"

// VectorizeClusterMode controls the cluster default for when automatic
// vectorization is enabled.
var VectorizeClusterMode = settings.RegisterEnumSetting(
	VectorizeClusterSettingName,
	"default vectorize mode",
	"on", // vectorized execution is on by default
	map[int64]string{
		int64(sessiondatapb.VectorizeOff): "off",
		int64(sessiondatapb.VectorizeOn):  "on",
	},
)
// VectorizeRowCountThresholdClusterValue controls the cluster default for the
// vectorize row count threshold. When it is met, the vectorized execution
// engine will be used if possible.
var VectorizeRowCountThresholdClusterValue = settings.RegisterIntSetting(
	"sql.defaults.vectorize_row_count_threshold",
	"default vectorize row count threshold",
	colexec.DefaultVectorizeRowCountThreshold,
	// Reject negative thresholds.
	func(v int64) error {
		if v < 0 {
			return pgerror.Newf(pgcode.InvalidParameterValue,
				"cannot set sql.defaults.vectorize_row_count_threshold to a negative value: %d", v)
		}
		return nil
	},
)
// DistSQLClusterExecMode controls the cluster default for when DistSQL is used.
var DistSQLClusterExecMode = settings.RegisterEnumSetting(
	"sql.defaults.distsql",
	"default distributed SQL execution mode",
	"auto", // let the planner decide per-query by default
	map[int64]string{
		int64(sessiondata.DistSQLOff):  "off",
		int64(sessiondata.DistSQLAuto): "auto",
		int64(sessiondata.DistSQLOn):   "on",
	},
)
// SerialNormalizationMode controls how the SERIAL type is interpreted in table
// definitions.
var SerialNormalizationMode = settings.RegisterEnumSetting(
	"sql.defaults.serial_normalization",
	"default handling of SERIAL in table definitions",
	"rowid", // SERIAL maps to a rowid-backed column by default
	map[int64]string{
		int64(sessiondata.SerialUsesRowID):             "rowid",
		int64(sessiondata.SerialUsesVirtualSequences):  "virtual_sequence",
		int64(sessiondata.SerialUsesSQLSequences):      "sql_sequence",
		int64(sessiondata.SerialUsesCachedSQLSequences): "sql_sequence_cached",
	},
).WithPublic()
// disallowFullTableScans is the cluster default for the
// disallow_full_table_scans session setting.
var disallowFullTableScans = settings.RegisterBoolSetting(
	`sql.defaults.disallow_full_table_scans.enabled`,
	"setting to true rejects queries that have planned a full table scan",
	false, // full table scans allowed by default
).WithPublic()
// errNoTransactionInProgress is returned when a statement that requires an
// open transaction runs outside of one.
var errNoTransactionInProgress = errors.New("there is no transaction in progress")

// errTransactionInProgress is returned when a transaction is already open.
var errTransactionInProgress = errors.New("there is already a transaction in progress")

// sqlTxnName is the name given to SQL transactions.
const sqlTxnName string = "sql txn"

// metricsSampleInterval is the sampling interval for metrics.
const metricsSampleInterval = 10 * time.Second
// Fully-qualified names for metrics.
//
// The block is organized in three groups: latency/optimizer/failure
// metrics, per-statement-type "started" counters, and per-statement-type
// "executed" (successful) counters. getMetricMeta derives the ".internal"
// variants of these.
var (
	MetaSQLExecLatency = metric.Metadata{
		Name:        "sql.exec.latency",
		Help:        "Latency of SQL statement execution",
		Measurement: "Latency",
		Unit:        metric.Unit_NANOSECONDS,
	}
	MetaSQLServiceLatency = metric.Metadata{
		Name:        "sql.service.latency",
		Help:        "Latency of SQL request execution",
		Measurement: "Latency",
		Unit:        metric.Unit_NANOSECONDS,
	}
	MetaSQLOptFallback = metric.Metadata{
		Name:        "sql.optimizer.fallback.count",
		Help:        "Number of statements which the cost-based optimizer was unable to plan",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSQLOptPlanCacheHits = metric.Metadata{
		Name:        "sql.optimizer.plan_cache.hits",
		Help:        "Number of non-prepared statements for which a cached plan was used",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSQLOptPlanCacheMisses = metric.Metadata{
		Name:        "sql.optimizer.plan_cache.misses",
		Help:        "Number of non-prepared statements for which a cached plan was not used",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDistSQLSelect = metric.Metadata{
		Name:        "sql.distsql.select.count",
		Help:        "Number of DistSQL SELECT statements",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDistSQLExecLatency = metric.Metadata{
		Name:        "sql.distsql.exec.latency",
		Help:        "Latency of DistSQL statement execution",
		Measurement: "Latency",
		Unit:        metric.Unit_NANOSECONDS,
	}
	MetaDistSQLServiceLatency = metric.Metadata{
		Name:        "sql.distsql.service.latency",
		Help:        "Latency of DistSQL request execution",
		Measurement: "Latency",
		Unit:        metric.Unit_NANOSECONDS,
	}
	MetaTxnAbort = metric.Metadata{
		Name:        "sql.txn.abort.count",
		Help:        "Number of SQL transaction abort errors",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaFailure = metric.Metadata{
		Name:        "sql.failure.count",
		Help:        "Number of statements resulting in a planning or runtime error",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSQLTxnLatency = metric.Metadata{
		Name:        "sql.txn.latency",
		Help:        "Latency of SQL transactions",
		Measurement: "Latency",
		Unit:        metric.Unit_NANOSECONDS,
	}
	MetaSQLTxnsOpen = metric.Metadata{
		Name:        "sql.txns.open",
		Help:        "Number of currently open SQL transactions",
		Measurement: "Open SQL Transactions",
		Unit:        metric.Unit_COUNT,
	}
	MetaFullTableOrIndexScan = metric.Metadata{
		Name:        "sql.full.scan.count",
		Help:        "Number of full table or index scans",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	// Below are the metadata for the statement started counters.
	MetaQueryStarted = metric.Metadata{
		Name:        "sql.query.started.count",
		Help:        "Number of SQL queries started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnBeginStarted = metric.Metadata{
		Name:        "sql.txn.begin.started.count",
		Help:        "Number of SQL transaction BEGIN statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnCommitStarted = metric.Metadata{
		Name:        "sql.txn.commit.started.count",
		Help:        "Number of SQL transaction COMMIT statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnRollbackStarted = metric.Metadata{
		Name:        "sql.txn.rollback.started.count",
		Help:        "Number of SQL transaction ROLLBACK statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSelectStarted = metric.Metadata{
		Name:        "sql.select.started.count",
		Help:        "Number of SQL SELECT statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaUpdateStarted = metric.Metadata{
		Name:        "sql.update.started.count",
		Help:        "Number of SQL UPDATE statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaInsertStarted = metric.Metadata{
		Name:        "sql.insert.started.count",
		Help:        "Number of SQL INSERT statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDeleteStarted = metric.Metadata{
		Name:        "sql.delete.started.count",
		Help:        "Number of SQL DELETE statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSavepointStarted = metric.Metadata{
		Name:        "sql.savepoint.started.count",
		Help:        "Number of SQL SAVEPOINT statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaReleaseSavepointStarted = metric.Metadata{
		Name:        "sql.savepoint.release.started.count",
		Help:        "Number of `RELEASE SAVEPOINT` statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRollbackToSavepointStarted = metric.Metadata{
		Name:        "sql.savepoint.rollback.started.count",
		Help:        "Number of `ROLLBACK TO SAVEPOINT` statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRestartSavepointStarted = metric.Metadata{
		Name:        "sql.restart_savepoint.started.count",
		Help:        "Number of `SAVEPOINT cockroach_restart` statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaReleaseRestartSavepointStarted = metric.Metadata{
		Name:        "sql.restart_savepoint.release.started.count",
		Help:        "Number of `RELEASE SAVEPOINT cockroach_restart` statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRollbackToRestartSavepointStarted = metric.Metadata{
		Name:        "sql.restart_savepoint.rollback.started.count",
		Help:        "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDdlStarted = metric.Metadata{
		Name:        "sql.ddl.started.count",
		Help:        "Number of SQL DDL statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaMiscStarted = metric.Metadata{
		Name:        "sql.misc.started.count",
		Help:        "Number of other SQL statements started",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	// Below are the metadata for the statement executed counters.
	MetaQueryExecuted = metric.Metadata{
		Name:        "sql.query.count",
		Help:        "Number of SQL queries executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnBeginExecuted = metric.Metadata{
		Name:        "sql.txn.begin.count",
		Help:        "Number of SQL transaction BEGIN statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnCommitExecuted = metric.Metadata{
		Name:        "sql.txn.commit.count",
		Help:        "Number of SQL transaction COMMIT statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaTxnRollbackExecuted = metric.Metadata{
		Name:        "sql.txn.rollback.count",
		Help:        "Number of SQL transaction ROLLBACK statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSelectExecuted = metric.Metadata{
		Name:        "sql.select.count",
		Help:        "Number of SQL SELECT statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaUpdateExecuted = metric.Metadata{
		Name:        "sql.update.count",
		Help:        "Number of SQL UPDATE statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaInsertExecuted = metric.Metadata{
		Name:        "sql.insert.count",
		Help:        "Number of SQL INSERT statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDeleteExecuted = metric.Metadata{
		Name:        "sql.delete.count",
		Help:        "Number of SQL DELETE statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaSavepointExecuted = metric.Metadata{
		Name:        "sql.savepoint.count",
		Help:        "Number of SQL SAVEPOINT statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaReleaseSavepointExecuted = metric.Metadata{
		Name:        "sql.savepoint.release.count",
		Help:        "Number of `RELEASE SAVEPOINT` statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRollbackToSavepointExecuted = metric.Metadata{
		Name:        "sql.savepoint.rollback.count",
		Help:        "Number of `ROLLBACK TO SAVEPOINT` statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRestartSavepointExecuted = metric.Metadata{
		Name:        "sql.restart_savepoint.count",
		Help:        "Number of `SAVEPOINT cockroach_restart` statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaReleaseRestartSavepointExecuted = metric.Metadata{
		Name:        "sql.restart_savepoint.release.count",
		Help:        "Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaRollbackToRestartSavepointExecuted = metric.Metadata{
		Name:        "sql.restart_savepoint.rollback.count",
		Help:        "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaDdlExecuted = metric.Metadata{
		Name:        "sql.ddl.count",
		Help:        "Number of SQL DDL statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
	MetaMiscExecuted = metric.Metadata{
		Name:        "sql.misc.count",
		Help:        "Number of other SQL statements successfully executed",
		Measurement: "SQL Statements",
		Unit:        metric.Unit_COUNT,
	}
)
// getMetricMeta returns meta adjusted for internal queries when internal is
// true: the name gains an ".internal" suffix, the help text is annotated, and
// the measurement label is replaced. The input is received by value, so the
// caller's metadata is never mutated.
func getMetricMeta(meta metric.Metadata, internal bool) metric.Metadata {
	if !internal {
		return meta
	}
	meta.Name += ".internal"
	meta.Help += " (internal queries)"
	meta.Measurement = "SQL Internal Statements"
	return meta
}
// NodeInfo contains metadata about the executing node and cluster.
type NodeInfo struct {
	// ClusterID returns the cluster's UUID.
	ClusterID func() uuid.UUID
	// NodeID is the SQL instance ID container for this node.
	NodeID *base.SQLIDContainer
	// AdminURL returns the URL of the admin UI.
	AdminURL func() *url.URL
	// PGURL returns a Postgres connection URL for the given user.
	PGURL func(*url.Userinfo) (*url.URL, error)
}
// nodeStatusGenerator is a limited portion of the status.MetricsRecorder
// struct, to avoid having to import all of status in sql.
type nodeStatusGenerator interface {
	// GenerateNodeStatus produces the current status summary for this node.
	GenerateNodeStatus(ctx context.Context) *statuspb.NodeStatus
}
// An ExecutorConfig encompasses the auxiliary objects and configuration
// required to create an executor.
// All fields holding a pointer or an interface are required to create
// an Executor; the rest will have sane defaults set if omitted.
type ExecutorConfig struct {
	Settings *cluster.Settings
	NodeInfo
	Codec             keys.SQLCodec
	DefaultZoneConfig *zonepb.ZoneConfig
	Locality          roachpb.Locality
	AmbientCtx        log.AmbientContext
	DB                *kv.DB
	Gossip            gossip.OptionalGossip
	SystemConfig      config.SystemConfigProvider
	DistSender        *kvcoord.DistSender
	RPCContext        *rpc.Context
	LeaseManager      *lease.Manager
	Clock             *hlc.Clock
	DistSQLSrv        *distsql.ServerImpl
	// NodesStatusServer gives access to the NodesStatus service and is only
	// available when running as a system tenant.
	NodesStatusServer serverpb.OptionalNodesStatusServer
	// SQLStatusServer gives access to a subset of the Status service and is
	// available when not running as a system tenant.
	SQLStatusServer      serverpb.SQLStatusServer
	MetricsRecorder      nodeStatusGenerator
	SessionRegistry      *SessionRegistry
	SQLLivenessReader    sqlliveness.Reader
	JobRegistry          *jobs.Registry
	VirtualSchemas       *VirtualSchemaHolder
	DistSQLPlanner       *DistSQLPlanner
	TableStatsCache      *stats.TableStatisticsCache
	StatsRefresher       *stats.Refresher
	InternalExecutor     *InternalExecutor
	QueryCache           *querycache.C
	SchemaChangerMetrics *SchemaChangerMetrics
	FeatureFlagMetrics   *featureflag.DenialMetrics
	// Testing knobs for the various subsystems; all are optional.
	TestingKnobs                  ExecutorTestingKnobs
	PGWireTestingKnobs            *PGWireTestingKnobs
	SchemaChangerTestingKnobs     *SchemaChangerTestingKnobs
	TypeSchemaChangerTestingKnobs *TypeSchemaChangerTestingKnobs
	GCJobTestingKnobs             *GCJobTestingKnobs
	DistSQLRunTestingKnobs        *execinfra.TestingKnobs
	EvalContextTestingKnobs       tree.EvalContextTestingKnobs
	TenantTestingKnobs            *TenantTestingKnobs
	BackupRestoreTestingKnobs     *BackupRestoreTestingKnobs
	// HistogramWindowInterval is (server.Config).HistogramWindowInterval.
	HistogramWindowInterval time.Duration
	// RangeDescriptorCache is updated by DistSQL when it finds out about
	// misplanned spans.
	RangeDescriptorCache *rangecache.RangeCache
	// Role membership cache.
	RoleMemberCache *MembershipCache
	// ProtectedTimestampProvider encapsulates the protected timestamp subsystem.
	ProtectedTimestampProvider protectedts.Provider
	// StmtDiagnosticsRecorder deals with recording statement diagnostics.
	StmtDiagnosticsRecorder *stmtdiagnostics.Registry
	ExternalIODirConfig     base.ExternalIODirConfig
	// HydratedTables is a node-level cache of table descriptors which utilize
	// user-defined types.
	HydratedTables   *hydratedtables.Cache
	GCJobNotifier    *gcjobnotifier.Notifier
	RangeFeedFactory *rangefeed.Factory
	// VersionUpgradeHook is called after validating a `SET CLUSTER SETTING
	// version` but before executing it. It can carry out arbitrary migrations
	// that allow us to eventually remove legacy code. It will only be populated
	// on the system tenant.
	//
	// TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants.
	VersionUpgradeHook func(ctx context.Context, user security.SQLUsername, from, to clusterversion.ClusterVersion) error
	// MigrationJobDeps is used to drive migrations.
	//
	// TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants.
	MigrationJobDeps migration.JobDeps
	// IndexBackfiller is used to backfill indexes. It is another rather circular
	// object which mostly just holds on to an ExecConfig.
	IndexBackfiller *IndexBackfillPlanner
	// ContentionRegistry is a node-level registry of contention events used for
	// contention observability.
	ContentionRegistry *contention.Registry
}
// Organization returns the current value of the cluster.organization
// setting.
func (cfg *ExecutorConfig) Organization() string {
	settingValues := &cfg.Settings.SV
	return ClusterOrganization.Get(settingValues)
}
// GetFeatureFlagMetrics returns the metrics that track feature-flag denials.
func (cfg *ExecutorConfig) GetFeatureFlagMetrics() *featureflag.DenialMetrics {
	metrics := cfg.FeatureFlagMetrics
	return metrics
}
// SV returns a pointer to the cluster's setting values.
func (cfg *ExecutorConfig) SV() *settings.Values {
	values := &cfg.Settings.SV
	return values
}
// Compile-time check that *ExecutorTestingKnobs satisfies
// base.ModuleTestingKnobs without allocating.
var _ base.ModuleTestingKnobs = (*ExecutorTestingKnobs)(nil)

// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
func (*ExecutorTestingKnobs) ModuleTestingKnobs() {}
// StatementFilter is the type of callback that
// ExecutorTestingKnobs.StatementFilter takes: it receives the context, the
// statement text, and the error (if any) from executing it.
type StatementFilter func(context.Context, string, error)
// ExecutorTestingKnobs is part of the context used to control parts of the
// system during testing.
type ExecutorTestingKnobs struct {
	// StatementFilter can be used to trap execution of SQL statements and
	// optionally change their results. The filter function is invoked after each
	// statement has been executed.
	StatementFilter StatementFilter
	// BeforePrepare can be used to trap execution of SQL statement preparation.
	// If a nil error is returned, planning continues as usual.
	BeforePrepare func(ctx context.Context, stmt string, txn *kv.Txn) error
	// BeforeExecute is called by the Executor before plan execution. It is useful
	// for synchronizing statement execution.
	BeforeExecute func(ctx context.Context, stmt string)
	// AfterExecute is like StatementFilter, but it runs in the same goroutine as
	// the statement.
	AfterExecute func(ctx context.Context, stmt string, err error)
	// AfterExecCmd is called after successful execution of any command.
	AfterExecCmd func(ctx context.Context, cmd Command, buf *StmtBuf)
	// DisableAutoCommit, if set, disables the auto-commit functionality of some
	// SQL statements. That functionality allows some statements to commit
	// directly when they're executed in an implicit SQL txn, without waiting for
	// the Executor to commit the implicit txn.
	// This has to be set in tests that need to abort such statements using a
	// StatementFilter; otherwise, the statement commits immediately after
	// execution so there'll be nothing left to abort by the time the filter runs.
	DisableAutoCommit bool
	// BeforeAutoCommit is called when the Executor is about to commit the KV
	// transaction after running a statement in an implicit transaction, allowing
	// tests to inject errors into that commit.
	// If an error is returned, that error will be considered the result of
	// txn.Commit(), and the txn.Commit() call will not actually be
	// made. If no error is returned, txn.Commit() is called normally.
	//
	// Note that this is not called if the SQL statement representing the implicit
	// transaction has committed the KV txn itself (e.g. if it used the 1-PC
	// optimization). This is only called when the Executor is the one doing the
	// committing.
	BeforeAutoCommit func(ctx context.Context, stmt string) error
	// DisableTempObjectsCleanupOnSessionExit disables cleaning up temporary schemas
	// and tables when a session is closed.
	DisableTempObjectsCleanupOnSessionExit bool
	// TempObjectsCleanupCh replaces the time.Ticker.C channel used for scheduling
	// a cleanup on every temp object in the cluster. If this is set, the job
	// will now trigger when items come into this channel.
	TempObjectsCleanupCh chan time.Time
	// OnTempObjectsCleanupDone will trigger when the temporary objects cleanup
	// job is done.
	OnTempObjectsCleanupDone func()
	// WithStatementTrace is called after the statement is executed in
	// execStmtInOpenState.
	WithStatementTrace func(trace tracing.Recording, stmt string)
	// RunAfterSCJobsCacheLookup is called after the SchemaChangeJobCache is checked for
	// a given table id.
	RunAfterSCJobsCacheLookup func(*jobs.Job)
	// TestingSaveFlows, if set, will be called with the given stmt. The resulting
	// function will be called with the physical plan of that statement's main
	// query (i.e. no subqueries). The physical plan is only safe for use for the
	// lifetime of this function. Note that returning a nil function is
	// unsupported and will lead to a panic.
	TestingSaveFlows func(stmt string) func(map[roachpb.NodeID]*execinfrapb.FlowSpec) error
	// DeterministicExplainAnalyze, if set, will result in overriding fields in
	// EXPLAIN ANALYZE (PLAN) that can vary between runs (like elapsed times).
	DeterministicExplainAnalyze bool
}
// PGWireTestingKnobs contains knobs for the pgwire module.
type PGWireTestingKnobs struct {
	// CatchPanics causes the pgwire.conn to recover from panics in its execution
	// thread and return them as errors to the client, closing the connection
	// afterward.
	CatchPanics bool
	// AuthHook is used to override the normal authentication handling on new
	// connections.
	AuthHook func(context.Context) error
}
// Compile-time check that *PGWireTestingKnobs satisfies
// base.ModuleTestingKnobs without allocating.
var _ base.ModuleTestingKnobs = (*PGWireTestingKnobs)(nil)

// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*PGWireTestingKnobs) ModuleTestingKnobs() {}
// TenantTestingKnobs contains knobs for tenant behavior.
type TenantTestingKnobs struct {
	// ClusterSettingsUpdater is a field that if set, allows the tenant to set
	// in-memory cluster settings. SQL tenants are otherwise prohibited from
	// setting cluster settings.
	ClusterSettingsUpdater settings.Updater
	// TenantIDCodecOverride overrides the tenant ID used to construct the SQL
	// server's codec, but nothing else (e.g. its certs). Used for testing.
	TenantIDCodecOverride roachpb.TenantID
	// IdleExitCountdownDuration, if set, overrides the default duration of the
	// countdown timer that leads to shutdown when there are no SQL
	// connections.
	IdleExitCountdownDuration time.Duration
}
// Compile-time check that *TenantTestingKnobs satisfies
// base.ModuleTestingKnobs without allocating.
var _ base.ModuleTestingKnobs = (*TenantTestingKnobs)(nil)

// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*TenantTestingKnobs) ModuleTestingKnobs() {}
// BackupRestoreTestingKnobs contains knobs for backup and restore behavior.
type BackupRestoreTestingKnobs struct {
// AllowImplicitAccess allows implicit access to data sources for non-admin
// users. This enables using nodelocal for testing BACKUP/RESTORE permissions.
AllowImplicitAccess bool
// CaptureResolvedTableDescSpans allows for intercepting the spans which are
// resolved during backup planning, and will eventually be backed up during
// execution.
CaptureResolvedTableDescSpans func([]roachpb.Span)
// RunAfterProcessingRestoreSpanEntry allows blocking the RESTORE job after a
// single RestoreSpanEntry has been processed and added to the SSTBatcher.