-
Notifications
You must be signed in to change notification settings - Fork 3.8k
/
mutation_builder_unique.go
668 lines (612 loc) · 26.4 KB
/
mutation_builder_unique.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optbuilder
import (
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/intsets"
)
// UniquenessChecksForGenRandomUUIDClusterMode controls the cluster setting for
// enabling uniqueness checks for UUID columns set to gen_random_uuid().
// When disabled (the default), mutations that set a unique UUID column (or a
// UUID cast to STRING/BYTES) via gen_random_uuid() skip the uniqueness check,
// relying on the near-zero collision probability of random UUIDs.
var UniquenessChecksForGenRandomUUIDClusterMode = settings.RegisterBoolSetting(
	settings.ApplicationLevel,
	"sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled",
	"if enabled, uniqueness checks may be planned for mutations of UUID columns updated with"+
		" gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probability",
	false,
	settings.WithPublic)
// buildUniqueChecksForInsert builds uniqueness check queries for an insert.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
func (mb *mutationBuilder) buildUniqueChecksForInsert() {
	// Checks are only needed when the table has at least one UNIQUE WITHOUT
	// INDEX constraint.
	if !mb.hasUniqueWithoutIndexConstraints() {
		return
	}
	helper := &mb.uniqueCheckHelper
	tryFastPath := true
	numUnique := mb.tab.UniqueCount()
	for ord := 0; ord < numUnique; ord++ {
		// Constraints already enforced by an index need no separate check.
		if !mb.tab.Unique(ord).WithoutIndex() {
			continue
		}
		// Arbiters of an INSERT ... ON CONFLICT ... DO NOTHING clause need no
		// check either (ON CONFLICT ... DO UPDATE does not go through this
		// code path; that's handled by buildUniqueChecksForUpsert).
		if mb.uniqueConstraintIsArbiter(ord) {
			continue
		}
		if !helper.init(mb, ord) {
			continue
		}
		checkItem, fastPathItem := helper.buildInsertionCheck(tryFastPath)
		if fastPathItem == nil {
			// If even one fast-path check cannot be built, abandon fast path
			// entirely and discard any fast-path checks collected so far.
			tryFastPath = false
			mb.fastPathUniqueChecks = nil
		} else {
			mb.fastPathUniqueChecks = append(mb.fastPathUniqueChecks, *fastPathItem)
		}
		mb.uniqueChecks = append(mb.uniqueChecks, checkItem)
	}
	telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}
// buildUniqueChecksForUpdate builds uniqueness check queries for an update.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
func (mb *mutationBuilder) buildUniqueChecksForUpdate() {
	// Checks are only needed when the table has at least one UNIQUE WITHOUT
	// INDEX constraint.
	if !mb.hasUniqueWithoutIndexConstraints() {
		return
	}
	mb.ensureWithID()
	helper := &mb.uniqueCheckHelper
	numUnique := mb.tab.UniqueCount()
	for ord := 0; ord < numUnique; ord++ {
		// Constraints already enforced by an index need no separate check.
		if !mb.tab.Unique(ord).WithoutIndex() {
			continue
		}
		// A check is only needed when the update touches one of the
		// constraint's columns.
		if !mb.uniqueColsUpdated(ord) {
			continue
		}
		if helper.init(mb, ord) {
			// The insertion check works for updates too since it simply checks
			// that the unique columns in the newly inserted or updated rows do
			// not match any existing rows. The check prevents rows from
			// matching themselves by adding a filter based on the primary key.
			check, _ := helper.buildInsertionCheck(false /* buildFastPathCheck */)
			mb.uniqueChecks = append(mb.uniqueChecks, check)
		}
	}
	telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}
// buildUniqueChecksForUpsert builds uniqueness check queries for an upsert.
// These check queries are used to enforce UNIQUE WITHOUT INDEX constraints.
func (mb *mutationBuilder) buildUniqueChecksForUpsert() {
	// Checks are only needed when the table has at least one UNIQUE WITHOUT
	// INDEX constraint.
	if !mb.hasUniqueWithoutIndexConstraints() {
		return
	}
	mb.ensureWithID()
	helper := &mb.uniqueCheckHelper
	numUnique := mb.tab.UniqueCount()
	for ord := 0; ord < numUnique; ord++ {
		// Constraints already enforced by an index need no separate check.
		if !mb.tab.Unique(ord).WithoutIndex() {
			continue
		}
		// If this constraint is an arbiter of an INSERT ... ON CONFLICT ... DO
		// UPDATE clause and not updated by the DO UPDATE clause, we don't need
		// to plan a check (ON CONFLICT ... DO NOTHING does not go through this
		// code path; that's handled by buildUniqueChecksForInsert). Note that
		// if the constraint is partial and columns referenced in the predicate
		// are updated, we'll still plan the check (this is handled correctly
		// by mb.uniqueColsUpdated).
		if mb.uniqueConstraintIsArbiter(ord) && !mb.uniqueColsUpdated(ord) {
			continue
		}
		if helper.init(mb, ord) {
			// The insertion check works for upserts too since it simply checks
			// that the unique columns in the newly inserted or updated rows do
			// not match any existing rows. The check prevents rows from
			// matching themselves by adding a filter based on the primary key.
			check, _ := helper.buildInsertionCheck(false /* buildFastPathCheck */)
			mb.uniqueChecks = append(mb.uniqueChecks, check)
		}
	}
	telemetry.Inc(sqltelemetry.UniqueChecksUseCounter)
}
// hasUniqueWithoutIndexConstraints returns true if there are any
// UNIQUE WITHOUT INDEX constraints on the table.
func (mb *mutationBuilder) hasUniqueWithoutIndexConstraints() bool {
	numUnique := mb.tab.UniqueCount()
	for ord := 0; ord < numUnique; ord++ {
		if mb.tab.Unique(ord).WithoutIndex() {
			return true
		}
	}
	return false
}
// uniqueColsUpdated returns true if any of the columns for a unique
// constraint are being updated (according to updateColIDs). When the unique
// constraint has a partial predicate, it also returns true if the predicate
// references any of the columns being updated.
func (mb *mutationBuilder) uniqueColsUpdated(uniqueOrdinal cat.UniqueOrdinal) bool {
	uc := mb.tab.Unique(uniqueOrdinal)
	// Direct case: one of the constraint's own columns is assigned by the
	// mutation.
	for colIdx, total := 0, uc.ColumnCount(); colIdx < total; colIdx++ {
		tabOrd := uc.ColumnOrdinal(mb.tab, colIdx)
		if mb.updateColIDs[tabOrd] != 0 {
			return true
		}
	}
	// Partial-constraint case: an update to any column referenced by the
	// predicate can change which rows the constraint applies to.
	if _, isPartial := uc.Predicate(); isPartial {
		predExpr := mb.parseUniqueConstraintPredicateExpr(uniqueOrdinal)
		typedPred := mb.fetchScope.resolveAndRequireType(predExpr, types.Bool)
		var referencedCols opt.ColSet
		mb.b.buildScalar(typedPred, mb.fetchScope, nil, nil, &referencedCols)
		for colID, ok := referencedCols.Next(0); ok; colID, ok = referencedCols.Next(colID + 1) {
			tabOrd := mb.md.ColumnMeta(colID).Table.ColumnOrdinal(colID)
			if mb.updateColIDs[tabOrd] != 0 {
				return true
			}
		}
	}
	return false
}
// uniqueConstraintIsArbiter returns true if the given unique constraint is used
// as an arbiter to detect conflicts in an INSERT ... ON CONFLICT statement.
// It simply delegates to the set of arbiters collected by the mutation
// builder when the ON CONFLICT clause was analyzed.
func (mb *mutationBuilder) uniqueConstraintIsArbiter(uniqueOrdinal int) bool {
	return mb.arbiters.ContainsUniqueConstraint(uniqueOrdinal)
}
// uniqueCheckHelper is a type associated with a single unique constraint and
// is used to build the "leaves" of a unique check expression, namely the
// WithScan of the mutation input and the Scan of the table.
//
// A helper instance is fully re-initialized by init for each constraint; see
// init for when a constraint is skipped entirely.
type uniqueCheckHelper struct {
	// mb is the mutation builder this helper was initialized from.
	mb *mutationBuilder

	// unique is the constraint being checked, and uniqueOrdinal its ordinal
	// position among the table's unique constraints.
	unique        cat.UniqueConstraint
	uniqueOrdinal int

	// uniqueOrdinals are the table ordinals of the unique columns in the table
	// that is being mutated. They correspond 1-to-1 to the columns in the
	// UniqueConstraint.
	uniqueOrdinals intsets.Fast

	// primaryKeyOrdinals includes the ordinals from any primary key columns
	// that are not included in uniqueOrdinals. Used to prevent a row from
	// matching itself in the uniqueness-check semi-join.
	primaryKeyOrdinals intsets.Fast

	// The scope and column ordinals of the scan that will serve as the right
	// side of the semi join for the uniqueness checks. Populated by init via
	// buildTableScan.
	scanScope    *scope
	scanOrdinals []int
}
// init initializes the helper with a unique constraint.
//
// Returns false if the constraint should be ignored (e.g. because the new
// values for the unique columns are known to be always NULL).
func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool {
	// This initialization pattern ensures that fields are not unwittingly
	// reused. Field reuse must be explicit.
	*h = uniqueCheckHelper{
		mb:            mb,
		unique:        mb.tab.Unique(uniqueOrdinal),
		uniqueOrdinal: uniqueOrdinal,
	}

	// Collect the table ordinals of the constraint's columns.
	var uniqueOrds intsets.Fast
	for i, n := 0, h.unique.ColumnCount(); i < n; i++ {
		uniqueOrds.Add(h.unique.ColumnOrdinal(mb.tab, i))
	}

	// Find the primary key columns that are not part of the unique constraint.
	// If there aren't any, we don't need a check.
	// TODO(mgartner): We also don't need a check if there exists a unique index
	// with columns that are a subset of the unique constraint columns.
	// Similarly, we don't need a check for a partial unique constraint if there
	// exists a non-partial unique constraint with columns that are a subset of
	// the partial unique constraint columns.
	primaryOrds := getIndexLaxKeyOrdinals(mb.tab.Index(cat.PrimaryIndex))
	primaryOrds.DifferenceWith(uniqueOrds)
	if primaryOrds.Empty() {
		// The primary key columns are a subset of the unique columns; unique check
		// not needed.
		return false
	}

	h.uniqueOrdinals = uniqueOrds
	h.primaryKeyOrdinals = primaryOrds

	// Inspect each unique column's new value to see whether the check can be
	// skipped entirely.
	for tabOrd, ok := h.uniqueOrdinals.Next(0); ok; tabOrd, ok = h.uniqueOrdinals.Next(tabOrd + 1) {
		colID := mb.mapToReturnColID(tabOrd)
		// Check if we are setting NULL values for the unique columns, like when
		// this mutation is the result of a SET NULL cascade action. If at least one
		// unique column is getting a NULL value, unique check not needed.
		if memo.OutputColumnIsAlwaysNull(mb.outScope.expr, colID) {
			return false
		}

		// If one of the columns is a UUID (or UUID casted to STRING or BYTES) set
		// to gen_random_uuid() and we don't require uniqueness checks for
		// gen_random_uuid(), unique check not needed.
		switch mb.md.ColumnMeta(colID).Type.Family() {
		case types.UuidFamily, types.StringFamily, types.BytesFamily:
			if columnIsGenRandomUUID(mb.outScope.expr, colID) {
				requireCheck := UniquenessChecksForGenRandomUUIDClusterMode.Get(&mb.b.evalCtx.Settings.SV)
				if !requireCheck {
					return false
				}
			}
		}
	}

	// Build the scan that will serve as the right side of the semi join in the
	// uniqueness check. We need to build the scan now so that we can use its
	// FDs below.
	h.scanScope, h.scanOrdinals = h.buildTableScan()

	// Check that the columns in the unique constraint aren't already known to
	// form a lax key. This can happen if there is a unique index on a superset of
	// these columns, where all other columns are computed columns that depend
	// only on our columns. This is especially important for multi-region tables
	// when the region column is computed.
	//
	// For example:
	//
	//   CREATE TABLE tab (
	//     k INT PRIMARY KEY,
	//     region crdb_internal_region AS (
	//       CASE WHEN k < 10 THEN 'us-east1' ELSE 'us-west1' END
	//     ) STORED
	//   ) LOCALITY REGIONAL BY ROW AS region
	//
	// Because this is a REGIONAL BY ROW table, the region column is implicitly
	// added to the front of every index, including the primary index. As a
	// result, we would normally need to add a uniqueness check to all mutations
	// to ensure that the primary key column (k in this case) remains unique.
	// However, because the region column is computed and depends only on k, the
	// presence of the unique index on (region, k) (i.e., the primary index) is
	// sufficient to guarantee the uniqueness of k.
	var uniqueCols opt.ColSet
	h.uniqueOrdinals.ForEach(func(ord int) {
		colID := h.scanScope.cols[ord].id
		uniqueCols.Add(colID)
	})
	fds := &h.scanScope.expr.Relational().FuncDeps
	// A check is needed only when the scan's functional dependencies cannot
	// already prove the unique columns form a lax key.
	return !fds.ColsAreLaxKey(uniqueCols)
}
// buildFiltersForFastPathCheck builds ANDed equality filters between the
// columns in the uniqueness check defined by h.uniqueOrdinals and scalar
// expressions present in a single Values row being inserted. It is expected
// that buildCheckInputScan has been called and has set up in
// uniqueCheckExpr the columns corresponding with the scalars in the
// insert row. buildCheckInputScan has either inlined the insert row as a Values
// expression, or embedded it within a WithScanExpr, in which case `h.mb.inputForInsertExpr`
// holds the input to the WithScanExpr. In the latter case, for a
// given table column ordinal `i` in `h.uniqueOrdinals`, instead of finding the
// matching scalar in the Values row via uniqueCheckCols[i],
// withScanExpr.InCols[i] holds the column ID to match on. scanExpr is
// the scan on the insert target table used on the right side of the semijoins
// in the non-fast-path uniqueness checks, with column ids matching h.scanScope.cols.
//
// The purpose of this function is to build filters representing a uniqueness
// check on a given insert row, which can be applied as a Select from a Scan,
// and optimized during exploration when all placeholders have been filled in.
// The goal is to find a constrained Scan of an index, which consumes all
// filters (meaning it could also be safely executed via a KV lookup in a fast
// path uniqueness check).
//
// Returns nil whenever any part of the filters cannot be built, in which case
// fast path must be abandoned for this check.
func (h *uniqueCheckHelper) buildFiltersForFastPathCheck(
	uniqueCheckExpr memo.RelExpr, uniqueCheckCols []scopeColumn, scanExpr *memo.ScanExpr,
) (scanFilters memo.FiltersExpr) {
	f := h.mb.b.factory
	scanFilters = make(memo.FiltersExpr, 0, h.uniqueOrdinals.Len())

	// It is expected that `inputForInsertExpr` contains the result of the most
	// recent call to `buildInputForInsert`.
	insertInputValues := h.mb.inputForInsertExpr
	// Skip to the WithScan or Values.
	for skipProjectExpr, ok := insertInputValues.(*memo.ProjectExpr); ok; skipProjectExpr, ok = insertInputValues.(*memo.ProjectExpr) {
		insertInputValues = skipProjectExpr.Input
	}

	// If the source is a WithScan, we use InCols to find the matching value
	// in the Values tuple. If the source is a Values expression, use the scope's
	// column ID directly to find the desired scalar in the Values tuple.
	withScanExpr, isWithScan := uniqueCheckExpr.(*memo.WithScanExpr)
	valuesExpr, isValues := uniqueCheckExpr.(*memo.ValuesExpr)

	// Find a Values expression either inlined, or embedded in a WithScanExpr.
	if !isValues {
		valuesExpr, isValues = insertInputValues.(*memo.ValuesExpr)
	}

	// valuesExpr may be sourced from a values expression, or a values expression
	// nested in a WithScanExpr. The value of variable `isWithScan` determines
	// how to find the desired field in the valuesExpr below.
	// This currently only supports single-row insert. It may be possible to
	// support multi-row insert here by generating a tuple IN expression or an
	// ORed predicate, eg. (col1 = 1 AND col2 = 2) OR (col1 = 3 AND col2 = 4)...
	if !isValues || len(valuesExpr.Rows) != 1 || valuesExpr.Rows[0].Op() != opt.TupleOp {
		return nil
	}
	tupleExpr, _ := valuesExpr.Rows[0].(*memo.TupleExpr)

	// Match either unique constraint table column ordinal with the corresponding
	// scalar expression using either the columns defined in the uniqueCheckScope
	// or the input columns to the WithScanExpr.
	for i, ok := h.uniqueOrdinals.Next(0); ok; i, ok = h.uniqueOrdinals.Next(i + 1) {
		uniqueCheckColID := opt.ColumnID(0)
		if isWithScan {
			// Sanity check the index is in bounds.
			if i >= len(withScanExpr.InCols) {
				return nil
			}
			uniqueCheckColID = withScanExpr.InCols[i]
		} else {
			uniqueCheckColID = uniqueCheckCols[i].id
		}
		// Locate the scalar in the Values row that produces this column.
		found := false
		var tupleScalarExpression opt.ScalarExpr
		for tupleIndex, valuesColID := range valuesExpr.Cols {
			if valuesColID == uniqueCheckColID {
				found = true
				tupleScalarExpression = tupleExpr.Elems[tupleIndex]
				break
			}
		}
		// If we can't build any part of the filters, need to give up on fast
		// path.
		if !found {
			return nil
		}
		if !scanExpr.Cols.Contains(h.scanScope.cols[i].id) {
			// Trying to build a predicate on a column added in the projection
			// on top of the scan. This may be from an expression index such as:
			// `UNIQUE INDEX ((col1 + 10))`
			// This is currently not supported.
			return nil
		}
		// Emit (scan_col = inserted_scalar) for this unique column.
		scanFilters = append(scanFilters, f.ConstructFiltersItem(
			f.ConstructEq(
				f.ConstructVariable(h.scanScope.cols[i].id),
				tupleScalarExpression,
			),
		))
	}
	return scanFilters
}
// buildInsertionCheck creates a unique check for rows which are added to a
// table. The input to the insertion check will be produced from the input to
// the mutation operator. If buildFastPathCheck is true, a fast-path unique
// check for the insert is also built. A `UniqueChecksItem` can always be built,
// but if it is not possible to build a `FastPathUniqueChecksItem`, the second
// return value is nil.
func (h *uniqueCheckHelper) buildInsertionCheck(
	buildFastPathCheck bool,
) (memo.UniqueChecksItem, *memo.FastPathUniqueChecksItem) {
	f := h.mb.b.factory

	// Build a self semi-join, with the new values on the left and the
	// existing values on the right.
	uniqueCheckScope, _ := h.mb.buildCheckInputScan(
		checkInputScanNewVals, h.scanOrdinals, false, /* isFK */
	)
	// Do NOT build any expressions using uniqueCheckScope in between the call to
	// buildCheckInputScan and the setting of uniqueCheckExpr and uniqueCheckCols.
	uniqueCheckExpr := uniqueCheckScope.expr
	uniqueCheckCols := uniqueCheckScope.cols

	// Build the join filters:
	//   (new_a = existing_a) AND (new_b = existing_b) AND ...
	//
	// Set the capacity to h.uniqueOrdinals.Len()+1 since we'll have an equality
	// condition for each column in the unique constraint, plus one additional
	// condition to prevent rows from matching themselves (see below). If the
	// constraint is partial, add 2 to account for filtering both the WithScan
	// and the Scan by the partial unique constraint predicate.
	numFilters := h.uniqueOrdinals.Len() + 1
	_, isPartial := h.unique.Predicate()
	if isPartial {
		numFilters += 2
	}
	semiJoinFilters := make(memo.FiltersExpr, 0, numFilters)
	for i, ok := h.uniqueOrdinals.Next(0); ok; i, ok = h.uniqueOrdinals.Next(i + 1) {
		semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(
			f.ConstructEq(
				f.ConstructVariable(uniqueCheckScope.cols[i].id),
				f.ConstructVariable(h.scanScope.cols[i].id),
			),
		))
	}

	// Find the ScanExpr which reads from the table this unique check applies to.
	var uniqueFastPathCheck memo.RelExpr
	var foundScan bool
	var scanExpr *memo.ScanExpr
	var scanFilters memo.FiltersExpr
	if buildFastPathCheck {
		possibleScan := h.scanScope.expr
		// Projections may have been added such as for computed column expressions.
		// Skip over these to find the Scan on the target table of the Insert.
		// Fast path uniqueness checks on computed columns aren't supported
		// currently. We just need to access the Scan defining regular columns.
		if skipProjectExpr, ok := possibleScan.(*memo.ProjectExpr); ok {
			possibleScan = skipProjectExpr.Input
		}
		scanExpr, foundScan = possibleScan.(*memo.ScanExpr)
		// Fast path is disabled if this check is for a UNIQUE WITHOUT INDEX with a
		// partial index predicate.
		if foundScan && !isPartial {
			scanFilters = h.buildFiltersForFastPathCheck(uniqueCheckExpr, uniqueCheckCols, scanExpr)
		}
	}

	// If the unique constraint is partial, we need to filter out inserted rows
	// that don't satisfy the predicate. We also need to make sure that rows do
	// not match existing rows in the table that do not satisfy the
	// predicate. So we add the predicate as a filter on both the WithScan
	// columns and the Scan columns.
	if isPartial {
		pred := h.mb.parseUniqueConstraintPredicateExpr(h.uniqueOrdinal)

		typedPred := uniqueCheckScope.resolveAndRequireType(pred, types.Bool)
		withScanPred := h.mb.b.buildScalar(typedPred, uniqueCheckScope, nil, nil, nil)
		semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(withScanPred))

		typedPred = h.scanScope.resolveAndRequireType(pred, types.Bool)
		scanPred := h.mb.b.buildScalar(typedPred, h.scanScope, nil, nil, nil)
		semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(scanPred))
	}

	// We need to prevent rows from matching themselves in the semi join. We can
	// do this by adding another filter that uses the primary keys to check if
	// two rows are identical:
	//    (new_pk1 != existing_pk1) OR (new_pk2 != existing_pk2) OR ...
	// Note that init guarantees primaryKeyOrdinals is non-empty, so pkFilter is
	// always constructed.
	var pkFilter opt.ScalarExpr
	for i, ok := h.primaryKeyOrdinals.Next(0); ok; i, ok = h.primaryKeyOrdinals.Next(i + 1) {
		pkFilterLocal := f.ConstructNe(
			f.ConstructVariable(uniqueCheckScope.cols[i].id),
			f.ConstructVariable(h.scanScope.cols[i].id),
		)
		if pkFilter == nil {
			pkFilter = pkFilterLocal
		} else {
			pkFilter = f.ConstructOr(pkFilter, pkFilterLocal)
		}
	}
	semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(pkFilter))

	joinPrivate := memo.EmptyJoinPrivate
	// If we're using a weaker isolation level, the semi-joined scan needs to
	// obtain predicate locks. We must use a lookup semi-join for predicate locks
	// to work.
	if h.mb.b.evalCtx.TxnIsoLevel != isolation.Serializable {
		joinPrivate = &memo.JoinPrivate{
			Flags: memo.PreferLookupJoinIntoRight,
		}
	}

	semiJoin := f.ConstructSemiJoin(uniqueCheckScope.expr, h.scanScope.expr, semiJoinFilters, joinPrivate)

	// Collect the key columns that will be shown in the error message if there
	// is a duplicate key violation resulting from this uniqueness check.
	keyCols := make(opt.ColList, 0, h.uniqueOrdinals.Len())
	for i, ok := h.uniqueOrdinals.Next(0); ok; i, ok = h.uniqueOrdinals.Next(i + 1) {
		keyCols = append(keyCols, uniqueCheckScope.cols[i].id)
	}

	// Create a Project that passes-through only the key columns. This allows
	// normalization rules to prune any unnecessary columns from the expression.
	// The key columns are always needed in order to display the constraint
	// violation error.
	project := f.ConstructProject(semiJoin, nil /* projections */, keyCols.ToSet())

	// Build a SelectExpr which can be optimized in the explore phase and used
	// to build information needed to perform the fast path uniqueness check.
	// The goal is for the Select to be rewritten into a constrained scan on
	// an index which applies all filters. If no such scans are found, insert
	// fast path cannot be applied.
	if foundScan && len(scanFilters) != 0 {
		// Build a fresh table scan so the fast-path Select does not share
		// column IDs with the semi-join's scan; remap the filters onto it.
		newScanScope, _ := h.buildTableScan()
		newPossibleScan := newScanScope.expr
		if skipProjectExpr, ok := newPossibleScan.(*memo.ProjectExpr); ok {
			newPossibleScan = skipProjectExpr.Input
		}
		if newScanExpr, ok := newPossibleScan.(*memo.ScanExpr); ok {
			newScanPrivate := &newScanExpr.ScanPrivate
			newFilters := f.CustomFuncs().RemapScanColsInFilter(scanFilters, &scanExpr.ScanPrivate, newScanPrivate)
			uniqueFastPathCheck = f.ConstructSelect(newScanExpr, newFilters)
		} else {
			// Don't build a fast-path check if we failed to create a new ScanExpr.
			buildFastPathCheck = false
		}
	} else if buildFastPathCheck {
		// Don't build a fast-path check if we failed to build a ScanExpr with
		// filters on all unique check columns.
		buildFastPathCheck = false
	}

	uniqueChecks := f.ConstructUniqueChecksItem(project, &memo.UniqueChecksItemPrivate{
		Table:        h.mb.tabID,
		CheckOrdinal: h.uniqueOrdinal,
		KeyCols:      keyCols,
	})
	if !buildFastPathCheck {
		return uniqueChecks, nil
	}
	fastPathChecks := f.ConstructFastPathUniqueChecksItem(uniqueFastPathCheck, &memo.FastPathUniqueChecksItemPrivate{ReferencedTableID: h.mb.tabID, CheckOrdinal: h.uniqueOrdinal})
	return uniqueChecks, &fastPathChecks
}
// buildTableScan builds a Scan of the table. The ordinals of the columns
// scanned are also returned.
//
// The scan excludes mutation, system, and inverted columns, and ignores
// UNIQUE WITHOUT INDEX keys (since the check exists precisely because those
// keys cannot be assumed unique). Under weaker isolation levels the scan
// acquires shared predicate locks to keep the check valid against concurrent
// writers.
func (h *uniqueCheckHelper) buildTableScan() (outScope *scope, ordinals []int) {
	tabMeta := h.mb.b.addTable(h.mb.tab, tree.NewUnqualifiedTableName(h.mb.tab.Name()))
	ordinals = tableOrdinals(tabMeta.Table, columnKinds{
		includeMutations: false,
		includeSystem:    false,
		includeInverted:  false,
	})
	locking := noRowLocking
	// If we're using a weaker isolation level, we lock the checked predicate(s)
	// to prevent concurrent inserts from other transactions from violating the
	// unique constraint.
	if h.mb.b.evalCtx.TxnIsoLevel != isolation.Serializable {
		locking = lockingSpec{
			&tree.LockingItem{
				// TODO(michae2): Change this to ForKeyShare when it is supported.
				Strength:   tree.ForShare,
				Targets:    []tree.TableName{tree.MakeUnqualifiedTableName(h.mb.tab.Name())},
				WaitPolicy: tree.LockWaitBlock,
				// Unique checks must ensure the non-existence of certain rows, so we
				// use predicate locks instead of record locks to prevent insertion of
				// new rows into the locked span(s) by other concurrent transactions.
				Form: tree.LockPredicate,
			},
		}
	}
	return h.mb.b.buildScan(
		tabMeta,
		ordinals,
		// After the update we can't guarantee that the constraints are unique
		// (which is why we need the uniqueness checks in the first place).
		&tree.IndexFlags{IgnoreUniqueWithoutIndexKeys: true},
		locking,
		h.mb.b.allocScope(),
		true, /* disableNotVisibleIndex */
	), ordinals
}
// columnIsGenRandomUUID returns true if the expression returns the function
// gen_random_uuid() for the given column.
func columnIsGenRandomUUID(e memo.RelExpr, col opt.ColumnID) bool {
	// producesGenRandomUUID reports whether the scalar is a gen_random_uuid()
	// call, possibly wrapped in a width-unlimited (assignment) cast to STRING
	// or BYTES.
	producesGenRandomUUID := func(scalar opt.ScalarExpr) bool {
		switch cast := scalar.(type) {
		case *memo.CastExpr:
			if (cast.Typ.Family() == types.StringFamily || cast.Typ.Family() == types.BytesFamily) &&
				cast.Typ.Width() == 0 {
				scalar = cast.Input
			}
		case *memo.AssignmentCastExpr:
			if (cast.Typ.Family() == types.StringFamily || cast.Typ.Family() == types.BytesFamily) &&
				cast.Typ.Width() == 0 {
				scalar = cast.Input
			}
		}
		fn, ok := scalar.(*memo.FunctionExpr)
		return ok && fn.Name == "gen_random_uuid"
	}

	switch expr := e.(type) {
	case *memo.ProjectExpr:
		// Passthrough columns originate in the input; recurse. Otherwise the
		// column must come from one of the projections.
		if expr.Passthrough.Contains(col) {
			return columnIsGenRandomUUID(expr.Input, col)
		}
		for i := range expr.Projections {
			if expr.Projections[i].Col == col {
				return producesGenRandomUUID(expr.Projections[i].Element)
			}
		}
	case *memo.ValuesExpr:
		// Every row must produce gen_random_uuid() for the column.
		ordinal, ok := expr.Cols.Find(col)
		if !ok {
			return false
		}
		for i := range expr.Rows {
			if !producesGenRandomUUID(expr.Rows[i].(*memo.TupleExpr).Elems[ordinal]) {
				return false
			}
		}
		return true
	}
	return false
}