opt: fix FK cascades to child tables with partial indexes #57100

Merged (1 commit, Dec 1, 2020)
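For context, a minimal SQL sketch of the scenario this change addresses, mirroring the ON UPDATE CASCADE regression tests added below (table names are illustrative; the expected result is shown as a comment):

-- Child table with a partial index that stores the cascaded FK column.
CREATE TABLE parent (p INT PRIMARY KEY);
CREATE TABLE child (
    c INT PRIMARY KEY,
    p INT REFERENCES parent ON UPDATE CASCADE,
    i INT,
    INDEX idx (p) WHERE i > 0
);

INSERT INTO parent VALUES (1);
INSERT INTO child VALUES (10, 1, 100), (20, 1, -100);

-- The cascade must rewrite the idx entry for the child row satisfying i > 0;
-- before this fix the cascaded mutation did not maintain the child's partial
-- indexes correctly.
UPDATE parent SET p = 2 WHERE p = 1;
SELECT c, p, i FROM child@idx WHERE p = 2 AND i > 0;
-- Expected: (10, 2, 100)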
pkg/sql/logictest/testdata/logic_test/partial_index (89 additions, 0 deletions)
@@ -1426,6 +1426,7 @@ SELECT k FROM t55387 WHERE a > 1 AND b > 3

# Regression test for #55672. Do not build partial index predicates when the
# scope does not include all table columns, like FK check scans.
subtest regression_55672

statement ok
CREATE TABLE t55672_a (
@@ -1451,3 +1452,91 @@ INSERT INTO t55672_a (a, t) VALUES (2, now())

statement ok
INSERT INTO t55672_b (b,a) VALUES (2,2)

# Regression test for #57085. Cascading UPDATEs should correctly update partial
# indexes of the child table.
subtest regression_57085

# Update a partial index in a child table.
statement ok
CREATE TABLE t57085_p1 (
p INT PRIMARY KEY
);
CREATE TABLE t57085_c1 (
c INT PRIMARY KEY,
p INT REFERENCES t57085_p1 ON UPDATE CASCADE,
i INT,
INDEX idx (p) WHERE i > 0
);

statement ok
INSERT INTO t57085_p1 VALUES (1);
INSERT INTO t57085_c1 VALUES (10, 1, 100), (20, 1, -100);
UPDATE t57085_p1 SET p = 2 WHERE p = 1;

query III rowsort
SELECT c, p, i FROM t57085_c1@idx WHERE p = 2 AND i > 0
----
10 2 100

# Update a partial index in a child table with a single variable boolean
# predicate.
statement ok
CREATE TABLE t57085_p2 (
p INT PRIMARY KEY
);
CREATE TABLE t57085_c2 (
c INT PRIMARY KEY,
p INT REFERENCES t57085_p2 ON UPDATE CASCADE,
b BOOL,
INDEX idx (p) WHERE b
);

statement ok
INSERT INTO t57085_p2 VALUES (1);
INSERT INTO t57085_c2 VALUES (10, 1, true), (20, 1, false);
UPDATE t57085_p2 SET p = 2 WHERE p = 1;

query IIB rowsort
SELECT c, p, b FROM t57085_c2@idx WHERE p = 2 AND b
----
10 2 true

# Update the parent with an INSERT ON CONFLICT DO UPDATE.
statement ok
INSERT INTO t57085_p2 VALUES (2) ON CONFLICT (p) DO UPDATE SET p = 3

query IIB rowsort
SELECT c, p, b FROM t57085_c2@idx WHERE p = 3 AND b
----
10 3 true

# Update a partial index that references the column being updated in the
# cascade.
statement ok
CREATE TABLE t57085_p3 (
p INT PRIMARY KEY
);
CREATE TABLE t57085_c3 (
c INT PRIMARY KEY,
p INT REFERENCES t57085_p3 ON UPDATE CASCADE,
i INT,
INDEX idx (i) WHERE p = 3
);

statement ok
INSERT INTO t57085_p3 VALUES (1), (2);
INSERT INTO t57085_c3 VALUES (10, 1, 100), (20, 2, 200);
UPDATE t57085_p3 SET p = 3 WHERE p = 1;

query III rowsort
SELECT c, p, i FROM t57085_c3@idx WHERE p = 3 AND i = 100
----
10 3 100

statement ok
UPDATE t57085_p3 SET p = 4 WHERE p = 3;

query III rowsort
SELECT c, p, i FROM t57085_c3@idx WHERE p = 3 AND i = 100
----
pkg/sql/opt/exec/execbuilder/mutation.go (2 additions, 0 deletions)
@@ -44,6 +44,8 @@ func (b *Builder) buildMutationInput(
return execPlan{}, err
}

// TODO(mgartner/radu): This can incorrectly append columns in a FK cascade
// update that are never used during execution. See issue #57097.
if p.WithID != 0 {
// The input might have extra columns that are used only by FK or unique
// checks; make sure we don't project them away.
pkg/sql/opt/exec/execbuilder/testdata/partial_index (28 additions, 0 deletions)
@@ -939,3 +939,31 @@ vectorized: true
missing stats
table: inv@i (partial index)
spans: 1 span

# Regression test for #57085. Cascading DELETEs should not issue DEL operations
# for partial indexes of a child table when the deleted row was not in the
# partial index.
statement ok
CREATE TABLE t57085_p (
p INT PRIMARY KEY
);
CREATE TABLE t57085_c (
c INT PRIMARY KEY,
p INT REFERENCES t57085_p ON DELETE CASCADE,
b BOOL,
INDEX idx (p) WHERE b,
FAMILY (c, p, b)
);

statement ok
INSERT INTO t57085_p VALUES (1), (2);
INSERT INTO t57085_c VALUES (10, 1, true), (20, 1, false), (30, 2, true);

query T kvtrace
DELETE FROM t57085_p WHERE p = 1;
----
DelRange /Table/56/1/1 - /Table/56/1/1/#
Scan /Table/57/{1-2}
Del /Table/57/2/1/10/0
Del /Table/57/1/10/0
Del /Table/57/1/20/0
pkg/sql/opt/norm/testdata/rules/prune_cols (9 additions, 9 deletions)
@@ -1946,23 +1946,23 @@ update partial_indexes
├── columns: <none>
├── fetch columns: a:5 b:6 c:7
├── update-mapping:
│ └── b_new:10 => b:2
├── partial index put columns: partial_index_put1:11
├── partial index del columns: partial_index_del1:9
│ └── b_new:9 => b:2
├── partial index put columns: partial_index_put1:10
├── partial index del columns: partial_index_del1:11
├── cardinality: [0 - 0]
├── volatile, mutations
└── project
├── columns: partial_index_put1:11 a:5!null b:6 c:7 partial_index_del1:9 b_new:10
├── columns: partial_index_put1:10 partial_index_del1:11 a:5!null b:6 c:7 b_new:9
├── cardinality: [0 - 1]
├── immutable
├── key: ()
├── fd: ()-->(5-7,9-11)
├── project
│ ├── columns: b_new:10 partial_index_del1:9 a:5!null b:6 c:7
│ ├── columns: b_new:9 a:5!null b:6 c:7
│ ├── cardinality: [0 - 1]
│ ├── immutable
│ ├── key: ()
│ ├── fd: ()-->(5-7,9,10)
│ ├── fd: ()-->(5-7,9)
│ ├── select
│ │ ├── columns: a:5!null b:6 c:7
│ │ ├── cardinality: [0 - 1]
@@ -1978,10 +1978,10 @@ update partial_indexes
│ │ └── filters
│ │ └── a:5 = 1 [outer=(5), constraints=(/5: [/1 - /1]; tight), fd=()-->(5)]
│ └── projections
│ ├── b:6 + 1 [as=b_new:10, outer=(6), immutable]
│ └── b:6 > 1 [as=partial_index_del1:9, outer=(6)]
│ └── b:6 + 1 [as=b_new:9, outer=(6), immutable]
└── projections
└── b_new:10 > 1 [as=partial_index_put1:11, outer=(10)]
├── b_new:9 > 1 [as=partial_index_put1:10, outer=(9)]
└── b:6 > 1 [as=partial_index_del1:11, outer=(6)]

# Prune secondary family column not needed for the update.
norm expect=(PruneMutationFetchCols,PruneMutationInputCols)
pkg/sql/opt/optbuilder/delete.go (3 additions, 0 deletions)
@@ -76,6 +76,9 @@ func (b *Builder) buildDelete(del *tree.Delete, inScope *scope) (outScope *scope
func (mb *mutationBuilder) buildDelete(returning tree.ReturningExprs) {
mb.buildFKChecksAndCascadesForDelete()

// Project partial index DEL boolean columns.
mb.projectPartialIndexDelCols(mb.fetchScope)

private := mb.makeMutationPrivate(returning != nil)
mb.outScope.expr = mb.b.factory.ConstructDelete(
mb.outScope.expr, mb.uniqueChecks, mb.fkChecks, private,
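To make the new projectPartialIndexDelCols call above concrete, here is a hedged sketch of what the projected DEL columns mean for a DELETE plan (the schema is hypothetical, not taken from this PR's tests): for each partial index, the builder projects a boolean column that evaluates the index predicate against the fetched row, and the execution engine issues a Del for that index only when the column is true.

CREATE TABLE t (k INT PRIMARY KEY, b BOOL, INDEX idx (k) WHERE b);
INSERT INTO t VALUES (1, false), (2, true);
-- The mutation input carries a boolean column equivalent to the idx predicate
-- (here simply b) for each fetched row; since b is false for k = 1, the plan
-- issues no Del against idx for this row.
DELETE FROM t WHERE k = 1;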
pkg/sql/opt/optbuilder/fk_cascade.go (16 additions, 3 deletions)
@@ -95,9 +95,14 @@ func (cb *onDeleteCascadeBuilder) Build(
mb.init(b, "delete", cb.childTable, tree.MakeUnqualifiedTableName(cb.childTable.Name()))

// Build a semi join of the table with the mutation input.
mb.outScope = b.buildDeleteCascadeMutationInput(
//
// The scope returned by buildDeleteCascadeMutationInput has one column
// for each public table column, making it appropriate to set it as
// mb.fetchScope.
mb.fetchScope = b.buildDeleteCascadeMutationInput(
cb.childTable, &mb.alias, fk, binding, bindingProps, oldValues,
)
mb.outScope = mb.fetchScope

// Set list of columns that will be fetched by the input expression.
mb.setFetchColIDs(mb.outScope.cols)
@@ -272,7 +277,7 @@ func (cb *onDeleteFastCascadeBuilder) Build(

// Build the input to the delete mutation, which is simply a Scan with a
// Select on top.
mb.outScope = b.buildScan(
mb.fetchScope = b.buildScan(
b.addTable(cb.childTable, &mb.alias),
tableOrdinals(cb.childTable, columnKinds{
includeMutations: false,
@@ -284,6 +289,7 @@
noRowLocking,
b.allocScope(),
)
mb.outScope = mb.fetchScope

var filters memo.FiltersExpr

@@ -420,9 +426,14 @@ func (cb *onDeleteSetBuilder) Build(
mb.init(b, "update", cb.childTable, tree.MakeUnqualifiedTableName(cb.childTable.Name()))

// Build a semi join of the table with the mutation input.
mb.outScope = b.buildDeleteCascadeMutationInput(
//
// The scope returned by buildDeleteCascadeMutationInput has one column
// for each public table column, making it appropriate to set it as
// mb.fetchScope.
mb.fetchScope = b.buildDeleteCascadeMutationInput(
cb.childTable, &mb.alias, fk, binding, bindingProps, oldValues,
)
mb.outScope = mb.fetchScope

// Set list of columns that will be fetched by the input expression.
mb.setFetchColIDs(mb.outScope.cols)
@@ -639,6 +650,8 @@ func (cb *onUpdateCascadeBuilder) Build(
numFKCols := fk.ColumnCount()
tableScopeCols := mb.outScope.cols[:len(mb.outScope.cols)-2*numFKCols]
newValScopeCols := mb.outScope.cols[len(mb.outScope.cols)-numFKCols:]
mb.fetchScope = b.allocScope()
mb.fetchScope.appendColumns(tableScopeCols)

// Set list of columns that will be fetched by the input expression.
mb.setFetchColIDs(tableScopeCols)
pkg/sql/opt/optbuilder/insert.go (23 additions, 33 deletions)
@@ -649,7 +649,7 @@ func (mb *mutationBuilder) buildInsert(returning tree.ReturningExprs) {
// Add any check constraint boolean columns to the input.
mb.addCheckConstraintCols()

// Add any partial index put boolean columns to the input.
// Project partial index PUT boolean columns.
mb.projectPartialIndexPutCols(preCheckScope)

mb.buildUniqueChecksForInsert()
@@ -905,7 +905,7 @@ func (mb *mutationBuilder) buildInputForUpsert(
// NOTE: Include mutation columns, but be careful to never use them for any
// reason other than as "fetch columns". See buildScan comment.
// TODO(andyk): Why does execution engine need mutation columns for Insert?
fetchScope := mb.b.buildScan(
mb.fetchScope = mb.b.buildScan(
mb.b.addTable(mb.tab, &mb.alias),
tableOrdinals(mb.tab, columnKinds{
includeMutations: true,
@@ -923,40 +923,40 @@ func (mb *mutationBuilder) buildInputForUpsert(
// the scan on the right side of the left outer join with the partial index
// predicate expression as the filter.
if isPartial {
texpr := fetchScope.resolveAndRequireType(predExpr, types.Bool)
predScalar := mb.b.buildScalar(texpr, fetchScope, nil, nil, nil)
fetchScope.expr = mb.b.factory.ConstructSelect(
fetchScope.expr,
texpr := mb.fetchScope.resolveAndRequireType(predExpr, types.Bool)
predScalar := mb.b.buildScalar(texpr, mb.fetchScope, nil, nil, nil)
mb.fetchScope.expr = mb.b.factory.ConstructSelect(
mb.fetchScope.expr,
memo.FiltersExpr{mb.b.factory.ConstructFiltersItem(predScalar)},
)
}

// Record a not-null "canary" column. After the left-join, this will be null
// if no conflict has been detected, or not null otherwise. At least one not-
// null column must exist, since primary key columns are not-null.
canaryScopeCol := &fetchScope.cols[findNotNullIndexCol(index)]
canaryScopeCol := &mb.fetchScope.cols[findNotNullIndexCol(index)]
mb.canaryColID = canaryScopeCol.id

// Set fetchColIDs to reference the columns created for the fetch values.
mb.setFetchColIDs(fetchScope.cols)
mb.setFetchColIDs(mb.fetchScope.cols)

// Add the fetch columns to the current scope. It's OK to modify the current
// scope because it contains only INSERT columns that were added by the
// mutationBuilder, and which aren't needed for any other purpose.
mb.outScope.appendColumnsFromScope(fetchScope)
mb.outScope.appendColumnsFromScope(mb.fetchScope)

// Build the join condition by creating a conjunction of equality conditions
// that test each conflict column:
//
// ON ins.x = scan.a AND ins.y = scan.b
//
var on memo.FiltersExpr
for i := range fetchScope.cols {
for i := range mb.fetchScope.cols {
// Include fetch columns with ordinal positions in conflictOrds.
if conflictOrds.Contains(i) {
condition := mb.b.factory.ConstructEq(
mb.b.factory.ConstructVariable(mb.insertColIDs[i]),
mb.b.factory.ConstructVariable(fetchScope.cols[i].id),
mb.b.factory.ConstructVariable(mb.fetchScope.cols[i].id),
)
on = append(on, mb.b.factory.ConstructFiltersItem(condition))
}
Expand All @@ -975,7 +975,7 @@ func (mb *mutationBuilder) buildInputForUpsert(
// Construct the left join.
mb.outScope.expr = mb.b.factory.ConstructLeftJoin(
mb.outScope.expr,
fetchScope.expr,
mb.fetchScope.expr,
on,
memo.EmptyJoinPrivate,
)
@@ -998,9 +998,6 @@

mb.targetColList = make(opt.ColList, 0, mb.tab.ColumnCount())
mb.targetColSet = opt.ColSet{}

// Add any partial index del boolean columns to the input for UPSERTs.
mb.projectPartialIndexDelCols(fetchScope)
}

// setUpsertCols sets the list of columns to be updated in case of conflict.
@@ -1070,25 +1067,18 @@ func (mb *mutationBuilder) buildUpsert(returning tree.ReturningExprs) {
// Add any check constraint boolean columns to the input.
mb.addCheckConstraintCols()

// Add any partial index put boolean columns. The variables in these partial
// index predicates must resolve to the new column values of the row which
// are either the existing values of the columns or new values provided in
// the upsert. Therefore, the variables must resolve to the upsert CASE
// expression columns, so the project must be added after the upsert columns
// are.
//
// For example, consider the table and upsert:
//
// CREATE TABLE t (a INT PRIMARY KEY, b INT, INDEX (b) WHERE a > 1)
// INSERT INTO t (a, b) VALUES (1, 2) ON CONFLICT (a) DO UPDATE a = t.a + 1
// Project partial index PUT and DEL boolean columns.
//
// An entry in the partial index should only be added when a > 1. The
// resulting value of a is dependent on whether or not there is a conflict.
// In the case of no conflict, the (1, 2) is inserted into the table, and no
// partial index entry should be added. But if there is a conflict, The
// existing row where a = 1 has a incremented to 2, and an entry should be
// added to the partial index.
mb.projectPartialIndexPutCols(preCheckScope)
// In some cases existing rows may not be fetched for an UPSERT (see
// mutationBuilder.needExistingRows for more details). In these cases
// there is no need to project partial index DEL columns and
// mb.fetchScope will be nil. Therefore, we only project partial index
// PUT columns.
if mb.needExistingRows() {
mb.projectPartialIndexPutAndDelCols(preCheckScope, mb.fetchScope)
} else {
mb.projectPartialIndexPutCols(preCheckScope)
}

mb.buildFKChecksForUpsert()

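As background for the PUT/DEL projection above, the example from the comment removed earlier in buildUpsert still illustrates why partial-index PUT columns must resolve to the post-upsert values; restated here as a sketch (with SET added for valid syntax):

CREATE TABLE t (a INT PRIMARY KEY, b INT, INDEX (b) WHERE a > 1);
INSERT INTO t (a, b) VALUES (1, 2) ON CONFLICT (a) DO UPDATE SET a = t.a + 1;
-- With no conflict, (1, 2) is inserted and no partial index entry is added.
-- With a conflict on a = 1, the existing row's a becomes 2, so an entry must
-- be added to the partial index.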