Skip to content

Commit

Permalink
Merge #50662 #50742
Browse files Browse the repository at this point in the history
50662: sql: disable arrays in forward indexes r=mjibson a=mjibson

There are some open questions about inverted index types being used in
forward indexes. Disable those for now.

Fixes #50656
See #50659
See #17154

Release note (sql change): disable arrays in non-inverted indexes.

50742: sql: granular detection of non-immutable constant casts r=RaduBerinde a=RaduBerinde

#### opt: improve tests for parsing timestamps

Add more `sem/tree` unit tests for parsing time-related types. In
particular:
 - relative timestamps ('now', 'tomorrow')
 - TZ variants
 - timestamps that include timezones for DDate, DTime
 - cases where the result depends on the current time and/or timezone.

In a future change, the parsing functions will also return a flag
indicating if the result depends on the context or not; these tests
will be updated to check that flag as well.

Release note: None

#### pgdate: add WithoutTimezone variants

The way we parse DTimestamp (and other non-TZ variants) is convoluted:
we initially parse it just like a DTimestampTZ, and then we strip away
the timezone (adjusting it so the timestamp looks the same as before,
except with UTC location). This means that we incorporate the local
timezone, even though it is inconsequential.

This change adds WithoutTimezone API variants to `pgdate` which avoid
using the local timezone. This will allow extending the parsing APIs
to indicate whether the result depends on the current time or
timezone.

Release note: None

#### sql: use dummy ParseTimeContext during type-checking

We are not supposed to use any context-dependent parsing results
during type-checking. As such, use a dummy ParseTimeContext instead of
having SemaContext implement the interface.

Release note: None

#### sql: granular detection of non-immutable constant casts

Interpreting a timestamp (and similar types) can depend on the current
timezone or even the current time. A recent change prevented these
casts from happening during type-checking.

Unfortunately, this includes a lot of common cases which aren't
actually context-dependent. The most important are dates and
timestamps without time zone (except rare cases like 'now' or
'tomorrow'). In these cases, the type-checked expressions won't
appear constant and can't be used for partitioning, index
predicates, etc.

This change improves the parsing functions to return a
`dependsOnContext` boolean. When this flag is false, the result is
immutable and is replaced during type-checking.

The volatility of the version of date_trunc that returns a TIMESTAMPTZ
is corrected to Stable.

Release note: None

Co-authored-by: Matt Jibson <[email protected]>
Co-authored-by: Radu Berinde <[email protected]>
  • Loading branch information
3 people committed Jun 29, 2020
3 parents dbb5ad1 + cd1318a + 557e9db commit 0672a96
Show file tree
Hide file tree
Showing 44 changed files with 1,019 additions and 421 deletions.
3 changes: 2 additions & 1 deletion pkg/ccl/changefeedccl/avro.go
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,8 @@ func columnDescToAvroSchema(colDesc *sqlbase.ColumnDescriptor) (*avroSchemaField
return d.(*tree.DTimeTZ).TimeTZ.String(), nil
}
schema.decodeFn = func(x interface{}) (tree.Datum, error) {
return tree.ParseDTimeTZ(nil, x.(string), time.Microsecond)
d, _, err := tree.ParseDTimeTZ(nil, x.(string), time.Microsecond)
return d, err
}
case types.TimestampFamily:
avroType = avroLogicalType{
Expand Down
4 changes: 2 additions & 2 deletions pkg/ccl/importccl/read_import_mysql_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -228,14 +228,14 @@ func TestMysqlValueToDatum(t *testing.T) {
defer leaktest.AfterTest(t)()

date := func(s string) tree.Datum {
d, err := tree.ParseDDate(nil, s)
d, _, err := tree.ParseDDate(nil, s)
if err != nil {
t.Fatal(err)
}
return d
}
ts := func(s string) tree.Datum {
d, err := tree.ParseDTimestamp(nil, s, time.Microsecond)
d, _, err := tree.ParseDTimestamp(nil, s, time.Microsecond)
if err != nil {
t.Fatal(err)
}
Expand Down
9 changes: 5 additions & 4 deletions pkg/cli/dump.go
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ WHERE
if enumMembersS != nil {
// The driver sends back arrays as bytes, so we have to parse the array
// if we want to access its elements.
arr, err := tree.ParseDArrayFromString(
arr, _, err := tree.ParseDArrayFromString(
tree.NewTestingEvalContext(serverCfg.Settings), string(enumMembersS), types.String)
if err != nil {
return nil, err
Expand Down Expand Up @@ -439,7 +439,7 @@ func getAsOf(conn *sqlConn, asOf string) (string, error) {
clusterTS = string(vals[0].([]byte))
} else {
// Validate the timestamp. This prevents SQL injection.
if _, err := tree.ParseDTimestamp(nil, asOf, time.Nanosecond); err != nil {
if _, _, err := tree.ParseDTimestamp(nil, asOf, time.Nanosecond); err != nil {
return "", err
}
clusterTS = asOf
Expand Down Expand Up @@ -601,7 +601,8 @@ func extractArray(val interface{}) ([]string, error) {
if !ok {
return nil, fmt.Errorf("unexpected value: %T", b)
}
arr, err := tree.ParseDArrayFromString(tree.NewTestingEvalContext(serverCfg.Settings), string(b), types.String)
evalCtx := tree.NewTestingEvalContext(serverCfg.Settings)
arr, _, err := tree.ParseDArrayFromString(evalCtx, string(b), types.String)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -932,7 +933,7 @@ func dumpTableData(
}
case types.ArrayFamily:
// We can only observe ARRAY types by their [] suffix.
d, err = tree.ParseDArrayFromString(
d, _, err = tree.ParseDArrayFromString(
tree.NewTestingEvalContext(serverCfg.Settings), string(t), ct.ArrayContents())
if err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion pkg/internal/sqlsmith/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ FROM
// If enum members were provided, parse the result into a string array.
var members []string
if len(membersRaw) != 0 {
arr, err := tree.ParseDArrayFromString(&evalCtx, string(membersRaw), types.String)
arr, _, err := tree.ParseDArrayFromString(&evalCtx, string(membersRaw), types.String)
if err != nil {
return nil, err
}
Expand Down
1 change: 0 additions & 1 deletion pkg/sql/conn_executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -2059,7 +2059,6 @@ func (ex *connExecutor) resetPlanner(
p.cancelChecker.Reset(ctx)

p.semaCtx = tree.MakeSemaContext()
p.semaCtx.Location = &ex.sessionData.DataConversion.Location
p.semaCtx.SearchPath = ex.sessionData.SearchPath
p.semaCtx.AsOfTimestamp = nil
p.semaCtx.Annotations = nil
Expand Down
114 changes: 68 additions & 46 deletions pkg/sql/logictest/testdata/logic_test/array
Original file line number Diff line number Diff line change
Expand Up @@ -1333,11 +1333,18 @@ SELECT x, y FROM t ORDER BY (x, y)

subtest array_indexes

# Create indexes on arrays.
# TODO(mjibson): Fix #50659 and revert all removed tests from #50622.

# Indexes not yet supported for arrays, see #50659.
statement ok
DROP TABLE IF EXISTS t;
DROP TABLE IF EXISTS t

statement error column x is of type int\[\] and thus is not indexable
CREATE TABLE t (x INT[] PRIMARY KEY)

statement ok
CREATE TABLE t (x INT[])

statement ok
INSERT INTO t VALUES
(ARRAY[1]),
Expand All @@ -1350,8 +1357,9 @@ INSERT INTO t VALUES
(ARRAY[NULL, NULL, NULL])

# Test that the unique index rejects bad inserts.
statement error pq: duplicate key value \(x\)=\(ARRAY\[1,NULL,10\]\) violates unique constraint "primary"
INSERT INTO t VALUES (ARRAY[1, NULL, 10])
# Disabled until #50659 is resolved.
#statement error pq: duplicate key value \(x\)=\(ARRAY\[1,NULL,10\]\) violates unique constraint "primary"
#INSERT INTO t VALUES (ARRAY[1, NULL, 10])

query T
SELECT x FROM t ORDER BY x
Expand Down Expand Up @@ -1413,18 +1421,20 @@ SELECT x FROM t WHERE x > ARRAY[NULL, NULL]:::INT[] ORDER BY x
{5}

# Test some operations on a descending index.
statement ok
statement error column x is of type int\[\] and thus is not indexable
CREATE INDEX i ON t(x DESC)

# Add "t@i" index annotation once #50659 is fixed.
query T
SELECT x FROM t@i WHERE x <= ARRAY[1] ORDER BY x DESC
SELECT x FROM t WHERE x <= ARRAY[1] ORDER BY x DESC
----
{1}
{NULL,NULL,NULL}
{NULL}

# Add "t@i" index annotation once #50659 is fixed.
query T
SELECT x FROM t@i WHERE x > ARRAY[1] ORDER BY x
SELECT x FROM t WHERE x > ARRAY[1] ORDER BY x
----
{1,NULL,10}
{1,4,5}
Expand Down Expand Up @@ -1470,13 +1480,15 @@ SELECT x FROM t ORDER BY x DESC
{NULL,NULL,NULL}
{NULL}

# Enable index creation once #50659 is fixed.
statement ok
CREATE INDEX i ON t (x);
--CREATE INDEX i ON t (x);
INSERT INTO t VALUES (NULL), (NULL)

# Test that NULL's are differentiated from {NULL}.
# Add "t@i" index annotation once #50659 is fixed.
query T
SELECT x FROM t@i WHERE x IS NOT NULL ORDER BY x
SELECT x FROM t WHERE x IS NOT NULL ORDER BY x
----
{NULL}
{NULL,NULL,NULL}
Expand All @@ -1492,8 +1504,9 @@ statement error pq: unimplemented: column x is of type geography\[\] and thus is
CREATE TABLE tbad (x GEOGRAPHY[] PRIMARY KEY)

# Test arrays of composite types.
# Add x to PRIMARY KEY once #50659 is fixed.
statement ok
CREATE TABLE tarray(x DECIMAL[] PRIMARY KEY);
CREATE TABLE tarray(x DECIMAL[]);
INSERT INTO tarray VALUES (ARRAY[1.00]), (ARRAY[1.501])

# Ensure these are round tripped correctly.
Expand All @@ -1504,9 +1517,10 @@ SELECT x FROM tarray ORDER BY x
{1.501}

# Test indexes on multiple columns with arrays.
# Add multicolumn INDEX i (x, y, z) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x INT, y INT[], z INT, INDEX i (x, y, z));
CREATE TABLE t (x INT, y INT[], z INT);
INSERT INTO t VALUES
(1, ARRAY[1, 2, 3], 3),
(NULL, ARRAY[1, NULL, 3], NULL),
Expand All @@ -1527,20 +1541,24 @@ SELECT x, y, z FROM t WHERE x = 2 AND y < ARRAY[10] ORDER BY y
2 {4,5} 7

# Test that interleaving an array index doesn't lead to problems.
# Add parent PRIMARY KEY (x, y DESC) once #50659 is fixed.
# Add child PRIMARY KEY (x, y DESC, z) once #50659 is fixed.
# Add INTERLEAVE IN PARENT parent (x, y) once #50659 is fixed.
# Uncomment INSERT values once #50659 is fixed.
statement ok
DROP TABLE IF EXISTS parent, child CASCADE;
CREATE TABLE parent (x INT, y INT[], PRIMARY KEY (x, y DESC));
CREATE TABLE child (x INT, y INT[], z INT[], PRIMARY KEY (x, y DESC, z)) INTERLEAVE IN PARENT parent (x, y);
CREATE TABLE parent (x INT, y INT[], PRIMARY KEY (x));
CREATE TABLE child (x INT, y INT[], z INT[], PRIMARY KEY (x)) INTERLEAVE IN PARENT parent (x);
INSERT INTO parent VALUES
(1, ARRAY[1, 2, 3]),
(1, ARRAY[1, NULL]),
-- (1, ARRAY[1, NULL]),
(2, ARRAY[NULL]),
(3, ARRAY[NULL, 1, NULL]);
INSERT INTO child VALUES
(1, ARRAY[1, 2, 3], ARRAY[4]),
(1, ARRAY[1, 2, 3, 4], ARRAY[5]),
(1, ARRAY[1, NULL], ARRAY[5]),
(1, ARRAY[1, NULL, NULL], ARRAY[10]),
-- (1, ARRAY[1, 2, 3, 4], ARRAY[5]),
-- (1, ARRAY[1, NULL], ARRAY[5]),
-- (1, ARRAY[1, NULL, NULL], ARRAY[10]),
(2, ARRAY[NULL], ARRAY[1]),
(3, ARRAY[NULL, 1, NULL], ARRAY[3]);

Expand All @@ -1549,24 +1567,21 @@ query IT
SELECT x, y FROM parent ORDER BY x, y DESC
----
1 {1,2,3}
1 {1,NULL}
2 {NULL}
3 {NULL,1,NULL}

query ITT
SELECT x, y, z FROM child ORDER BY x, y DESC, z
----
1 {1,2,3,4} {5}
1 {1,2,3} {4}
1 {1,NULL,NULL} {10}
1 {1,NULL} {5}
2 {NULL} {1}
3 {NULL,1,NULL} {3}

# Test arrays of strings.
# Add parent PRIMARY KEY (x) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x STRING[] PRIMARY KEY);
CREATE TABLE t (x STRING[]);
INSERT INTO t VALUES
(ARRAY['']),
(ARRAY['hello', 'hi\nthere']),
Expand All @@ -1588,9 +1603,10 @@ SELECT x FROM t WHERE x > ARRAY['hell'] AND x < ARRAY['i']

# Test arrays of bytes, and insert some bytes that are used by the
# array encoding itself.
# Add parent PRIMARY KEY (x) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x BYTES[] PRIMARY KEY);
CREATE TABLE t (x BYTES[]);
INSERT INTO t VALUES
(ARRAY[b'\xFF', b'\x00']),
(ARRAY[NULL, b'\x01', b'\x01', NULL]),
Expand All @@ -1604,9 +1620,10 @@ SELECT x FROM t ORDER BY x
{"\\xff","\\x00"}

# Repeat the above test with a descending encoding.
# Add parent PRIMARY KEY (x DESC) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x BYTES[], PRIMARY KEY (x DESC));
CREATE TABLE t (x BYTES[]);
INSERT INTO t VALUES
(ARRAY[b'\xFF', b'\x00']),
(ARRAY[NULL, b'\x01', b'\x01', NULL]),
Expand All @@ -1620,9 +1637,10 @@ SELECT x FROM t ORDER BY x
{"\\xff","\\x00"}

# Test some indexes with multiple array columns.
# Add parent PRIMARY KEY (x, y) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x INT[], y INT[], PRIMARY KEY (x, y));
CREATE TABLE t (x INT[], y INT[]);
INSERT INTO t VALUES
(ARRAY[1, 2], ARRAY[3, 4]),
(ARRAY[NULL, NULL], ARRAY[NULL, NULL]),
Expand All @@ -1646,29 +1664,31 @@ SELECT x, y FROM t WHERE x > ARRAY[NULL]:::INT[] ORDER BY y
{1,2} {3,4}

# Test that we can create foreign key references on arrays.
statement ok
DROP TABLE IF EXISTS t1, t2 CASCADE;
CREATE TABLE t1 (x INT[] PRIMARY KEY);
CREATE TABLE t2 (x INT[] PRIMARY KEY);
ALTER TABLE t2 ADD FOREIGN KEY (x) REFERENCES t1 (x)
# Enable once #50659 is fixed.
#statement ok
#DROP TABLE IF EXISTS t1, t2 CASCADE;
#CREATE TABLE t1 (x INT[] PRIMARY KEY);
#CREATE TABLE t2 (x INT[] PRIMARY KEY);
#ALTER TABLE t2 ADD FOREIGN KEY (x) REFERENCES t1 (x)

statement ok
INSERT INTO t1 VALUES (ARRAY[1, 2, NULL, 3])
#statement ok
#INSERT INTO t1 VALUES (ARRAY[1, 2, NULL, 3])

statement error pq: insert on table "t2" violates foreign key constraint "fk_x_ref_t1"
INSERT INTO t2 VALUES (ARRAY[1, 2])
#statement error pq: insert on table "t2" violates foreign key constraint "fk_x_ref_t1"
#INSERT INTO t2 VALUES (ARRAY[1, 2])

statement ok
INSERT INTO t2 VALUES (ARRAY[1, 2, NULL, 3])
#statement ok
#INSERT INTO t2 VALUES (ARRAY[1, 2, NULL, 3])

statement error pq: delete on table "t1" violates foreign key constraint "fk_x_ref_t1" on table "t2"
DELETE FROM t1 WHERE x > ARRAY[1]
#statement error pq: delete on table "t1" violates foreign key constraint "fk_x_ref_t1" on table "t2"
#DELETE FROM t1 WHERE x > ARRAY[1]

# Test different joins on indexed arrays.
# Add t1 and t2 PRIMARY KEY x once #50659 is fixed.
statement ok
DROP TABLE IF EXISTS t1, t2 CASCADE;
CREATE TABLE t1 (x INT[] PRIMARY KEY);
CREATE TABLE t2 (x INT[] PRIMARY KEY);
CREATE TABLE t1 (x INT[]);
CREATE TABLE t2 (x INT[]);
INSERT INTO t1 VALUES
(ARRAY[1, 2]),
(ARRAY[NULL]),
Expand All @@ -1690,16 +1710,18 @@ SELECT t1.x FROM t1 INNER MERGE JOIN t2 ON t1.x = t2.x
{NULL}
{1,2}

query T rowsort
SELECT t1.x FROM t1 INNER LOOKUP JOIN t2 ON t1.x = t2.x
----
{NULL}
{1,2}
# Enable once #50659 is fixed.
#query T rowsort
#SELECT t1.x FROM t1 INNER LOOKUP JOIN t2 ON t1.x = t2.x
#----
#{NULL}
#{1,2}

# Test that we can group by arrays.
# Add INDEX (x) once #50659 is fixed.
statement ok
DROP TABLE t;
CREATE TABLE t (x INT[], INDEX (x));
CREATE TABLE t (x INT[]);
INSERT INTO t VALUES
(ARRAY[1, 2]),
(ARRAY[1, 2]),
Expand Down
2 changes: 1 addition & 1 deletion pkg/sql/mutations/mutations.go
Original file line number Diff line number Diff line change
Expand Up @@ -420,7 +420,7 @@ func foreignKeyMutator(
for refI, refCol := range availCols {
fkColType := tree.MustBeStaticallyKnownType(fkCol.Type)
refColType := tree.MustBeStaticallyKnownType(refCol.Type)
if fkColType.Equivalent(refColType) {
if fkColType.Equivalent(refColType) && sqlbase.ColumnTypeIsIndexable(refColType) {
usingCols = append(usingCols, refCol)
availCols = append(availCols[:refI], availCols[refI+1:]...)
found = true
Expand Down
6 changes: 3 additions & 3 deletions pkg/sql/opt/constraint/testutils.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,11 +139,11 @@ func parseDatumPath(evalCtx *tree.EvalContext, str string, typs []types.Family)
case types.DecimalFamily:
val, err = tree.ParseDDecimal(valStr)
case types.DateFamily:
val, err = tree.ParseDDate(evalCtx, valStr)
val, _, err = tree.ParseDDate(evalCtx, valStr)
case types.TimestampFamily:
val, err = tree.ParseDTimestamp(evalCtx, valStr, time.Microsecond)
val, _, err = tree.ParseDTimestamp(evalCtx, valStr, time.Microsecond)
case types.TimestampTZFamily:
val, err = tree.ParseDTimestampTZ(evalCtx, valStr, time.Microsecond)
val, _, err = tree.ParseDTimestampTZ(evalCtx, valStr, time.Microsecond)
case types.StringFamily:
val = tree.NewDString(valStr)
case types.OidFamily:
Expand Down
Loading

0 comments on commit 0672a96

Please sign in to comment.