From 1ab9adb30aac0fcf837e2e6d2e9449d26f873b8a Mon Sep 17 00:00:00 2001
From: Rebecca Taft
Date: Wed, 24 Feb 2021 16:52:58 -0700
Subject: [PATCH 1/7] opt: use computed columns to build functional
dependencies
This commit updates MakeTableFuncDep so that it adds an equivalency
or a synthesized column to the table's FDs for each of the computed
columns available in the metadata. This will be necessary to support
removing uniqueness checks in some cases in a future commit.
Release justification: This commit is a low-risk, high-benefit change
to existing functionality.
Release note (performance improvement): The optimizer now infers
additional functional dependencies based on computed columns in tables.
This may enable additional optimizations and lead to better query plans.
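For illustration (a hypothetical table; the names are not taken from
this patch's test data), a computed column that simply mirrors another
column yields an equivalency, while an immutable, non-composite-sensitive
expression yields a synthesized-column FD:
    CREATE TABLE t (
        a INT PRIMARY KEY,
        b INT AS (a + 1) STORED,  -- immutable expression: adds the FD (a)-->(b)
        c INT AS (a) STORED       -- mirrors a exactly: adds the equivalency a==c
    );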
---
pkg/sql/opt/memo/logical_props_builder.go | 33 +++++
pkg/sql/opt/memo/testdata/logprops/delete | 22 +--
pkg/sql/opt/memo/testdata/logprops/scan | 57 ++++++++
pkg/sql/opt/memo/testdata/logprops/update | 36 ++---
pkg/sql/opt/memo/testdata/logprops/upsert | 34 ++---
pkg/sql/opt/memo/testdata/stats/insert | 20 +--
pkg/sql/opt/memo/testdata/stats/upsert | 6 +-
pkg/sql/opt/norm/testdata/rules/prune_cols | 8 +-
.../testutils/opttester/testdata/opt-steps | 130 +++++++++---------
pkg/sql/opt/xform/testdata/rules/computed | 6 +-
pkg/sql/opt/xform/testdata/rules/join | 5 +-
pkg/sql/opt/xform/testdata/rules/select | 16 +--
12 files changed, 231 insertions(+), 142 deletions(-)
diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go
index 95dd0540a1ab..8a1af93897d1 100644
--- a/pkg/sql/opt/memo/logical_props_builder.go
+++ b/pkg/sql/opt/memo/logical_props_builder.go
@@ -1654,6 +1654,8 @@ func MakeTableFuncDep(md *opt.Metadata, tabID opt.TableID) *props.FuncDepSet {
}
fd = &props.FuncDepSet{}
+
+ // Add keys from indexes.
for i := 0; i < tab.IndexCount(); i++ {
var keyCols opt.ColSet
index := tab.Index(i)
@@ -1690,6 +1692,7 @@ func MakeTableFuncDep(md *opt.Metadata, tabID opt.TableID) *props.FuncDepSet {
}
}
+ // Add keys from unique constraints.
if !md.TableMeta(tabID).IgnoreUniqueWithoutIndexKeys {
for i := 0; i < tab.UniqueCount(); i++ {
unique := tab.Unique(i)
@@ -1735,6 +1738,36 @@ func MakeTableFuncDep(md *opt.Metadata, tabID opt.TableID) *props.FuncDepSet {
}
}
+ // Add computed columns.
+ for i, n := 0, tab.ColumnCount(); i < n; i++ {
+ if tab.Column(i).IsComputed() {
+ tabMeta := md.TableMeta(tabID)
+ colID := tabMeta.MetaID.ColumnID(i)
+ expr := tabMeta.ComputedCols[colID]
+ if expr == nil {
+ // The computed columns haven't been added to the metadata.
+ continue
+ }
+ if v, ok := expr.(*VariableExpr); ok {
+ // This computed column is exactly equal to another column in the table,
+ // so add an equivalency.
+ fd.AddEquivalency(v.Col, colID)
+ continue
+ }
+ // Otherwise, this computed column is an immutable expression over zero
+ // or more other columns in the table.
+
+ from := getOuterCols(expr)
+ // We want to set up the FD: from --> colID.
+ // This does not necessarily hold for "composite" types like decimals or
+ // collated strings. For example, if d is a decimal, d::TEXT can have
+ // different values for equal values of d, such as 1 and 1.0.
+ if !CanBeCompositeSensitive(md, expr) {
+ fd.AddSynthesizedCol(from, colID)
+ }
+ }
+ }
+
md.SetTableAnnotation(tabID, fdAnnID, fd)
return fd
}
diff --git a/pkg/sql/opt/memo/testdata/logprops/delete b/pkg/sql/opt/memo/testdata/logprops/delete
index 013da35d6772..bdfec000e7e1 100644
--- a/pkg/sql/opt/memo/testdata/logprops/delete
+++ b/pkg/sql/opt/memo/testdata/logprops/delete
@@ -28,7 +28,7 @@ delete abcde
└── select
├── columns: a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
├── key: (12)
- ├── fd: ()-->(8), (12)-->(9-11,13,14)
+ ├── fd: ()-->(8), (12)-->(9-11,13,14), (9,10)-->(11)
├── prune: (9-14)
├── interesting orderings: (+12)
├── scan abcde
@@ -41,7 +41,7 @@ delete abcde
│ │ │ └── variable: c:10 [type=int]
│ │ └── const: 1 [type=int]
│ ├── key: (12)
- │ ├── fd: (12)-->(8-11,13,14)
+ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ ├── prune: (8-14)
│ └── interesting orderings: (+12)
└── filters
@@ -56,18 +56,18 @@ DELETE FROM abcde WHERE a=1 RETURNING *
project
├── columns: a:1(int!null) b:2(int) c:3(int!null) d:4(int)
├── volatile, mutations
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1), (2,3)-->(4)
├── prune: (1-4)
└── delete abcde
├── columns: a:1(int!null) b:2(int) c:3(int!null) d:4(int) rowid:5(int!null)
├── fetch columns: a:8(int) b:9(int) c:10(int) d:11(int) rowid:12(int) e:13(int)
├── volatile, mutations
├── key: (5)
- ├── fd: ()-->(1), (5)-->(2-4)
+ ├── fd: ()-->(1), (5)-->(2-4), (2,3)-->(4)
└── select
├── columns: a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
├── key: (12)
- ├── fd: ()-->(8), (12)-->(9-11,13,14)
+ ├── fd: ()-->(8), (12)-->(9-11,13,14), (9,10)-->(11)
├── prune: (9-14)
├── interesting orderings: (+12)
├── scan abcde
@@ -80,7 +80,7 @@ project
│ │ │ └── variable: c:10 [type=int]
│ │ └── const: 1 [type=int]
│ ├── key: (12)
- │ ├── fd: (12)-->(8-11,13,14)
+ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ ├── prune: (8-14)
│ └── interesting orderings: (+12)
└── filters
@@ -123,7 +123,7 @@ project
│ │ │ └── variable: c:10 [type=int]
│ │ └── const: 1 [type=int]
│ ├── key: (12)
- │ ├── fd: (12)-->(8-11,13,14)
+ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ ├── prune: (8-14)
│ └── interesting orderings: (+12)
└── filters
@@ -138,18 +138,18 @@ DELETE FROM abcde WHERE b=c RETURNING *;
project
├── columns: a:1(int!null) b:2(int!null) c:3(int!null) d:4(int)
├── volatile, mutations
- ├── fd: (2)==(3), (3)==(2)
+ ├── fd: (2)==(3), (3)==(2), (2)-->(4)
├── prune: (1-4)
└── delete abcde
├── columns: a:1(int!null) b:2(int!null) c:3(int!null) d:4(int) rowid:5(int!null)
├── fetch columns: a:8(int) b:9(int) c:10(int) d:11(int) rowid:12(int) e:13(int)
├── volatile, mutations
├── key: (5)
- ├── fd: (2)==(3), (3)==(2), (5)-->(1-4)
+ ├── fd: (2)==(3), (3)==(2), (5)-->(1-4), (2)-->(4)
└── select
├── columns: a:8(int!null) b:9(int!null) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
├── key: (12)
- ├── fd: (12)-->(8-11,13,14), (9)==(10), (10)==(9)
+ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11), (9)==(10), (10)==(9)
├── prune: (8,11-14)
├── interesting orderings: (+12)
├── scan abcde
@@ -162,7 +162,7 @@ project
│ │ │ └── variable: c:10 [type=int]
│ │ └── const: 1 [type=int]
│ ├── key: (12)
- │ ├── fd: (12)-->(8-11,13,14)
+ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ ├── prune: (8-14)
│ └── interesting orderings: (+12)
└── filters
diff --git a/pkg/sql/opt/memo/testdata/logprops/scan b/pkg/sql/opt/memo/testdata/logprops/scan
index b0443e06a0e3..f7133bf4cb62 100644
--- a/pkg/sql/opt/memo/testdata/logprops/scan
+++ b/pkg/sql/opt/memo/testdata/logprops/scan
@@ -397,3 +397,60 @@ index-join c
├── fd: (1)-->(3), (3)~~>(1)
├── prune: (1,3)
└── interesting orderings: (+1) (+3,+1)
+
+# Test FDs for computed columns.
+exec-ddl
+CREATE TABLE computed (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d)
+)
+----
+
+build
+SELECT * FROM computed
+----
+project
+ ├── columns: i:1(int!null) s:2(string) d:3(decimal) c_i_expr:4(string!null) c_s:5(string) c_d:6(decimal) c_d_expr:7(string)
+ ├── key: (1)
+ ├── fd: (1,4)-->(2,3,5-7), (3,7)~~>(1,2,4-6), (1)-->(4), (3)==(6), (6)==(3), (2)~~>(1,3-7), (2)==(5), (5)==(2)
+ ├── prune: (1-7)
+ ├── interesting orderings: (+4,+1) (+7,+3,+4,+1)
+ └── project
+ ├── columns: c_s:5(string) i:1(int!null) s:2(string) d:3(decimal) c_i_expr:4(string!null) c_d:6(decimal) c_d_expr:7(string) crdb_internal_mvcc_timestamp:8(decimal)
+ ├── key: (1)
+ ├── fd: (1,4)-->(2,3,6-8), (3,7)~~>(1,2,4,6,8), (1)-->(4), (3)==(6), (6)==(3), (2)~~>(1,3,4,6-8), (2)==(5), (5)==(2)
+ ├── prune: (1-8)
+ ├── interesting orderings: (+4,+1) (+7,+3,+4,+1)
+ ├── scan computed
+ │ ├── columns: i:1(int!null) s:2(string) d:3(decimal) c_i_expr:4(string!null) c_d:6(decimal) c_d_expr:7(string) crdb_internal_mvcc_timestamp:8(decimal)
+ │ ├── computed column expressions
+ │ │ ├── c_i_expr:4
+ │ │ │ └── case [type=string]
+ │ │ │ ├── true [type=bool]
+ │ │ │ ├── when [type=string]
+ │ │ │ │ ├── lt [type=bool]
+ │ │ │ │ │ ├── variable: i:1 [type=int]
+ │ │ │ │ │ └── const: 0 [type=int]
+ │ │ │ │ └── const: 'foo' [type=string]
+ │ │ │ └── const: 'bar' [type=string]
+ │ │ ├── c_s:5
+ │ │ │ └── variable: s:2 [type=string]
+ │ │ ├── c_d:6
+ │ │ │ └── variable: d:3 [type=decimal]
+ │ │ └── c_d_expr:7
+ │ │ └── cast: STRING [type=string]
+ │ │ └── variable: d:3 [type=decimal]
+ │ ├── key: (1)
+ │ ├── fd: (1,4)-->(2,3,6-8), (3,7)~~>(1,2,4,6,8), (1)-->(4), (3)==(6), (6)==(3), (2)~~>(1,3,4,6-8)
+ │ ├── prune: (1-4,6-8)
+ │ └── interesting orderings: (+4,+1) (+7,+3,+4,+1)
+ └── projections
+ └── variable: s:2 [as=c_s:5, type=string, outer=(2)]
diff --git a/pkg/sql/opt/memo/testdata/logprops/update b/pkg/sql/opt/memo/testdata/logprops/update
index c3a49de86de4..46ec642d9426 100644
--- a/pkg/sql/opt/memo/testdata/logprops/update
+++ b/pkg/sql/opt/memo/testdata/logprops/update
@@ -33,25 +33,25 @@ update abcde
├── columns: column17:17(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) b_new:15(int!null) column16:16(int!null)
├── immutable
├── key: (12)
- ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (10)-->(17)
+ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (9,10)-->(11), (10)-->(17)
├── prune: (8-17)
├── interesting orderings: (+12)
├── project
│ ├── columns: column16:16(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) b_new:15(int!null)
│ ├── key: (12)
- │ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14)
+ │ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (9,10)-->(11)
│ ├── prune: (8-16)
│ ├── interesting orderings: (+12)
│ ├── project
│ │ ├── columns: b_new:15(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ ├── key: (12)
- │ │ ├── fd: ()-->(8,15), (12)-->(9-11,13,14)
+ │ │ ├── fd: ()-->(8,15), (12)-->(9-11,13,14), (9,10)-->(11)
│ │ ├── prune: (8-15)
│ │ ├── interesting orderings: (+12)
│ │ ├── select
│ │ │ ├── columns: a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ │ ├── key: (12)
- │ │ │ ├── fd: ()-->(8), (12)-->(9-11,13,14)
+ │ │ │ ├── fd: ()-->(8), (12)-->(9-11,13,14), (9,10)-->(11)
│ │ │ ├── prune: (9-14)
│ │ │ ├── interesting orderings: (+12)
│ │ │ ├── scan abcde
@@ -64,7 +64,7 @@ update abcde
│ │ │ │ │ │ └── variable: c:10 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (12)
- │ │ │ │ ├── fd: (12)-->(8-11,13,14)
+ │ │ │ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ │ │ │ ├── prune: (8-14)
│ │ │ │ └── interesting orderings: (+12)
│ │ │ └── filters
@@ -105,25 +105,25 @@ project
├── columns: column17:17(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) b_new:15(int!null) column16:16(int!null)
├── immutable
├── key: (12)
- ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (10)-->(17)
+ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (9,10)-->(11), (10)-->(17)
├── prune: (8-17)
├── interesting orderings: (+12)
├── project
│ ├── columns: column16:16(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) b_new:15(int!null)
│ ├── key: (12)
- │ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14)
+ │ ├── fd: ()-->(8,15,16), (12)-->(9-11,13,14), (9,10)-->(11)
│ ├── prune: (8-16)
│ ├── interesting orderings: (+12)
│ ├── project
│ │ ├── columns: b_new:15(int!null) a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ ├── key: (12)
- │ │ ├── fd: ()-->(8,15), (12)-->(9-11,13,14)
+ │ │ ├── fd: ()-->(8,15), (12)-->(9-11,13,14), (9,10)-->(11)
│ │ ├── prune: (8-15)
│ │ ├── interesting orderings: (+12)
│ │ ├── select
│ │ │ ├── columns: a:8(int!null) b:9(int) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ │ ├── key: (12)
- │ │ │ ├── fd: ()-->(8), (12)-->(9-11,13,14)
+ │ │ │ ├── fd: ()-->(8), (12)-->(9-11,13,14), (9,10)-->(11)
│ │ │ ├── prune: (9-14)
│ │ │ ├── interesting orderings: (+12)
│ │ │ ├── scan abcde
@@ -136,7 +136,7 @@ project
│ │ │ │ │ │ └── variable: c:10 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (12)
- │ │ │ │ ├── fd: (12)-->(8-11,13,14)
+ │ │ │ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ │ │ │ ├── prune: (8-14)
│ │ │ │ └── interesting orderings: (+12)
│ │ │ └── filters
@@ -215,7 +215,7 @@ project
│ │ │ │ │ │ └── variable: c:10 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (12)
- │ │ │ │ ├── fd: (12)-->(8-11,13,14)
+ │ │ │ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ │ │ │ ├── prune: (8-14)
│ │ │ │ └── interesting orderings: (+12)
│ │ │ └── filters
@@ -240,7 +240,7 @@ UPDATE abcde SET a=1 WHERE b=c RETURNING *;
project
├── columns: a:1(int!null) b:2(int!null) c:3(int!null) d:4(int)
├── volatile, mutations
- ├── fd: ()-->(1), (2)==(3), (3)==(2)
+ ├── fd: ()-->(1), (2)==(3), (3)==(2), (2)-->(4)
├── prune: (1-4)
└── update abcde
├── columns: a:1(int!null) b:2(int!null) c:3(int!null) d:4(int) rowid:5(int!null)
@@ -250,30 +250,30 @@ project
│ └── column16:16 => e:6
├── volatile, mutations
├── key: (5)
- ├── fd: ()-->(1), (2)==(3), (3)==(2), (5)-->(2-4)
+ ├── fd: ()-->(1), (2)==(3), (3)==(2), (5)-->(2-4), (2)-->(4)
└── project
├── columns: column17:17(int!null) a:8(int!null) b:9(int!null) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) a_new:15(int!null) column16:16(int!null)
├── immutable
├── key: (12)
- ├── fd: ()-->(15,16), (12)-->(8-11,13,14), (9)==(10), (10)==(9), (10)-->(17)
+ ├── fd: ()-->(15,16), (12)-->(8-11,13,14), (9,10)-->(11), (9)==(10), (10)==(9), (10)-->(17)
├── prune: (8-17)
├── interesting orderings: (+12)
├── project
│ ├── columns: column16:16(int!null) a:8(int!null) b:9(int!null) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal) a_new:15(int!null)
│ ├── key: (12)
- │ ├── fd: ()-->(15,16), (12)-->(8-11,13,14), (9)==(10), (10)==(9)
+ │ ├── fd: ()-->(15,16), (12)-->(8-11,13,14), (9,10)-->(11), (9)==(10), (10)==(9)
│ ├── prune: (8-16)
│ ├── interesting orderings: (+12)
│ ├── project
│ │ ├── columns: a_new:15(int!null) a:8(int!null) b:9(int!null) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ ├── key: (12)
- │ │ ├── fd: ()-->(15), (12)-->(8-11,13,14), (9)==(10), (10)==(9)
+ │ │ ├── fd: ()-->(15), (12)-->(8-11,13,14), (9,10)-->(11), (9)==(10), (10)==(9)
│ │ ├── prune: (8-15)
│ │ ├── interesting orderings: (+12)
│ │ ├── select
│ │ │ ├── columns: a:8(int!null) b:9(int!null) c:10(int!null) d:11(int) rowid:12(int!null) e:13(int) crdb_internal_mvcc_timestamp:14(decimal)
│ │ │ ├── key: (12)
- │ │ │ ├── fd: (12)-->(8-11,13,14), (9)==(10), (10)==(9)
+ │ │ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11), (9)==(10), (10)==(9)
│ │ │ ├── prune: (8,11-14)
│ │ │ ├── interesting orderings: (+12)
│ │ │ ├── scan abcde
@@ -286,7 +286,7 @@ project
│ │ │ │ │ │ └── variable: c:10 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (12)
- │ │ │ │ ├── fd: (12)-->(8-11,13,14)
+ │ │ │ │ ├── fd: (12)-->(8-11,13,14), (9,10)-->(11)
│ │ │ │ ├── prune: (8-14)
│ │ │ │ └── interesting orderings: (+12)
│ │ │ └── filters
diff --git a/pkg/sql/opt/memo/testdata/logprops/upsert b/pkg/sql/opt/memo/testdata/logprops/upsert
index ab929fd0230d..5dfa46e0bf96 100644
--- a/pkg/sql/opt/memo/testdata/logprops/upsert
+++ b/pkg/sql/opt/memo/testdata/logprops/upsert
@@ -57,7 +57,7 @@ project
├── cardinality: [0 - 1]
├── volatile
├── key: (15)
- ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16,20-23), (12)-->(13-16), (13,14)~~>(12,15,16), (14)-->(18), (18)-->(19)
+ ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16,20-23), (12)-->(13-16), (13,14)~~>(12,15,16), (13)~~>(14), (14)-->(18), (18)-->(19)
├── prune: (6,7,10-23)
├── reject-nulls: (12-16,18,19)
├── interesting orderings: (+15) (+12) (+13,+14,+15)
@@ -66,7 +66,7 @@ project
│ ├── cardinality: [0 - 1]
│ ├── volatile
│ ├── key: (15)
- │ ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (14)-->(18), (18)-->(19)
+ │ ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (13)~~>(14), (14)-->(18), (18)-->(19)
│ ├── prune: (6,7,10-19)
│ ├── reject-nulls: (12-16,18,19)
│ ├── interesting orderings: (+15) (+12) (+13,+14,+15)
@@ -75,7 +75,7 @@ project
│ │ ├── cardinality: [0 - 1]
│ │ ├── volatile
│ │ ├── key: (15)
- │ │ ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (14)-->(18)
+ │ │ ├── fd: ()-->(6,7,10,11,17), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (13)~~>(14), (14)-->(18)
│ │ ├── prune: (6,7,10-18)
│ │ ├── reject-nulls: (12-16,18)
│ │ ├── interesting orderings: (+15) (+12) (+13,+14,+15)
@@ -85,7 +85,7 @@ project
│ │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one)
│ │ │ ├── volatile
│ │ │ ├── key: (15)
- │ │ │ ├── fd: ()-->(6,7,10,11), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16)
+ │ │ │ ├── fd: ()-->(6,7,10,11), (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (13)~~>(14)
│ │ │ ├── prune: (12,15,16)
│ │ │ ├── reject-nulls: (12-16)
│ │ │ ├── interesting orderings: (+15) (+12) (+13,+14,+15)
@@ -152,7 +152,7 @@ project
│ │ │ │ │ ├── variable: b:13 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (15)
- │ │ │ │ ├── fd: (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16)
+ │ │ │ │ ├── fd: (15)-->(12-14,16), (12)-->(13-16), (13,14)~~>(12,15,16), (13)-->(14)
│ │ │ │ ├── prune: (12-16)
│ │ │ │ ├── interesting orderings: (+15) (+12) (+13,+14,+15)
│ │ │ │ └── unfiltered-cols: (12-16)
@@ -312,7 +312,7 @@ project
│ │ │ │ │ │ │ ├── variable: b:13 [type=int]
│ │ │ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ │ │ ├── key: (15)
- │ │ │ │ │ │ ├── fd: (15)-->(12-14), (12)-->(13-15), (13,14)~~>(12,15)
+ │ │ │ │ │ │ ├── fd: (15)-->(12-14), (12)-->(13-15), (13,14)~~>(12,15), (13)-->(14)
│ │ │ │ │ │ ├── prune: (12-15)
│ │ │ │ │ │ ├── interesting orderings: (+15) (+12) (+13,+14,+15)
│ │ │ │ │ │ └── unfiltered-cols: (12-16)
@@ -328,7 +328,7 @@ project
│ │ │ │ │ │ ├── variable: b:18 [type=int]
│ │ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ │ ├── key: (20)
- │ │ │ │ │ ├── fd: (20)-->(17-19), (17)-->(18-20), (18,19)~~>(17,20)
+ │ │ │ │ │ ├── fd: (20)-->(17-19), (17)-->(18-20), (18,19)~~>(17,20), (18)-->(19)
│ │ │ │ │ ├── prune: (17-20)
│ │ │ │ │ ├── interesting orderings: (+20) (+17) (+18,+19,+20)
│ │ │ │ │ └── unfiltered-cols: (17-21)
@@ -344,7 +344,7 @@ project
│ │ │ │ │ ├── variable: b:23 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (25)
- │ │ │ │ ├── fd: (25)-->(22-24), (22)-->(23-25), (23,24)~~>(22,25)
+ │ │ │ │ ├── fd: (25)-->(22-24), (22)-->(23-25), (23,24)~~>(22,25), (23)-->(24)
│ │ │ │ ├── prune: (22-25)
│ │ │ │ ├── interesting orderings: (+25) (+22) (+23,+24,+25)
│ │ │ │ └── unfiltered-cols: (22-26)
@@ -408,7 +408,7 @@ project
│ ├── cardinality: [1 - 2]
│ ├── volatile
│ ├── lax-key: (8,13)
- │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14,16,17), (10)-->(11-14), (11,12)~~>(10,13,14), (11)-->(15), (8,13)-->(18)
+ │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14,16,17), (10)-->(11-14), (11,12)~~>(10,13,14), (11)~~>(12), (11)-->(15), (8,13)-->(18)
│ ├── prune: (6-18)
│ ├── reject-nulls: (10-15)
│ ├── interesting orderings: (+13) (+10) (+11,+12,+13)
@@ -417,7 +417,7 @@ project
│ │ ├── cardinality: [1 - 2]
│ │ ├── volatile
│ │ ├── lax-key: (8,13)
- │ │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14), (11)-->(15)
+ │ │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14), (11)~~>(12), (11)-->(15)
│ │ ├── prune: (6-15)
│ │ ├── reject-nulls: (10-15)
│ │ ├── interesting orderings: (+13) (+10) (+11,+12,+13)
@@ -427,7 +427,7 @@ project
│ │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one)
│ │ │ ├── volatile
│ │ │ ├── lax-key: (8,13)
- │ │ │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14)
+ │ │ │ ├── fd: ()-->(7,9), (8)~~>(6), (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14), (11)~~>(12)
│ │ │ ├── prune: (10-12,14)
│ │ │ ├── reject-nulls: (10-14)
│ │ │ ├── interesting orderings: (+13) (+10) (+11,+12,+13)
@@ -481,7 +481,7 @@ project
│ │ │ │ │ ├── variable: b:11 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (13)
- │ │ │ │ ├── fd: (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14)
+ │ │ │ │ ├── fd: (13)-->(10-12,14), (10)-->(11-14), (11,12)~~>(10,13,14), (11)-->(12)
│ │ │ │ ├── prune: (10-14)
│ │ │ │ ├── interesting orderings: (+13) (+10) (+11,+12,+13)
│ │ │ │ └── unfiltered-cols: (10-14)
@@ -550,7 +550,7 @@ upsert abc
├── columns: upsert_a:20(int) upsert_b:21(int!null) upsert_c:22(int!null) upsert_rowid:23(int) y:7(int!null) column10:10(int!null) column11:11(int) column12:12(int!null) a:13(int) b:14(int) c:15(int) rowid:16(int) abc.crdb_internal_mvcc_timestamp:17(decimal) b_new:18(int!null) column19:19(int!null)
├── volatile
├── key: (7)
- ├── fd: ()-->(10,12,18,19), (7)-->(11,13-17,20), (16)-->(13-15,17), (13)-->(14-17,21,22), (14,15)~~>(13,16,17), (11,16)-->(23)
+ ├── fd: ()-->(10,12,18,19), (7)-->(11,13-17,20), (16)-->(13-15,17), (13)-->(14-17,21,22), (14,15)~~>(13,16,17), (14)~~>(15), (11,16)-->(23)
├── prune: (7,10-23)
├── reject-nulls: (13-17)
├── interesting orderings: (+16) (+13) (+14,+15,+16)
@@ -558,7 +558,7 @@ upsert abc
│ ├── columns: column19:19(int!null) y:7(int!null) column10:10(int!null) column11:11(int) column12:12(int!null) a:13(int) b:14(int) c:15(int) rowid:16(int) abc.crdb_internal_mvcc_timestamp:17(decimal) b_new:18(int!null)
│ ├── volatile
│ ├── key: (7)
- │ ├── fd: ()-->(10,12,18,19), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17)
+ │ ├── fd: ()-->(10,12,18,19), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17), (14)~~>(15)
│ ├── prune: (7,10-19)
│ ├── reject-nulls: (13-17)
│ ├── interesting orderings: (+16) (+13) (+14,+15,+16)
@@ -566,7 +566,7 @@ upsert abc
│ │ ├── columns: b_new:18(int!null) y:7(int!null) column10:10(int!null) column11:11(int) column12:12(int!null) a:13(int) b:14(int) c:15(int) rowid:16(int) abc.crdb_internal_mvcc_timestamp:17(decimal)
│ │ ├── volatile
│ │ ├── key: (7)
- │ │ ├── fd: ()-->(10,12,18), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17)
+ │ │ ├── fd: ()-->(10,12,18), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17), (14)~~>(15)
│ │ ├── prune: (7,10-18)
│ │ ├── reject-nulls: (13-17)
│ │ ├── interesting orderings: (+16) (+13) (+14,+15,+16)
@@ -575,7 +575,7 @@ upsert abc
│ │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one)
│ │ │ ├── volatile
│ │ │ ├── key: (7)
- │ │ │ ├── fd: ()-->(10,12), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17)
+ │ │ │ ├── fd: ()-->(10,12), (7)-->(11,13-17), (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17), (14)~~>(15)
│ │ │ ├── prune: (14-17)
│ │ │ ├── reject-nulls: (13-17)
│ │ │ ├── interesting orderings: (+16) (+13) (+14,+15,+16)
@@ -640,7 +640,7 @@ upsert abc
│ │ │ │ │ ├── variable: b:14 [type=int]
│ │ │ │ │ └── const: 1 [type=int]
│ │ │ │ ├── key: (16)
- │ │ │ │ ├── fd: (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17)
+ │ │ │ │ ├── fd: (16)-->(13-15,17), (13)-->(14-17), (14,15)~~>(13,16,17), (14)-->(15)
│ │ │ │ ├── prune: (13-17)
│ │ │ │ ├── interesting orderings: (+16) (+13) (+14,+15,+16)
│ │ │ │ └── unfiltered-cols: (13-17)
diff --git a/pkg/sql/opt/memo/testdata/stats/insert b/pkg/sql/opt/memo/testdata/stats/insert
index 361ffbfb7b67..0200408ab9b9 100644
--- a/pkg/sql/opt/memo/testdata/stats/insert
+++ b/pkg/sql/opt/memo/testdata/stats/insert
@@ -42,7 +42,7 @@ with &1
├── columns: x:10(string!null) y:11(int!null) z:12(float!null)
├── volatile, mutations
├── stats: [rows=66.5105818, distinct(12)=43.4214373, null(12)=0]
- ├── fd: ()-->(10)
+ ├── fd: ()-->(10), (11)-->(12)
├── insert xyz
│ ├── columns: xyz.x:1(string!null) xyz.y:2(int!null) xyz.z:3(float)
│ ├── insert-mapping:
@@ -51,16 +51,16 @@ with &1
│ │ └── c:7 => xyz.z:3
│ ├── volatile, mutations
│ ├── stats: [rows=200, distinct(1)=1, null(1)=0, distinct(2)=200, null(2)=0, distinct(3)=130.264312, null(3)=2]
- │ ├── fd: ()-->(1)
+ │ ├── fd: ()-->(1), (2)-->(3)
│ └── project
│ ├── columns: a:5(int!null) b:6(string!null) c:7(float)
│ ├── stats: [rows=200, distinct(5)=200, null(5)=0, distinct(6)=1, null(6)=0, distinct(7)=130.264312, null(7)=2]
- │ ├── fd: ()-->(6)
+ │ ├── fd: ()-->(6), (5)-->(7)
│ └── select
│ ├── columns: a:5(int!null) b:6(string!null) c:7(float) rowid:8(int!null) abc.crdb_internal_mvcc_timestamp:9(decimal)
│ ├── stats: [rows=200, distinct(5)=200, null(5)=0, distinct(6)=1, null(6)=0, distinct(7)=130.264312, null(7)=2]
│ ├── key: (8)
- │ ├── fd: ()-->(6), (8)-->(5,7,9)
+ │ ├── fd: ()-->(6), (8)-->(5,7,9), (5)-->(7)
│ ├── scan abc
│ │ ├── columns: a:5(int!null) b:6(string) c:7(float) rowid:8(int!null) abc.crdb_internal_mvcc_timestamp:9(decimal)
│ │ ├── computed column expressions
@@ -68,13 +68,13 @@ with &1
│ │ │ └── a:5::FLOAT8 [type=float]
│ │ ├── stats: [rows=2000, distinct(5)=2000, null(5)=0, distinct(6)=10, null(6)=0, distinct(7)=200, null(7)=20, distinct(8)=2000, null(8)=0]
│ │ ├── key: (8)
- │ │ └── fd: (8)-->(5-7,9)
+ │ │ └── fd: (8)-->(5-7,9), (5)-->(7)
│ └── filters
│ └── b:6 = 'foo' [type=bool, outer=(6), constraints=(/6: [/'foo' - /'foo']; tight), fd=()-->(6)]
└── select
├── columns: x:10(string!null) y:11(int!null) z:12(float!null)
├── stats: [rows=66.5105818, distinct(12)=43.4214373, null(12)=0]
- ├── fd: ()-->(10)
+ ├── fd: ()-->(10), (11)-->(12)
├── with-scan &1
│ ├── columns: x:10(string!null) y:11(int!null) z:12(float)
│ ├── mapping:
@@ -82,7 +82,7 @@ with &1
│ │ ├── xyz.y:2(int) => y:11(int)
│ │ └── xyz.z:3(float) => z:12(float)
│ ├── stats: [rows=200, distinct(10)=1, null(10)=0, distinct(11)=200, null(11)=0, distinct(12)=130.264312, null(12)=2]
- │ └── fd: ()-->(10)
+ │ └── fd: ()-->(10), (11)-->(12)
└── filters
└── z:12 > 1.0 [type=bool, outer=(12), constraints=(/12: [/1.0000000000000002 - ]; tight)]
@@ -99,16 +99,18 @@ insert xyz
├── cardinality: [0 - 0]
├── volatile, mutations
├── stats: [rows=0]
+ ├── fd: (2)-->(3)
└── project
├── columns: a:5(int!null) b:6(string) c:7(float)
├── cardinality: [0 - 0]
├── stats: [rows=0]
+ ├── fd: (5)-->(7)
└── select
├── columns: a:5(int!null) b:6(string) c:7(float) rowid:8(int!null) abc.crdb_internal_mvcc_timestamp:9(decimal)
├── cardinality: [0 - 0]
├── stats: [rows=0]
├── key: (8)
- ├── fd: (8)-->(5-7,9)
+ ├── fd: (8)-->(5-7,9), (5)-->(7)
├── scan abc
│ ├── columns: a:5(int!null) b:6(string) c:7(float) rowid:8(int!null) abc.crdb_internal_mvcc_timestamp:9(decimal)
│ ├── computed column expressions
@@ -116,6 +118,6 @@ insert xyz
│ │ └── a:5::FLOAT8 [type=float]
│ ├── stats: [rows=2000]
│ ├── key: (8)
- │ └── fd: (8)-->(5-7,9)
+ │ └── fd: (8)-->(5-7,9), (5)-->(7)
└── filters
└── false [type=bool, constraints=(contradiction; tight)]
diff --git a/pkg/sql/opt/memo/testdata/stats/upsert b/pkg/sql/opt/memo/testdata/stats/upsert
index 2bcc66e14ba3..db322d56032b 100644
--- a/pkg/sql/opt/memo/testdata/stats/upsert
+++ b/pkg/sql/opt/memo/testdata/stats/upsert
@@ -157,7 +157,7 @@ with &1
│ │ │ │ │ │ │ │ └── a:5::FLOAT8 [type=float]
│ │ │ │ │ │ │ ├── stats: [rows=2000, distinct(5)=2000, null(5)=0, distinct(6)=10, null(6)=0, distinct(7)=200, null(7)=20, distinct(8)=2000, null(8)=0]
│ │ │ │ │ │ │ ├── key: (8)
- │ │ │ │ │ │ │ └── fd: (8)-->(5-7,9)
+ │ │ │ │ │ │ │ └── fd: (8)-->(5-7,9), (5)-->(7)
│ │ │ │ │ │ └── filters
│ │ │ │ │ │ └── c:7 = 1.0 [type=bool, outer=(7), constraints=(/7: [/1.0 - /1.0]; tight), fd=()-->(7)]
│ │ │ │ │ └── projections
@@ -223,7 +223,7 @@ upsert xyz
│ ├── cardinality: [0 - 0]
│ ├── stats: [rows=0]
│ ├── key: (8)
- │ ├── fd: (8)-->(5-7,9)
+ │ ├── fd: (8)-->(5-7,9), (5)-->(7)
│ ├── scan abc
│ │ ├── columns: a:5(int!null) b:6(string) c:7(float) rowid:8(int!null) abc.crdb_internal_mvcc_timestamp:9(decimal)
│ │ ├── computed column expressions
@@ -231,7 +231,7 @@ upsert xyz
│ │ │ └── a:5::FLOAT8 [type=float]
│ │ ├── stats: [rows=2000]
│ │ ├── key: (8)
- │ │ └── fd: (8)-->(5-7,9)
+ │ │ └── fd: (8)-->(5-7,9), (5)-->(7)
│ └── filters
│ └── false [type=bool, constraints=(contradiction; tight)]
└── projections
diff --git a/pkg/sql/opt/norm/testdata/rules/prune_cols b/pkg/sql/opt/norm/testdata/rules/prune_cols
index 6d4d935295a6..baac757e9e03 100644
--- a/pkg/sql/opt/norm/testdata/rules/prune_cols
+++ b/pkg/sql/opt/norm/testdata/rules/prune_cols
@@ -2132,23 +2132,23 @@ update computed
├── columns: column14:14 column15:15 a:7!null c:9 d:10 x:11 c_new:13
├── immutable
├── key: (7)
- ├── fd: (7)-->(9-11), (9)-->(13), (13)-->(14,15)
+ ├── fd: (7)-->(9-11), (9)-->(10,13), (13)-->(14,15)
├── project
│ ├── columns: c_new:13 a:7!null c:9 d:10 x:11
│ ├── immutable
│ ├── key: (7)
- │ ├── fd: (7)-->(9-11), (9)-->(13)
+ │ ├── fd: (7)-->(9-11), (9)-->(10,13)
│ ├── select
│ │ ├── columns: a:7!null b:8!null c:9 d:10 x:11
│ │ ├── key: (7)
- │ │ ├── fd: ()-->(8), (7)-->(9-11)
+ │ │ ├── fd: ()-->(8), (7)-->(9-11), (9)-->(10)
│ │ ├── scan computed
│ │ │ ├── columns: a:7!null b:8 c:9 d:10 x:11
│ │ │ ├── computed column expressions
│ │ │ │ └── d:10
│ │ │ │ └── c:9 + 1
│ │ │ ├── key: (7)
- │ │ │ └── fd: (7)-->(8-11)
+ │ │ │ └── fd: (7)-->(8-11), (9)-->(10)
│ │ └── filters
│ │ └── b:8 = 1 [outer=(8), constraints=(/8: [/1 - /1]; tight), fd=()-->(8)]
│ └── projections
diff --git a/pkg/sql/opt/testutils/opttester/testdata/opt-steps b/pkg/sql/opt/testutils/opttester/testdata/opt-steps
index 659777030ba6..10efb04b73f6 100644
--- a/pkg/sql/opt/testutils/opttester/testdata/opt-steps
+++ b/pkg/sql/opt/testutils/opttester/testdata/opt-steps
@@ -688,11 +688,11 @@ Initial expression
================================================================================
project
├── columns: k:1(int!null) c:2(bool)
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1,2)
└── select
├── columns: k:1(int!null) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
├── key: (3)
- ├── fd: ()-->(1), (3)-->(2,4)
+ ├── fd: ()-->(1,2), (3)-->(4)
├── scan comp
│ ├── columns: k:1(int) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
│ ├── computed column expressions
@@ -704,7 +704,7 @@ Initial expression
│ │ ├── const: 3 [type=int]
│ │ └── const: 2 [type=int]
│ ├── key: (3)
- │ └── fd: (3)-->(1,2,4)
+ │ └── fd: (3)-->(1,2,4), (1)-->(2)
└── filters
└── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
├── variable: k:1 [type=int]
@@ -715,11 +715,11 @@ NormalizeInConst
================================================================================
project
├── columns: k:1(int!null) c:2(bool)
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1,2)
└── select
├── columns: k:1(int!null) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
├── key: (3)
- ├── fd: ()-->(1), (3)-->(2,4)
+ ├── fd: ()-->(1,2), (3)-->(4)
├── scan comp
│ ├── columns: k:1(int) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
│ ├── computed column expressions
@@ -733,7 +733,7 @@ NormalizeInConst
+ │ │ ├── const: 2 [type=int]
+ │ │ └── const: 3 [type=int]
│ ├── key: (3)
- │ └── fd: (3)-->(1,2,4)
+ │ └── fd: (3)-->(1,2,4), (1)-->(2)
└── filters
└── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
├── variable: k:1 [type=int]
@@ -744,34 +744,27 @@ PruneSelectCols
================================================================================
project
├── columns: k:1(int!null) c:2(bool)
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1,2)
└── select
- ├── columns: k:1(int!null) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
- ├── key: (3)
- - ├── fd: ()-->(1), (3)-->(2,4)
+ - ├── fd: ()-->(1,2), (3)-->(4)
+ ├── columns: k:1(int!null) c:2(bool)
- + ├── fd: ()-->(1)
+ + ├── fd: ()-->(1,2)
├── scan comp
- │ ├── columns: k:1(int) c:2(bool) rowid:3(int!null) crdb_internal_mvcc_timestamp:4(decimal)
- - │ ├── computed column expressions
- - │ │ └── c:2
- - │ │ └── in [type=bool]
- - │ │ ├── variable: k:1 [type=int]
- - │ │ └── tuple [type=tuple{int, int, int}]
- - │ │ ├── const: 1 [type=int]
- - │ │ ├── const: 2 [type=int]
- - │ │ └── const: 3 [type=int]
- - │ ├── key: (3)
- - │ └── fd: (3)-->(1,2,4)
+ │ ├── columns: k:1(int) c:2(bool)
- + │ └── computed column expressions
- + │ └── c:2
- + │ └── in [type=bool]
- + │ ├── variable: k:1 [type=int]
- + │ └── tuple [type=tuple{int, int, int}]
- + │ ├── const: 1 [type=int]
- + │ ├── const: 2 [type=int]
- + │ └── const: 3 [type=int]
+ │ ├── computed column expressions
+ │ │ └── c:2
+ │ │ └── in [type=bool]
+ │ │ ├── variable: k:1 [type=int]
+ │ │ └── tuple [type=tuple{int, int, int}]
+ │ │ ├── const: 1 [type=int]
+ │ │ ├── const: 2 [type=int]
+ │ │ └── const: 3 [type=int]
+ - │ ├── key: (3)
+ - │ └── fd: (3)-->(1,2,4), (1)-->(2)
+ + │ └── fd: (1)-->(2)
└── filters
└── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
├── variable: k:1 [type=int]
@@ -783,34 +776,36 @@ EliminateProject
-project
+select
├── columns: k:1(int!null) c:2(bool)
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1,2)
- └── select
- ├── columns: k:1(int!null) c:2(bool)
- - ├── fd: ()-->(1)
+ - ├── fd: ()-->(1,2)
- ├── scan comp
- │ ├── columns: k:1(int) c:2(bool)
- - │ └── computed column expressions
- - │ └── c:2
- - │ └── in [type=bool]
- - │ ├── variable: k:1 [type=int]
- - │ └── tuple [type=tuple{int, int, int}]
- - │ ├── const: 1 [type=int]
- - │ ├── const: 2 [type=int]
- - │ └── const: 3 [type=int]
+ - │ ├── computed column expressions
+ - │ │ └── c:2
+ - │ │ └── in [type=bool]
+ - │ │ ├── variable: k:1 [type=int]
+ - │ │ └── tuple [type=tuple{int, int, int}]
+ - │ │ ├── const: 1 [type=int]
+ - │ │ ├── const: 2 [type=int]
+ - │ │ └── const: 3 [type=int]
+ - │ └── fd: (1)-->(2)
- └── filters
- └── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
- ├── variable: k:1 [type=int]
- └── const: 1 [type=int]
+ ├── scan comp
+ │ ├── columns: k:1(int) c:2(bool)
- + │ └── computed column expressions
- + │ └── c:2
- + │ └── in [type=bool]
- + │ ├── variable: k:1 [type=int]
- + │ └── tuple [type=tuple{int, int, int}]
- + │ ├── const: 1 [type=int]
- + │ ├── const: 2 [type=int]
- + │ └── const: 3 [type=int]
+ + │ ├── computed column expressions
+ + │ │ └── c:2
+ + │ │ └── in [type=bool]
+ + │ │ ├── variable: k:1 [type=int]
+ + │ │ └── tuple [type=tuple{int, int, int}]
+ + │ │ ├── const: 1 [type=int]
+ + │ │ ├── const: 2 [type=int]
+ + │ │ └── const: 3 [type=int]
+ + │ └── fd: (1)-->(2)
+ └── filters
+ └── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
+ ├── variable: k:1 [type=int]
@@ -820,19 +815,19 @@ GenerateIndexScans (higher cost)
--------------------------------------------------------------------------------
select
├── columns: k:1(int!null) c:2(bool)
- ├── fd: ()-->(1)
+ ├── fd: ()-->(1,2)
- ├── scan comp
- - │ ├── columns: k:1(int) c:2(bool)
- - │ └── computed column expressions
- - │ └── c:2
- - │ └── in [type=bool]
- - │ ├── variable: k:1 [type=int]
- - │ └── tuple [type=tuple{int, int, int}]
- - │ ├── const: 1 [type=int]
- - │ ├── const: 2 [type=int]
- - │ └── const: 3 [type=int]
+ ├── scan comp@secondary
- + │ └── columns: k:1(int) c:2(bool)
+ │ ├── columns: k:1(int) c:2(bool)
+ - │ ├── computed column expressions
+ - │ │ └── c:2
+ - │ │ └── in [type=bool]
+ - │ │ ├── variable: k:1 [type=int]
+ - │ │ └── tuple [type=tuple{int, int, int}]
+ - │ │ ├── const: 1 [type=int]
+ - │ │ ├── const: 2 [type=int]
+ - │ │ └── const: 3 [type=int]
+ │ └── fd: (1)-->(2)
└── filters
└── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
├── variable: k:1 [type=int]
@@ -850,23 +845,24 @@ FoldComparison
-select
+scan comp@secondary
├── columns: k:1(int!null) c:2(bool)
- - ├── fd: ()-->(1)
+ - ├── fd: ()-->(1,2)
- ├── scan comp
- │ ├── columns: k:1(int) c:2(bool)
- - │ └── computed column expressions
- - │ └── c:2
- - │ └── in [type=bool]
- - │ ├── variable: k:1 [type=int]
- - │ └── tuple [type=tuple{int, int, int}]
- - │ ├── const: 1 [type=int]
- - │ ├── const: 2 [type=int]
- - │ └── const: 3 [type=int]
+ - │ ├── computed column expressions
+ - │ │ └── c:2
+ - │ │ └── in [type=bool]
+ - │ │ ├── variable: k:1 [type=int]
+ - │ │ └── tuple [type=tuple{int, int, int}]
+ - │ │ ├── const: 1 [type=int]
+ - │ │ ├── const: 2 [type=int]
+ - │ │ └── const: 3 [type=int]
+ - │ └── fd: (1)-->(2)
- └── filters
- └── eq [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)]
- ├── variable: k:1 [type=int]
- └── const: 1 [type=int]
+ ├── constraint: /2/1/3: [/true/1 - /true/1]
- + └── fd: ()-->(1)
+ + └── fd: ()-->(1,2)
--------------------------------------------------------------------------------
GenerateZigzagJoins (no changes)
--------------------------------------------------------------------------------
@@ -877,7 +873,7 @@ Final best expression
scan comp@secondary
├── columns: k:1(int!null) c:2(bool)
├── constraint: /2/1/3: [/true/1 - /true/1]
- └── fd: ()-->(1)
+ └── fd: ()-->(1,2)
exec-ddl
CREATE TABLE t (i INT)
diff --git a/pkg/sql/opt/xform/testdata/rules/computed b/pkg/sql/opt/xform/testdata/rules/computed
index 081259fcd8cd..21bb92dcf8b8 100644
--- a/pkg/sql/opt/xform/testdata/rules/computed
+++ b/pkg/sql/opt/xform/testdata/rules/computed
@@ -72,7 +72,7 @@ SELECT k_int, k_int_2, c_mult, c_mult_2, c_int FROM t_mult WHERE k_int = 5 AND k
scan t_mult@c_mult_index
├── columns: k_int:1!null k_int_2:2!null c_mult:4 c_mult_2:5 c_int:3
├── constraint: /4/5/3/1/2/6: [/300/6/1/5/10 - /300/6/1/5/10]
- └── fd: ()-->(1,2)
+ └── fd: ()-->(1-5)
# Test computed + check columns in same table.
opt
@@ -81,7 +81,7 @@ SELECT * FROM hashed WHERE k = 'andy'
scan hashed@secondary
├── columns: k:1!null hash:2
├── constraint: /2/1/3: [/1/'andy' - /1/'andy']
- └── fd: ()-->(1)
+ └── fd: ()-->(1,2)
# Don't constrain when filter has multiple columns.
opt
@@ -190,4 +190,4 @@ SELECT a, b FROM null_col WHERE a = 1
scan null_col@ab
├── columns: a:1!null b:2
├── constraint: /1/2/3: [/1/NULL - /1/NULL]
- └── fd: ()-->(1)
+ └── fd: ()-->(1,2)
diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join
index 562967ac33f4..efaab871851f 100644
--- a/pkg/sql/opt/xform/testdata/rules/join
+++ b/pkg/sql/opt/xform/testdata/rules/join
@@ -3408,6 +3408,7 @@ left-join (lookup t59738_ab)
├── columns: c:1 d:2 a:5 b:6
├── key columns: [7] = [7]
├── lookup columns are key
+ ├── fd: (5)~~>(6)
├── left-join (lookup t59738_ab@secondary)
│ ├── columns: c:1 d:2 a:5 t59738_ab.rowid:7
│ ├── flags: force lookup join (into right side)
@@ -3979,7 +3980,7 @@ inner-join (lookup abcd_comp)
├── columns: m:1!null n:2 a:5!null b:6 c:7 d:8!null
├── key columns: [9] = [9]
├── lookup columns are key
- ├── fd: ()-->(8), (1)==(5), (5)==(1)
+ ├── fd: ()-->(6,8), (1)==(5), (5)==(1)
├── inner-join (lookup abcd_comp@secondary)
│ ├── columns: m:1!null n:2 a:5!null b:6!null c:7 abcd_comp.rowid:9!null
│ ├── key columns: [1 11] = [5 6]
@@ -4002,7 +4003,7 @@ inner-join (lookup abcd_comp)
├── columns: m:1!null n:2 a:5!null b:6 c:7!null d:8!null
├── key columns: [9] = [9]
├── lookup columns are key
- ├── fd: ()-->(7,8), (1)==(5), (5)==(1)
+ ├── fd: ()-->(6-8), (1)==(5), (5)==(1)
├── inner-join (lookup abcd_comp@secondary)
│ ├── columns: m:1!null n:2 a:5!null b:6!null c:7!null abcd_comp.rowid:9!null
│ ├── key columns: [1 11 12] = [5 6 7]
diff --git a/pkg/sql/opt/xform/testdata/rules/select b/pkg/sql/opt/xform/testdata/rules/select
index e1fe4b07a71a..62c34dabbcc2 100644
--- a/pkg/sql/opt/xform/testdata/rules/select
+++ b/pkg/sql/opt/xform/testdata/rules/select
@@ -3989,11 +3989,11 @@ select
├── columns: k:1!null a:2!null b:3!null c:4 geom:5!null
├── immutable
├── key: (1)
- ├── fd: (1)-->(2-5)
+ ├── fd: (1)-->(2-5), (2)-->(4)
├── index-join mc
│ ├── columns: k:1!null a:2!null b:3!null c:4 geom:5
│ ├── key: (1)
- │ ├── fd: (1)-->(2-5)
+ │ ├── fd: (1)-->(2-5), (2)-->(4)
│ └── inverted-filter
│ ├── columns: k:1!null
│ ├── inverted expression: /7
@@ -4037,11 +4037,11 @@ select
├── columns: k:1!null a:2!null b:3!null c:4 geom:5!null
├── immutable
├── key: (1)
- ├── fd: ()-->(2), (1)-->(3-5)
+ ├── fd: ()-->(2,4), (1)-->(3,5)
├── index-join mc
│ ├── columns: k:1!null a:2!null b:3!null c:4 geom:5
│ ├── key: (1)
- │ ├── fd: (1)-->(2-5)
+ │ ├── fd: (1)-->(2-5), (2)-->(4)
│ └── inverted-filter
│ ├── columns: k:1!null
│ ├── inverted expression: /8
@@ -4086,11 +4086,11 @@ select
├── columns: k:1!null a:2!null b:3!null c:4 geom:5!null
├── immutable
├── key: (1)
- ├── fd: ()-->(2), (1)-->(3-5)
+ ├── fd: ()-->(2,4), (1)-->(3,5)
├── index-join mc
│ ├── columns: k:1!null a:2!null b:3!null c:4 geom:5
│ ├── key: (1)
- │ ├── fd: (1)-->(2-5)
+ │ ├── fd: (1)-->(2-5), (2)-->(4)
│ └── inverted-filter
│ ├── columns: k:1!null
│ ├── inverted expression: /9
@@ -4125,11 +4125,11 @@ select
├── columns: k:1!null a:2!null b:3!null c:4 geom:5!null
├── immutable
├── key: (1)
- ├── fd: (1)-->(2-5)
+ ├── fd: (1)-->(2-5), (2)-->(4)
├── index-join mc
│ ├── columns: k:1!null a:2!null b:3!null c:4 geom:5
│ ├── key: (1)
- │ ├── fd: (1)-->(2-5)
+ │ ├── fd: (1)-->(2-5), (2)-->(4)
│ └── inverted-filter
│ ├── columns: k:1!null
│ ├── inverted expression: /9
From 392effc5dab5737954b0e26354a15b52e79338b4 Mon Sep 17 00:00:00 2001
From: Andy Yang
Date: Wed, 24 Feb 2021 21:21:55 -0500
Subject: [PATCH 2/7] builtins: implement ST_MakePoint and ST_MakePointM
This patch implements the geometry builtins `ST_MakePoint`
and `ST_MakePointM`.
Release justification: low-risk update to new functionality
Release note (sql change): The geometry builtins `ST_MakePoint`
and `ST_MakePointM` have been implemented, providing an easy way
to create new Points.
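For example (results as exercised by the new logic tests in this
patch; the literal arguments are illustrative):
    SELECT st_astext(st_makepoint(1, 2));        -- POINT (1 2)
    SELECT st_astext(st_makepoint(1, 2, 3));     -- POINT Z (1 2 3)
    SELECT st_astext(st_makepoint(1, 2, 3, 4));  -- POINT ZM (1 2 3 4)
    SELECT st_astext(st_makepointm(1, 2, 3));    -- POINT M (1 2 3)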
---
docs/generated/sql/functions.md | 6 ++
pkg/geo/geo.go | 18 ++++-
.../testdata/logic_test/geospatial_zm | 17 +++++
pkg/sql/sem/builtins/geo_builtins.go | 71 ++++++++++++++++++-
4 files changed, 110 insertions(+), 2 deletions(-)
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index 349716acef1d..791528e298ea 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -1922,6 +1922,12 @@ calculated, the result is transformed back into a Geography with SRID 4326.
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry | Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).
diff --git a/pkg/geo/geo.go b/pkg/geo/geo.go
index 36b1ae6d353e..268248558ffe 100644
--- a/pkg/geo/geo.go
+++ b/pkg/geo/geo.go
@@ -133,7 +133,23 @@ func MakeGeometryUnsafe(spatialObject geopb.SpatialObject) Geometry {
// MakeGeometryFromPointCoords makes a point from x, y coordinates.
func MakeGeometryFromPointCoords(x, y float64) (Geometry, error) {
- s, err := spatialObjectFromGeomT(geom.NewPointFlat(geom.XY, []float64{x, y}), geopb.SpatialObjectType_GeometryType)
+ return MakeGeometryFromLayoutAndPointCoords(geom.XY, []float64{x, y})
+}
+
+// MakeGeometryFromLayoutAndPointCoords makes a point with a given layout and an ordered slice of coordinates.
+func MakeGeometryFromLayoutAndPointCoords(
+ layout geom.Layout, flatCoords []float64,
+) (Geometry, error) {
+ // Validate that the stride matches what is expected for the layout.
+ switch {
+ case layout == geom.XY && len(flatCoords) == 2:
+ case layout == geom.XYM && len(flatCoords) == 3:
+ case layout == geom.XYZ && len(flatCoords) == 3:
+ case layout == geom.XYZM && len(flatCoords) == 4:
+ default:
+ return Geometry{}, errors.Newf("mismatch between layout %d and stride %d", layout, len(flatCoords))
+ }
+ s, err := spatialObjectFromGeomT(geom.NewPointFlat(layout, flatCoords), geopb.SpatialObjectType_GeometryType)
if err != nil {
return Geometry{}, err
}
diff --git a/pkg/sql/logictest/testdata/logic_test/geospatial_zm b/pkg/sql/logictest/testdata/logic_test/geospatial_zm
index b1af2d8e35c3..bc2bdd90e4f4 100644
--- a/pkg/sql/logictest/testdata/logic_test/geospatial_zm
+++ b/pkg/sql/logictest/testdata/logic_test/geospatial_zm
@@ -83,3 +83,20 @@ INSERT INTO geom_4d VALUES ('point(1 2)')
statement error pq: object type PointZ does not match column dimensionality GeometryZM
INSERT INTO geom_4d VALUES ('pointz(1 2 3)')
+
+# Builtins for creating Points
+query T
+SELECT st_astext(point) FROM
+( VALUES
+ (st_point(1, 2)),
+ (st_makepoint(1, 2)),
+ (st_makepoint(1, 2, 3)),
+ (st_makepoint(1, 2, 3, 4)),
+ (st_makepointm(1, 2, 3))
+) AS t(point)
+----
+POINT (1 2)
+POINT (1 2)
+POINT Z (1 2 3)
+POINT ZM (1 2 3 4)
+POINT M (1 2 3)
diff --git a/pkg/sql/sem/builtins/geo_builtins.go b/pkg/sql/sem/builtins/geo_builtins.go
index 8e361c55956d..7be256f7f23b 100644
--- a/pkg/sql/sem/builtins/geo_builtins.go
+++ b/pkg/sql/sem/builtins/geo_builtins.go
@@ -758,6 +758,76 @@ SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').
tree.VolatilityImmutable,
),
),
+ "st_makepoint": makeBuiltin(
+ defProps(),
+ tree.Overload{
+ Types: tree.ArgTypes{{"x", types.Float}, {"y", types.Float}},
+ ReturnType: tree.FixedReturnType(types.Geometry),
+ Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ x := float64(tree.MustBeDFloat(args[0]))
+ y := float64(tree.MustBeDFloat(args[1]))
+ g, err := geo.MakeGeometryFromLayoutAndPointCoords(geom.XY, []float64{x, y})
+ if err != nil {
+ return nil, err
+ }
+ return tree.NewDGeometry(g), nil
+ },
+ Info: infoBuilder{info: `Returns a new Point with the given X and Y coordinates.`}.String(),
+ Volatility: tree.VolatilityImmutable,
+ },
+ tree.Overload{
+ Types: tree.ArgTypes{{"x", types.Float}, {"y", types.Float}, {"z", types.Float}},
+ ReturnType: tree.FixedReturnType(types.Geometry),
+ Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ x := float64(tree.MustBeDFloat(args[0]))
+ y := float64(tree.MustBeDFloat(args[1]))
+ z := float64(tree.MustBeDFloat(args[2]))
+ g, err := geo.MakeGeometryFromLayoutAndPointCoords(geom.XYZ, []float64{x, y, z})
+ if err != nil {
+ return nil, err
+ }
+ return tree.NewDGeometry(g), nil
+ },
+ Info: infoBuilder{info: `Returns a new Point with the given X, Y, and Z coordinates.`}.String(),
+ Volatility: tree.VolatilityImmutable,
+ },
+ tree.Overload{
+ Types: tree.ArgTypes{{"x", types.Float}, {"y", types.Float}, {"z", types.Float}, {"m", types.Float}},
+ ReturnType: tree.FixedReturnType(types.Geometry),
+ Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ x := float64(tree.MustBeDFloat(args[0]))
+ y := float64(tree.MustBeDFloat(args[1]))
+ z := float64(tree.MustBeDFloat(args[2]))
+ m := float64(tree.MustBeDFloat(args[3]))
+ g, err := geo.MakeGeometryFromLayoutAndPointCoords(geom.XYZM, []float64{x, y, z, m})
+ if err != nil {
+ return nil, err
+ }
+ return tree.NewDGeometry(g), nil
+ },
+ Info: infoBuilder{info: `Returns a new Point with the given X, Y, Z, and M coordinates.`}.String(),
+ Volatility: tree.VolatilityImmutable,
+ },
+ ),
+ "st_makepointm": makeBuiltin(
+ defProps(),
+ tree.Overload{
+ Types: tree.ArgTypes{{"x", types.Float}, {"y", types.Float}, {"m", types.Float}},
+ ReturnType: tree.FixedReturnType(types.Geometry),
+ Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
+ x := float64(tree.MustBeDFloat(args[0]))
+ y := float64(tree.MustBeDFloat(args[1]))
+ m := float64(tree.MustBeDFloat(args[2]))
+ g, err := geo.MakeGeometryFromLayoutAndPointCoords(geom.XYM, []float64{x, y, m})
+ if err != nil {
+ return nil, err
+ }
+ return tree.NewDGeometry(g), nil
+ },
+ Info: infoBuilder{info: `Returns a new Point with the given X, Y, and M coordinates.`}.String(),
+ Volatility: tree.VolatilityImmutable,
+ },
+ ),
"st_makepolygon": makeBuiltin(
defProps(),
geometryOverload1(
@@ -6245,7 +6315,6 @@ func initGeoBuiltins() {
{"st_geogfromtext", "st_geographyfromtext"},
{"st_geomfromtext", "st_geometryfromtext"},
{"st_numinteriorring", "st_numinteriorrings"},
- {"st_makepoint", "st_point"},
{"st_symmetricdifference", "st_symdifference"},
} {
if _, ok := geoBuiltins[alias.builtinName]; !ok {
From 1aee31761b38a31d5bbf083b3000e00668800ff4 Mon Sep 17 00:00:00 2001
From: Andrew Kimball
Date: Wed, 24 Feb 2021 17:44:22 -0800
Subject: [PATCH 3/7] diagnostics: lock while populating hardware information
The shirou/gopsutil/host library that we use to gather hardware information
during diagnostics reporting is not multi-thread safe. As one example, it
lazily initializes a global map the first time the Virtualization function
is called, but takes no lock while doing so. Work around this limitation by
taking our own lock.
This code never triggered race conditions before, but it does after recent
changes I made to the diagnostics reporting code. Previously, we used a
single background goroutine for both diagnostics reporting and checking for
updates. Now each of those runs on its own goroutine, which triggers the
race detector.
Fixes #61091
Release justification: fixes for high-priority or high-severity bugs in existing
functionality
Release note: None
---
pkg/server/diagnostics/BUILD.bazel | 1 +
pkg/server/diagnostics/diagnostics.go | 53 +++++++++++++++++++++++++++
pkg/server/diagnostics/reporter.go | 42 ---------------------
3 files changed, 54 insertions(+), 42 deletions(-)
diff --git a/pkg/server/diagnostics/BUILD.bazel b/pkg/server/diagnostics/BUILD.bazel
index 3626aa931696..421a61cff191 100644
--- a/pkg/server/diagnostics/BUILD.bazel
+++ b/pkg/server/diagnostics/BUILD.bazel
@@ -33,6 +33,7 @@ go_library(
"//pkg/util/log/logcrash",
"//pkg/util/protoutil",
"//pkg/util/stop",
+ "//pkg/util/syncutil",
"//pkg/util/timeutil",
"//pkg/util/uuid",
"@com_github_cockroachdb_errors//:errors",
diff --git a/pkg/server/diagnostics/diagnostics.go b/pkg/server/diagnostics/diagnostics.go
index 3e4ab9f4ae05..0f5e0f32316d 100644
--- a/pkg/server/diagnostics/diagnostics.go
+++ b/pkg/server/diagnostics/diagnostics.go
@@ -11,15 +11,23 @@
package diagnostics
import (
+ "context"
"math/rand"
"net/url"
+ "runtime"
"strconv"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/diagnostics/diagnosticspb"
+ "github.com/cockroachdb/cockroach/pkg/util/cloudinfo"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
+ "github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
+ "github.com/shirou/gopsutil/cpu"
+ "github.com/shirou/gopsutil/host"
+ "github.com/shirou/gopsutil/load"
+ "github.com/shirou/gopsutil/mem"
)
// updatesURL is the URL used to check for new versions. Can be nil if an empty
@@ -112,3 +120,48 @@ func addJitter(d time.Duration) time.Duration {
j := time.Duration(rand.Intn(jitterSeconds*2)-jitterSeconds) * time.Second
return d + j
}
+
+var populateMutex syncutil.Mutex
+
+// populateHardwareInfo populates OS, CPU, memory, etc. information about the
+// environment in which CRDB is running.
+func populateHardwareInfo(ctx context.Context, e *diagnosticspb.Environment) {
+ // The shirou/gopsutil/host library is not multi-thread safe. As one
+ // example, it lazily initializes a global map the first time the
+ // Virtualization function is called, but takes no lock while doing so.
+ // Work around this limitation by taking our own lock.
+ populateMutex.Lock()
+ defer populateMutex.Unlock()
+
+ if platform, family, version, err := host.PlatformInformation(); err == nil {
+ e.Os.Family = family
+ e.Os.Platform = platform
+ e.Os.Version = version
+ }
+
+ if virt, role, err := host.Virtualization(); err == nil && role == "guest" {
+ e.Hardware.Virtualization = virt
+ }
+
+ if m, err := mem.VirtualMemory(); err == nil {
+ e.Hardware.Mem.Available = m.Available
+ e.Hardware.Mem.Total = m.Total
+ }
+
+ e.Hardware.Cpu.Numcpu = int32(runtime.NumCPU())
+ if cpus, err := cpu.InfoWithContext(ctx); err == nil && len(cpus) > 0 {
+ e.Hardware.Cpu.Sockets = int32(len(cpus))
+ c := cpus[0]
+ e.Hardware.Cpu.Cores = c.Cores
+ e.Hardware.Cpu.Model = c.ModelName
+ e.Hardware.Cpu.Mhz = float32(c.Mhz)
+ e.Hardware.Cpu.Features = c.Flags
+ }
+
+ if l, err := load.AvgWithContext(ctx); err == nil {
+ e.Hardware.Loadavg15 = float32(l.Load15)
+ }
+
+ e.Hardware.Provider, e.Hardware.InstanceClass = cloudinfo.GetInstanceClass(ctx)
+ e.Topology.Provider, e.Topology.Region = cloudinfo.GetInstanceRegion(ctx)
+}
diff --git a/pkg/server/diagnostics/reporter.go b/pkg/server/diagnostics/reporter.go
index 4123f60a5aa3..25c55d0e892e 100644
--- a/pkg/server/diagnostics/reporter.go
+++ b/pkg/server/diagnostics/reporter.go
@@ -17,7 +17,6 @@ import (
"net/http"
"net/url"
"reflect"
- "runtime"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
@@ -36,7 +35,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
- "github.com/cockroachdb/cockroach/pkg/util/cloudinfo"
"github.com/cockroachdb/cockroach/pkg/util/httputil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
@@ -46,10 +44,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/mitchellh/reflectwalk"
- "github.com/shirou/gopsutil/cpu"
- "github.com/shirou/gopsutil/host"
- "github.com/shirou/gopsutil/load"
- "github.com/shirou/gopsutil/mem"
"google.golang.org/protobuf/proto"
)
@@ -338,42 +332,6 @@ func getLicenseType(ctx context.Context, settings *cluster.Settings) string {
return licenseType
}
-// populateHardwareInfo populates OS, CPU, memory, etc. information about the
-// environment in which CRDB is running.
-func populateHardwareInfo(ctx context.Context, e *diagnosticspb.Environment) {
- if platform, family, version, err := host.PlatformInformation(); err == nil {
- e.Os.Family = family
- e.Os.Platform = platform
- e.Os.Version = version
- }
-
- if virt, role, err := host.Virtualization(); err == nil && role == "guest" {
- e.Hardware.Virtualization = virt
- }
-
- if m, err := mem.VirtualMemory(); err == nil {
- e.Hardware.Mem.Available = m.Available
- e.Hardware.Mem.Total = m.Total
- }
-
- e.Hardware.Cpu.Numcpu = int32(runtime.NumCPU())
- if cpus, err := cpu.InfoWithContext(ctx); err == nil && len(cpus) > 0 {
- e.Hardware.Cpu.Sockets = int32(len(cpus))
- c := cpus[0]
- e.Hardware.Cpu.Cores = c.Cores
- e.Hardware.Cpu.Model = c.ModelName
- e.Hardware.Cpu.Mhz = float32(c.Mhz)
- e.Hardware.Cpu.Features = c.Flags
- }
-
- if l, err := load.AvgWithContext(ctx); err == nil {
- e.Hardware.Loadavg15 = float32(l.Load15)
- }
-
- e.Hardware.Provider, e.Hardware.InstanceClass = cloudinfo.GetInstanceClass(ctx)
- e.Topology.Provider, e.Topology.Region = cloudinfo.GetInstanceRegion(ctx)
-}
-
func anonymizeZoneConfig(dst *zonepb.ZoneConfig, src zonepb.ZoneConfig, secret string) {
if src.RangeMinBytes != nil {
dst.RangeMinBytes = proto.Int64(*src.RangeMinBytes)
From 1e795d1ff571859ee1c2d43c8b4b49bec8eade95 Mon Sep 17 00:00:00 2001
From: Rebecca Taft
Date: Wed, 24 Feb 2021 17:03:38 -0700
Subject: [PATCH 4/7] opt: remove uniqueness checks when uniqueness inferred
through FDs
This commit removes uniqueness checks for columns that can be
inferred to be unique through functional dependencies. This is
relevant in particular for REGIONAL BY ROW tables with a computed
region column that depends on the primary key. In this case,
uniqueness checks are never needed on the primary key, since
uniqueness is already guaranteed by the primary index.
Fixes #57720
Release justification: This commit is a low-risk, high-benefit
update to new functionality.
Release note (performance improvement): Removed uniqueness checks
on the primary key for REGIONAL BY ROW tables with a computed
region column that is a function of the primary key columns.
Uniqueness checks are not necessary in this case since uniqueness
is already guaranteed by the primary index. Removing these
checks improves performance of INSERT, UPDATE, and UPSERT
statements.
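As a minimal sketch (hypothetical schema, modeled on the
uniq_computed_pk test added in this patch), the UNIQUE WITHOUT INDEX
constraint on pk below needs no check: pk functionally determines
region, so the primary key on (region, pk) already guarantees that
pk is unique.
    CREATE TABLE t (
        pk INT,
        region STRING AS (CASE WHEN pk < 0 THEN 'foo' ELSE 'bar' END) STORED,
        PRIMARY KEY (region, pk),
        -- Check elided: (pk)-->(region) plus the primary key implies pk is a key.
        UNIQUE WITHOUT INDEX (pk)
    );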
---
.../testdata/logic_test/regional_by_row | 48 +++++++
pkg/sql/logictest/testdata/logic_test/unique | 78 +++++++++++
pkg/sql/opt/exec/execbuilder/testdata/unique | 64 ++++++++-
pkg/sql/opt/memo/testdata/logprops/scan | 3 +
.../opt/optbuilder/mutation_builder_unique.go | 59 +++++++--
.../optbuilder/testdata/unique-checks-insert | 77 +++++++++++
.../optbuilder/testdata/unique-checks-update | 95 +++++++++++++
.../optbuilder/testdata/unique-checks-upsert | 125 ++++++++++++++++++
8 files changed, 538 insertions(+), 11 deletions(-)
diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
index 8209c741846e..62d578cea78c 100644
--- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
+++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row
@@ -1035,6 +1035,54 @@ pk a b crdb_region_col
statement error cannot drop column crdb_region_col as it is used to store the region in a REGIONAL BY ROW table\nHINT: You must change the table locality before dropping this table
ALTER TABLE regional_by_row_table_as DROP COLUMN crdb_region_col
+# We do not need uniqueness checks on pk since uniqueness can be inferred
+# through the functional dependency between pk and the computed region column.
+query T
+EXPLAIN INSERT INTO regional_by_row_table_as (pk, a, b) VALUES (1, 1, 1)
+----
+distribution: local
+vectorized: true
+·
+• root
+│
+├── • insert
+│ │ into: regional_by_row_table_as(pk, a, b, crdb_region_col)
+│ │
+│ └── • buffer
+│ │ label: buffer 1
+│ │
+│ └── • values
+│ size: 5 columns, 1 row
+│
+└── • constraint-check
+ │
+ └── • error if rows
+ │
+ └── • lookup join (semi)
+ │ table: regional_by_row_table_as@regional_by_row_table_as_b_key
+ │ equality: (lookup_join_const_col_@21, column3) = (crdb_region_col,b)
+ │ equality cols are key
+ │ pred: (column1 != pk) OR (column10 != crdb_region_col)
+ │
+ └── • cross join
+ │
+ ├── • values
+ │ size: 1 column, 3 rows
+ │
+ └── • scan buffer
+ label: buffer 1
+
+# TODO(mgartner): Update this error message to remove crdb_region (see #59504).
+statement error pq: duplicate key value violates unique constraint "primary"\nDETAIL: Key \(crdb_region_col,pk\)=\('us-east-1',1\) already exists\.
+INSERT INTO regional_by_row_table_as (pk, a, b) VALUES (1, 1, 1)
+
+statement ok
+INSERT INTO regional_by_row_table_as (pk, a, b) VALUES (30, 1, 1)
+
+statement error pq: duplicate key value violates unique constraint "regional_by_row_table_as_b_key"\nDETAIL: Key \(b\)=\(1\) already exists\.
+INSERT INTO regional_by_row_table_as (pk, a, b) VALUES (2, 1, 1)
+
+
# Tests for altering the survivability of a REGIONAL BY ROW table.
statement ok
CREATE DATABASE alter_survive_db PRIMARY REGION "us-east-1" REGIONS "ca-central-1", "ap-southeast-2" SURVIVE REGION FAILURE
diff --git a/pkg/sql/logictest/testdata/logic_test/unique b/pkg/sql/logictest/testdata/logic_test/unique
index 3de7a80548ba..ace06c2edae0 100644
--- a/pkg/sql/logictest/testdata/logic_test/unique
+++ b/pkg/sql/logictest/testdata/logic_test/unique
@@ -86,6 +86,23 @@ CREATE TABLE uniq_enum (
UNIQUE WITHOUT INDEX (s, j)
)
+statement ok
+CREATE TABLE uniq_computed_pk (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d),
+ UNIQUE WITHOUT INDEX (i),
+ UNIQUE WITHOUT INDEX (s),
+ UNIQUE WITHOUT INDEX (d)
+)
+
statement ok
CREATE TABLE other (k INT, v INT, w INT NOT NULL, x INT, y INT)
@@ -322,6 +339,28 @@ a b
NULL 5
NULL 5
+# Check that uniqueness violations are detected in a table with UNIQUE indexes
+# containing computed columns that are dependent on UNIQUE WITHOUT INDEX
+# columns.
+statement ok
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 1.0), (2, 'b', 2.0)
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "primary"\nDETAIL: Key \(c_i_expr,i\)=\('bar',1\) already exists\.
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'c', 3.0)
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "uniq_computed_pk_c_s_s_key"\nDETAIL: Key \(c_s,s\)=\('b','b'\) already exists\.
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (3, 'b', 3.0)
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "unique_d"\nDETAIL: Key \(d\)=\(1\.00\) already exists\.
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (3, 'c', 1.00)
+
+query ITFTTFT colnames,rowsort
+SELECT * FROM uniq_computed_pk
+----
+i s d c_i_expr c_s c_d c_d_expr
+1 a 1.0 bar a 1.0 1.0
+2 b 2.0 bar b 2.0 2.0
+
# -- Tests with UPDATE --
subtest Update
@@ -475,6 +514,25 @@ NULL 5
NULL 5
NULL 10
+# Check that uniqueness violations are detected in a table with UNIQUE indexes
+# containing computed columns that are dependent on UNIQUE WITHOUT INDEX
+# columns.
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "primary"\nDETAIL: Key \(c_i_expr,i\)=\('bar',1\) already exists\.
+UPDATE uniq_computed_pk SET i = 1 WHERE i = 2
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "uniq_computed_pk_c_s_s_key"\nDETAIL: Key \(c_s,s\)=\('a','a'\) already exists\.
+UPDATE uniq_computed_pk SET s = 'a' WHERE i = 2
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "unique_d"\nDETAIL: Key \(d\)=\(1\.00\) already exists\.
+UPDATE uniq_computed_pk SET d = 1.00 WHERE i = 2
+
+query ITFTTFT colnames,rowsort
+SELECT * FROM uniq_computed_pk
+----
+i s d c_i_expr c_s c_d c_d_expr
+1 a 1.0 bar a 1.0 1.0
+2 b 2.0 bar b 2.0 2.0
+
# -- Tests with UPSERT --
subtest Upsert
@@ -670,6 +728,26 @@ SELECT * FROM uniq_partial_index_and_constraint
i
2
+# Check that uniqueness violations are detected in a table with UNIQUE indexes
+# containing computed columns that are dependent on UNIQUE WITHOUT INDEX
+# columns.
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "primary"\nDETAIL: Key \(c_i_expr,i\)=\('bar',2\) already exists\.
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 1.0) ON CONFLICT (s) DO UPDATE SET i = 2
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "uniq_computed_pk_c_s_s_key"\nDETAIL: Key \(c_s,s\)=\('b','b'\) already exists\.
+UPSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'b', 1.0)
+
+statement error pgcode 23505 pq: duplicate key value violates unique constraint "unique_d"\nDETAIL: Key \(d\)=\(2\.00\) already exists\.
+UPSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 2.00)
+
+query ITFTTFT colnames,rowsort
+SELECT * FROM uniq_computed_pk
+----
+i s d c_i_expr c_s c_d c_d_expr
+1 a 1.0 bar a 1.0 1.0
+2 b 2.0 bar b 2.0 2.0
+
+
# -- Tests with DELETE --
subtest Delete
diff --git a/pkg/sql/opt/exec/execbuilder/testdata/unique b/pkg/sql/opt/exec/execbuilder/testdata/unique
index 539b288c00ed..3a8640df2f30 100644
--- a/pkg/sql/opt/exec/execbuilder/testdata/unique
+++ b/pkg/sql/opt/exec/execbuilder/testdata/unique
@@ -110,8 +110,6 @@ CREATE TABLE uniq_partial_hidden_pk (
statement ok
CREATE TYPE region AS ENUM ('us-east', 'us-west', 'eu-west')
-# TODO(rytaft): When more of the multi-region syntax is supported,
-# add it here.
statement ok
CREATE TABLE uniq_enum (
r region DEFAULT CASE (random()*3)::int WHEN 0 THEN 'us-east' WHEN 1 THEN 'us-west' ELSE 'eu-west' END,
@@ -202,6 +200,29 @@ ALTER TABLE uniq_partial_enum INJECT STATISTICS '[
}
]'
+statement ok
+CREATE TABLE uniq_computed_pk (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d),
+ UNIQUE WITHOUT INDEX (i),
+ UNIQUE WITHOUT INDEX (s),
+ UNIQUE WITHOUT INDEX (d),
+ FAMILY (i),
+ FAMILY (s),
+ FAMILY (d),
+ FAMILY (c_i_expr),
+ FAMILY (c_d),
+ FAMILY (c_d_expr)
+)
+
statement ok
CREATE TABLE other (k INT, v INT, w INT NOT NULL, x INT, y INT)
@@ -1641,6 +1662,45 @@ vectorized: true
columns: (column1, column2, column3, column4, check1, partial_index_put1)
label: buffer 1
+# We can eliminate uniqueness checks for i and s due to functional dependencies.
+# We cannot eliminate checks for d, since functional dependencies could not be
+# inferred due to composite sensitivity of d::string.
+query T
+EXPLAIN INSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 1.0), (2, 'b', 2.0)
+----
+distribution: local
+vectorized: true
+·
+• root
+│
+├── • insert
+│ │ into: uniq_computed_pk(i, s, d, c_i_expr, c_s, c_d, c_d_expr)
+│ │
+│ └── • buffer
+│ │ label: buffer 1
+│ │
+│ └── • render
+│ │
+│ └── • values
+│ size: 3 columns, 2 rows
+│
+└── • constraint-check
+ │
+ └── • error if rows
+ │
+ └── • hash join (right semi)
+ │ equality: (d) = (column3)
+ │ pred: (column1 != i) OR (column13 != c_i_expr)
+ │
+ ├── • scan
+ │ missing stats
+ │ table: uniq_computed_pk@uniq_computed_pk_c_d_expr_d_key
+ │ spans: FULL SCAN
+ │
+ └── • scan buffer
+ label: buffer 1
+
+
# -- Tests with UPDATE --
subtest Update
diff --git a/pkg/sql/opt/memo/testdata/logprops/scan b/pkg/sql/opt/memo/testdata/logprops/scan
index f7133bf4cb62..09b535283756 100644
--- a/pkg/sql/opt/memo/testdata/logprops/scan
+++ b/pkg/sql/opt/memo/testdata/logprops/scan
@@ -399,6 +399,9 @@ index-join c
└── interesting orderings: (+1) (+3,+1)
# Test FDs for computed columns.
+# We add equivalencies s=c_s and d=c_d, a strict dependency i->c_i_expr, and
+# no dependency d->c_d_expr since the expression d::string is composite-
+# sensitive.
exec-ddl
CREATE TABLE computed (
i INT,
diff --git a/pkg/sql/opt/optbuilder/mutation_builder_unique.go b/pkg/sql/opt/optbuilder/mutation_builder_unique.go
index 492875cff950..9b6d2cec5621 100644
--- a/pkg/sql/opt/optbuilder/mutation_builder_unique.go
+++ b/pkg/sql/opt/optbuilder/mutation_builder_unique.go
@@ -153,6 +153,11 @@ type uniqueCheckHelper struct {
// primaryKeyOrdinals includes the ordinals from any primary key columns
// that are not included in uniqueOrdinals.
primaryKeyOrdinals util.FastIntSet
+
+ // The scope and column ordinals of the scan that will serve as the right
+ // side of the semi join for the uniqueness checks.
+ scanScope *scope
+ scanOrdinals []int
}
// init initializes the helper with a unique constraint.
@@ -179,7 +184,7 @@ func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool {
// with columns that are a subset of the unique constraint columns.
// Similarly, we don't need a check for a partial unique constraint if there
// exists a non-partial unique constraint with columns that are a subset of
- // the partial unique constrain columns.
+ // the partial unique constraint columns.
primaryOrds := getIndexLaxKeyOrdinals(mb.tab.Index(cat.PrimaryIndex))
primaryOrds.DifferenceWith(uniqueOrds)
if primaryOrds.Empty() {
@@ -203,7 +208,44 @@ func (h *uniqueCheckHelper) init(mb *mutationBuilder, uniqueOrdinal int) bool {
// If at least one unique column is getting a NULL value, unique check not
// needed.
- return numNullCols == 0
+ if numNullCols != 0 {
+ return false
+ }
+
+ // Build the scan that will serve as the right side of the semi join in the
+ // uniqueness check. We need to build the scan now so that we can use its
+ // FDs below.
+ h.scanScope, h.scanOrdinals = h.buildTableScan()
+
+ // Check that the columns in the unique constraint aren't already known to
+ // form a lax key. This can happen if there is a unique index on a superset of
+ // these columns, where all other columns are computed columns that depend
+ // only on our columns. This is especially important for multi-region tables
+ // when the region column is computed.
+ //
+ // For example:
+ //
+ // CREATE TABLE tab (
+ // k INT PRIMARY KEY,
+ // region crdb_internal_region AS (
+ // CASE WHEN k < 10 THEN 'us-east1' ELSE 'us-west1' END
+ // ) STORED
+ // ) LOCALITY REGIONAL BY ROW AS region
+ //
+ // Because this is a REGIONAL BY ROW table, the region column is implicitly
+ // added to the front of every index, including the primary index. As a
+ // result, we would normally need to add a uniqueness check to all mutations
+ // to ensure that the primary key column (k in this case) remains unique.
+ // However, because the region column is computed and depends only on k, the
+ // presence of the unique index on (region, k) (i.e., the primary index) is
+ // sufficient to guarantee the uniqueness of k.
+ var uniqueCols opt.ColSet
+ h.uniqueOrdinals.ForEach(func(ord int) {
+ colID := h.scanScope.cols[ord].id
+ uniqueCols.Add(colID)
+ })
+ fds := &h.scanScope.expr.Relational().FuncDeps
+ return !fds.ColsAreLaxKey(uniqueCols)
}
// buildInsertionCheck creates a unique check for rows which are added to a
@@ -214,10 +256,9 @@ func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
// Build a self semi-join, with the new values on the left and the
// existing values on the right.
- scanScope, ordinals := h.buildTableScan()
withScanScope, _ := h.mb.buildCheckInputScan(
- checkInputScanNewVals, ordinals,
+ checkInputScanNewVals, h.scanOrdinals,
)
// Build the join filters:
@@ -238,7 +279,7 @@ func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(
f.ConstructEq(
f.ConstructVariable(withScanScope.cols[i].id),
- f.ConstructVariable(scanScope.cols[i].id),
+ f.ConstructVariable(h.scanScope.cols[i].id),
),
))
}
@@ -255,8 +296,8 @@ func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
withScanPred := h.mb.b.buildScalar(typedPred, withScanScope, nil, nil, nil)
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(withScanPred))
- typedPred = scanScope.resolveAndRequireType(pred, types.Bool)
- scanPred := h.mb.b.buildScalar(typedPred, scanScope, nil, nil, nil)
+ typedPred = h.scanScope.resolveAndRequireType(pred, types.Bool)
+ scanPred := h.mb.b.buildScalar(typedPred, h.scanScope, nil, nil, nil)
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(scanPred))
}
@@ -268,7 +309,7 @@ func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
for i, ok := h.primaryKeyOrdinals.Next(0); ok; i, ok = h.primaryKeyOrdinals.Next(i + 1) {
pkFilterLocal := f.ConstructNe(
f.ConstructVariable(withScanScope.cols[i].id),
- f.ConstructVariable(scanScope.cols[i].id),
+ f.ConstructVariable(h.scanScope.cols[i].id),
)
if pkFilter == nil {
pkFilter = pkFilterLocal
@@ -278,7 +319,7 @@ func (h *uniqueCheckHelper) buildInsertionCheck() memo.UniqueChecksItem {
}
semiJoinFilters = append(semiJoinFilters, f.ConstructFiltersItem(pkFilter))
- semiJoin := f.ConstructSemiJoin(withScanScope.expr, scanScope.expr, semiJoinFilters, memo.EmptyJoinPrivate)
+ semiJoin := f.ConstructSemiJoin(withScanScope.expr, h.scanScope.expr, semiJoinFilters, memo.EmptyJoinPrivate)
// Collect the key columns that will be shown in the error message if there
// is a duplicate key violation resulting from this uniqueness check.
diff --git a/pkg/sql/opt/optbuilder/testdata/unique-checks-insert b/pkg/sql/opt/optbuilder/testdata/unique-checks-insert
index 178d3b4dc14d..b1dee1aa9ec5 100644
--- a/pkg/sql/opt/optbuilder/testdata/unique-checks-insert
+++ b/pkg/sql/opt/optbuilder/testdata/unique-checks-insert
@@ -1562,3 +1562,80 @@ insert uniq_partial_constraint_and_partial_index
├── b:25 > 10
├── uniq_partial_constraint_and_partial_index.b:21 > 10
└── k:23 != uniq_partial_constraint_and_partial_index.k:19
+
+exec-ddl
+CREATE TABLE uniq_computed_pk (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d),
+ UNIQUE WITHOUT INDEX (i),
+ UNIQUE WITHOUT INDEX (s),
+ UNIQUE WITHOUT INDEX (d)
+)
+----
+
+# We can eliminate uniqueness checks for i and s due to functional dependencies.
+# We cannot eliminate checks for d, since functional dependencies could not be
+# inferred due to composite sensitivity of d::string.
+build
+INSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 1.0), (2, 'b', 2.0)
+----
+insert uniq_computed_pk
+ ├── columns:
+ ├── insert-mapping:
+ │ ├── column1:9 => uniq_computed_pk.i:1
+ │ ├── column2:10 => uniq_computed_pk.s:2
+ │ ├── column3:11 => uniq_computed_pk.d:3
+ │ ├── column12:12 => uniq_computed_pk.c_i_expr:4
+ │ ├── column2:10 => uniq_computed_pk.c_s:5
+ │ ├── column3:11 => uniq_computed_pk.c_d:6
+ │ └── column13:13 => uniq_computed_pk.c_d_expr:7
+ ├── input binding: &1
+ ├── project
+ │ ├── columns: column12:12!null column13:13!null column1:9!null column2:10!null column3:11!null
+ │ ├── values
+ │ │ ├── columns: column1:9!null column2:10!null column3:11!null
+ │ │ ├── (1, 'a', 1.0)
+ │ │ └── (2, 'b', 2.0)
+ │ └── projections
+ │ ├── CASE WHEN column1:9 < 0 THEN 'foo' ELSE 'bar' END [as=column12:12]
+ │ └── column3:11::STRING [as=column13:13]
+ └── unique-checks
+ └── unique-checks-item: uniq_computed_pk(d)
+ └── semi-join (hash)
+ ├── columns: i:38!null s:39!null d:40!null c_i_expr:41!null c_s:42!null c_d:43!null c_d_expr:44!null
+ ├── with-scan &1
+ │ ├── columns: i:38!null s:39!null d:40!null c_i_expr:41!null c_s:42!null c_d:43!null c_d_expr:44!null
+ │ └── mapping:
+ │ ├── column1:9 => i:38
+ │ ├── column2:10 => s:39
+ │ ├── column3:11 => d:40
+ │ ├── column12:12 => c_i_expr:41
+ │ ├── column2:10 => c_s:42
+ │ ├── column3:11 => c_d:43
+ │ └── column13:13 => c_d_expr:44
+ ├── project
+ │ ├── columns: uniq_computed_pk.c_s:34 uniq_computed_pk.i:30!null uniq_computed_pk.s:31 uniq_computed_pk.d:32 uniq_computed_pk.c_i_expr:33!null uniq_computed_pk.c_d:35 uniq_computed_pk.c_d_expr:36
+ │ ├── scan uniq_computed_pk
+ │ │ ├── columns: uniq_computed_pk.i:30!null uniq_computed_pk.s:31 uniq_computed_pk.d:32 uniq_computed_pk.c_i_expr:33!null uniq_computed_pk.c_d:35 uniq_computed_pk.c_d_expr:36
+ │ │ └── computed column expressions
+ │ │ ├── uniq_computed_pk.c_i_expr:33
+ │ │ │ └── CASE WHEN uniq_computed_pk.i:30 < 0 THEN 'foo' ELSE 'bar' END
+ │ │ ├── uniq_computed_pk.c_s:34
+ │ │ │ └── uniq_computed_pk.s:31
+ │ │ ├── uniq_computed_pk.c_d:35
+ │ │ │ └── uniq_computed_pk.d:32
+ │ │ └── uniq_computed_pk.c_d_expr:36
+ │ │ └── uniq_computed_pk.d:32::STRING
+ │ └── projections
+ │ └── uniq_computed_pk.s:31 [as=uniq_computed_pk.c_s:34]
+ └── filters
+ ├── d:40 = uniq_computed_pk.d:32
+ └── (i:38 != uniq_computed_pk.i:30) OR (c_i_expr:41 != uniq_computed_pk.c_i_expr:33)
diff --git a/pkg/sql/opt/optbuilder/testdata/unique-checks-update b/pkg/sql/opt/optbuilder/testdata/unique-checks-update
index a1627c60cd21..8a3398c8ec03 100644
--- a/pkg/sql/opt/optbuilder/testdata/unique-checks-update
+++ b/pkg/sql/opt/optbuilder/testdata/unique-checks-update
@@ -1011,3 +1011,98 @@ update uniq_partial_hidden_pk
├── b:21 > 0
├── uniq_partial_hidden_pk.b:17 > 0
└── rowid:22 != uniq_partial_hidden_pk.rowid:18
+
+exec-ddl
+CREATE TABLE uniq_computed_pk (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d),
+ UNIQUE WITHOUT INDEX (i),
+ UNIQUE WITHOUT INDEX (s),
+ UNIQUE WITHOUT INDEX (d)
+)
+----
+
+# We can eliminate uniqueness checks for i and s due to functional dependencies.
+# We cannot eliminate checks for d, since functional dependencies could not be
+# inferred due to composite sensitivity of d::string.
+build
+UPDATE uniq_computed_pk SET i=1, s='a', d=1.0
+----
+update uniq_computed_pk
+ ├── columns:
+ ├── fetch columns: uniq_computed_pk.i:9 uniq_computed_pk.s:10 uniq_computed_pk.d:11 uniq_computed_pk.c_i_expr:12 uniq_computed_pk.c_s:13 uniq_computed_pk.c_d:14 uniq_computed_pk.c_d_expr:15
+ ├── update-mapping:
+ │ ├── i_new:17 => uniq_computed_pk.i:1
+ │ ├── s_new:18 => uniq_computed_pk.s:2
+ │ ├── d_new:19 => uniq_computed_pk.d:3
+ │ ├── column20:20 => uniq_computed_pk.c_i_expr:4
+ │ ├── s_new:18 => uniq_computed_pk.c_s:5
+ │ ├── d_new:19 => uniq_computed_pk.c_d:6
+ │ └── column21:21 => uniq_computed_pk.c_d_expr:7
+ ├── input binding: &1
+ ├── project
+ │ ├── columns: column20:20!null column21:21!null uniq_computed_pk.i:9!null uniq_computed_pk.s:10 uniq_computed_pk.d:11 uniq_computed_pk.c_i_expr:12!null uniq_computed_pk.c_s:13 uniq_computed_pk.c_d:14 uniq_computed_pk.c_d_expr:15 crdb_internal_mvcc_timestamp:16 i_new:17!null s_new:18!null d_new:19!null
+ │ ├── project
+ │ │ ├── columns: i_new:17!null s_new:18!null d_new:19!null uniq_computed_pk.i:9!null uniq_computed_pk.s:10 uniq_computed_pk.d:11 uniq_computed_pk.c_i_expr:12!null uniq_computed_pk.c_s:13 uniq_computed_pk.c_d:14 uniq_computed_pk.c_d_expr:15 crdb_internal_mvcc_timestamp:16
+ │ │ ├── project
+ │ │ │ ├── columns: uniq_computed_pk.c_s:13 uniq_computed_pk.i:9!null uniq_computed_pk.s:10 uniq_computed_pk.d:11 uniq_computed_pk.c_i_expr:12!null uniq_computed_pk.c_d:14 uniq_computed_pk.c_d_expr:15 crdb_internal_mvcc_timestamp:16
+ │ │ │ ├── scan uniq_computed_pk
+ │ │ │ │ ├── columns: uniq_computed_pk.i:9!null uniq_computed_pk.s:10 uniq_computed_pk.d:11 uniq_computed_pk.c_i_expr:12!null uniq_computed_pk.c_d:14 uniq_computed_pk.c_d_expr:15 crdb_internal_mvcc_timestamp:16
+ │ │ │ │ └── computed column expressions
+ │ │ │ │ ├── uniq_computed_pk.c_i_expr:12
+ │ │ │ │ │ └── CASE WHEN uniq_computed_pk.i:9 < 0 THEN 'foo' ELSE 'bar' END
+ │ │ │ │ ├── uniq_computed_pk.c_s:13
+ │ │ │ │ │ └── uniq_computed_pk.s:10
+ │ │ │ │ ├── uniq_computed_pk.c_d:14
+ │ │ │ │ │ └── uniq_computed_pk.d:11
+ │ │ │ │ └── uniq_computed_pk.c_d_expr:15
+ │ │ │ │ └── uniq_computed_pk.d:11::STRING
+ │ │ │ └── projections
+ │ │ │ └── uniq_computed_pk.s:10 [as=uniq_computed_pk.c_s:13]
+ │ │ └── projections
+ │ │ ├── 1 [as=i_new:17]
+ │ │ ├── 'a' [as=s_new:18]
+ │ │ └── 1.0 [as=d_new:19]
+ │ └── projections
+ │ ├── CASE WHEN i_new:17 < 0 THEN 'foo' ELSE 'bar' END [as=column20:20]
+ │ └── d_new:19::STRING [as=column21:21]
+ └── unique-checks
+ └── unique-checks-item: uniq_computed_pk(d)
+ └── semi-join (hash)
+ ├── columns: i:46!null s:47!null d:48!null c_i_expr:49!null c_s:50!null c_d:51!null c_d_expr:52!null
+ ├── with-scan &1
+ │ ├── columns: i:46!null s:47!null d:48!null c_i_expr:49!null c_s:50!null c_d:51!null c_d_expr:52!null
+ │ └── mapping:
+ │ ├── i_new:17 => i:46
+ │ ├── s_new:18 => s:47
+ │ ├── d_new:19 => d:48
+ │ ├── column20:20 => c_i_expr:49
+ │ ├── s_new:18 => c_s:50
+ │ ├── d_new:19 => c_d:51
+ │ └── column21:21 => c_d_expr:52
+ ├── project
+ │ ├── columns: uniq_computed_pk.c_s:42 uniq_computed_pk.i:38!null uniq_computed_pk.s:39 uniq_computed_pk.d:40 uniq_computed_pk.c_i_expr:41!null uniq_computed_pk.c_d:43 uniq_computed_pk.c_d_expr:44
+ │ ├── scan uniq_computed_pk
+ │ │ ├── columns: uniq_computed_pk.i:38!null uniq_computed_pk.s:39 uniq_computed_pk.d:40 uniq_computed_pk.c_i_expr:41!null uniq_computed_pk.c_d:43 uniq_computed_pk.c_d_expr:44
+ │ │ └── computed column expressions
+ │ │ ├── uniq_computed_pk.c_i_expr:41
+ │ │ │ └── CASE WHEN uniq_computed_pk.i:38 < 0 THEN 'foo' ELSE 'bar' END
+ │ │ ├── uniq_computed_pk.c_s:42
+ │ │ │ └── uniq_computed_pk.s:39
+ │ │ ├── uniq_computed_pk.c_d:43
+ │ │ │ └── uniq_computed_pk.d:40
+ │ │ └── uniq_computed_pk.c_d_expr:44
+ │ │ └── uniq_computed_pk.d:40::STRING
+ │ └── projections
+ │ └── uniq_computed_pk.s:39 [as=uniq_computed_pk.c_s:42]
+ └── filters
+ ├── d:48 = uniq_computed_pk.d:40
+ └── (i:46 != uniq_computed_pk.i:38) OR (c_i_expr:49 != uniq_computed_pk.c_i_expr:41)
diff --git a/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert b/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert
index 2d4e220a5389..fdd063532a35 100644
--- a/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert
+++ b/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert
@@ -1565,3 +1565,128 @@ upsert t
└── filters
├── i:17 = t.i:14
└── rowid:18 != t.rowid:15
+
+exec-ddl
+CREATE TABLE uniq_computed_pk (
+ i INT,
+ s STRING,
+ d DECIMAL,
+ c_i_expr STRING AS (CASE WHEN i < 0 THEN 'foo' ELSE 'bar' END) STORED,
+ c_s STRING AS (s) VIRTUAL,
+ c_d DECIMAL AS (d) STORED,
+ c_d_expr STRING AS (d::string) STORED,
+ PRIMARY KEY (c_i_expr, i),
+ UNIQUE (c_s, s),
+ UNIQUE (c_d_expr, d),
+ UNIQUE WITHOUT INDEX (i),
+ UNIQUE WITHOUT INDEX (s),
+ UNIQUE WITHOUT INDEX (d)
+)
+----
+
+# We can eliminate uniqueness checks for i and s due to functional dependencies.
+# We cannot eliminate checks for d, since functional dependencies could not be
+# inferred due to composite sensitivity of d::string.
+build
+UPSERT INTO uniq_computed_pk (i, s, d) VALUES (1, 'a', 1.0), (2, 'b', 2.0)
+----
+upsert uniq_computed_pk
+ ├── columns:
+ ├── arbiter indexes: primary
+ ├── canary column: uniq_computed_pk.c_i_expr:17
+ ├── fetch columns: uniq_computed_pk.i:14 uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17 uniq_computed_pk.c_s:18 uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20
+ ├── insert-mapping:
+ │ ├── column1:9 => uniq_computed_pk.i:1
+ │ ├── column2:10 => uniq_computed_pk.s:2
+ │ ├── column3:11 => uniq_computed_pk.d:3
+ │ ├── column12:12 => uniq_computed_pk.c_i_expr:4
+ │ ├── column2:10 => uniq_computed_pk.c_s:5
+ │ ├── column3:11 => uniq_computed_pk.c_d:6
+ │ └── column13:13 => uniq_computed_pk.c_d_expr:7
+ ├── update-mapping:
+ │ ├── column2:10 => uniq_computed_pk.s:2
+ │ ├── column3:11 => uniq_computed_pk.d:3
+ │ ├── column2:10 => uniq_computed_pk.c_s:5
+ │ ├── column3:11 => uniq_computed_pk.c_d:6
+ │ └── column13:13 => uniq_computed_pk.c_d_expr:7
+ ├── input binding: &1
+ ├── project
+ │ ├── columns: upsert_i:23 upsert_c_i_expr:24 column1:9!null column2:10!null column3:11!null column12:12!null column13:13!null uniq_computed_pk.i:14 uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17 uniq_computed_pk.c_s:18 uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20 crdb_internal_mvcc_timestamp:21 column22:22
+ │ ├── project
+ │ │ ├── columns: column22:22 column1:9!null column2:10!null column3:11!null column12:12!null column13:13!null uniq_computed_pk.i:14 uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17 uniq_computed_pk.c_s:18 uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20 crdb_internal_mvcc_timestamp:21
+ │ │ ├── left-join (hash)
+ │ │ │ ├── columns: column1:9!null column2:10!null column3:11!null column12:12!null column13:13!null uniq_computed_pk.i:14 uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17 uniq_computed_pk.c_s:18 uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20 crdb_internal_mvcc_timestamp:21
+ │ │ │ ├── ensure-upsert-distinct-on
+ │ │ │ │ ├── columns: column1:9!null column2:10!null column3:11!null column12:12!null column13:13!null
+ │ │ │ │ ├── grouping columns: column1:9!null column12:12!null
+ │ │ │ │ ├── project
+ │ │ │ │ │ ├── columns: column12:12!null column13:13!null column1:9!null column2:10!null column3:11!null
+ │ │ │ │ │ ├── values
+ │ │ │ │ │ │ ├── columns: column1:9!null column2:10!null column3:11!null
+ │ │ │ │ │ │ ├── (1, 'a', 1.0)
+ │ │ │ │ │ │ └── (2, 'b', 2.0)
+ │ │ │ │ │ └── projections
+ │ │ │ │ │ ├── CASE WHEN column1:9 < 0 THEN 'foo' ELSE 'bar' END [as=column12:12]
+ │ │ │ │ │ └── column3:11::STRING [as=column13:13]
+ │ │ │ │ └── aggregations
+ │ │ │ │ ├── first-agg [as=column2:10]
+ │ │ │ │ │ └── column2:10
+ │ │ │ │ ├── first-agg [as=column3:11]
+ │ │ │ │ │ └── column3:11
+ │ │ │ │ └── first-agg [as=column13:13]
+ │ │ │ │ └── column13:13
+ │ │ │ ├── project
+ │ │ │ │ ├── columns: uniq_computed_pk.c_s:18 uniq_computed_pk.i:14!null uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17!null uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20 crdb_internal_mvcc_timestamp:21
+ │ │ │ │ ├── scan uniq_computed_pk
+ │ │ │ │ │ ├── columns: uniq_computed_pk.i:14!null uniq_computed_pk.s:15 uniq_computed_pk.d:16 uniq_computed_pk.c_i_expr:17!null uniq_computed_pk.c_d:19 uniq_computed_pk.c_d_expr:20 crdb_internal_mvcc_timestamp:21
+ │ │ │ │ │ └── computed column expressions
+ │ │ │ │ │ ├── uniq_computed_pk.c_i_expr:17
+ │ │ │ │ │ │ └── CASE WHEN uniq_computed_pk.i:14 < 0 THEN 'foo' ELSE 'bar' END
+ │ │ │ │ │ ├── uniq_computed_pk.c_s:18
+ │ │ │ │ │ │ └── uniq_computed_pk.s:15
+ │ │ │ │ │ ├── uniq_computed_pk.c_d:19
+ │ │ │ │ │ │ └── uniq_computed_pk.d:16
+ │ │ │ │ │ └── uniq_computed_pk.c_d_expr:20
+ │ │ │ │ │ └── uniq_computed_pk.d:16::STRING
+ │ │ │ │ └── projections
+ │ │ │ │ └── uniq_computed_pk.s:15 [as=uniq_computed_pk.c_s:18]
+ │ │ │ └── filters
+ │ │ │ ├── column1:9 = uniq_computed_pk.i:14
+ │ │ │ └── column12:12 = uniq_computed_pk.c_i_expr:17
+ │ │ └── projections
+ │ │ └── CASE WHEN uniq_computed_pk.i:14 < 0 THEN 'foo' ELSE 'bar' END [as=column22:22]
+ │ └── projections
+ │ ├── CASE WHEN uniq_computed_pk.c_i_expr:17 IS NULL THEN column1:9 ELSE uniq_computed_pk.i:14 END [as=upsert_i:23]
+ │ └── CASE WHEN uniq_computed_pk.c_i_expr:17 IS NULL THEN column12:12 ELSE uniq_computed_pk.c_i_expr:17 END [as=upsert_c_i_expr:24]
+ └── unique-checks
+ └── unique-checks-item: uniq_computed_pk(d)
+ └── semi-join (hash)
+ ├── columns: i:49 s:50!null d:51!null c_i_expr:52 c_s:53!null c_d:54!null c_d_expr:55!null
+ ├── with-scan &1
+ │ ├── columns: i:49 s:50!null d:51!null c_i_expr:52 c_s:53!null c_d:54!null c_d_expr:55!null
+ │ └── mapping:
+ │ ├── upsert_i:23 => i:49
+ │ ├── column2:10 => s:50
+ │ ├── column3:11 => d:51
+ │ ├── upsert_c_i_expr:24 => c_i_expr:52
+ │ ├── column2:10 => c_s:53
+ │ ├── column3:11 => c_d:54
+ │ └── column13:13 => c_d_expr:55
+ ├── project
+ │ ├── columns: uniq_computed_pk.c_s:45 uniq_computed_pk.i:41!null uniq_computed_pk.s:42 uniq_computed_pk.d:43 uniq_computed_pk.c_i_expr:44!null uniq_computed_pk.c_d:46 uniq_computed_pk.c_d_expr:47
+ │ ├── scan uniq_computed_pk
+ │ │ ├── columns: uniq_computed_pk.i:41!null uniq_computed_pk.s:42 uniq_computed_pk.d:43 uniq_computed_pk.c_i_expr:44!null uniq_computed_pk.c_d:46 uniq_computed_pk.c_d_expr:47
+ │ │ └── computed column expressions
+ │ │ ├── uniq_computed_pk.c_i_expr:44
+ │ │ │ └── CASE WHEN uniq_computed_pk.i:41 < 0 THEN 'foo' ELSE 'bar' END
+ │ │ ├── uniq_computed_pk.c_s:45
+ │ │ │ └── uniq_computed_pk.s:42
+ │ │ ├── uniq_computed_pk.c_d:46
+ │ │ │ └── uniq_computed_pk.d:43
+ │ │ └── uniq_computed_pk.c_d_expr:47
+ │ │ └── uniq_computed_pk.d:43::STRING
+ │ └── projections
+ │ └── uniq_computed_pk.s:42 [as=uniq_computed_pk.c_s:45]
+ └── filters
+ ├── d:51 = uniq_computed_pk.d:43
+ └── (i:49 != uniq_computed_pk.i:41) OR (c_i_expr:52 != uniq_computed_pk.c_i_expr:44)
From f36a95a1eda327961928e3cc30ebbf6ac53bd0dc Mon Sep 17 00:00:00 2001
From: Aditya Maru
Date: Sat, 21 Nov 2020 12:04:44 -0500
Subject: [PATCH 5/7] importccl,sql: support importing non-public schemas from
pgdump
Previously, a PGDUMP import did not support any non-public schema
statements. Now that CRDB has user-defined schemas, bundle-format
IMPORTs need to be taught how to parse, create, and clean up
schema-related PGDUMP operations.
Note this PR only adds support for `CREATE SCHEMA` and usage of the
schema in `CREATE TABLE/SEQUENCE` PGDUMP statements. `ALTER SCHEMA`
statements are ignored; support might be added in a follow-up.
Release justification (bug fix): Import PGDUMP does not support
user-defined schemas.
Release note (sql change): IMPORT PGDUMP can now import dump files with
non-public schemas.
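A rough usage sketch (the file contents, URI, and names below are
hypothetical, not taken from this patch):

    -- dump.sql (hypothetical pgdump file)
    CREATE SCHEMA weather;
    CREATE TABLE weather.readings (
        id INT PRIMARY KEY,
        temp DECIMAL
    );

    -- Run from a SQL session; the schema is created as part of the job.
    IMPORT PGDUMP 'nodelocal://0/dump.sql';

The import now creates the weather schema in the target database and
registers the table as weather.readings instead of forcing it into the
public schema. ALTER SCHEMA statements in the dump are still ignored,
per the note above.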
---
pkg/ccl/importccl/BUILD.bazel | 3 +
pkg/ccl/importccl/import_processor_test.go | 6 +-
pkg/ccl/importccl/import_stmt.go | 678 ++++++++--
pkg/ccl/importccl/import_stmt_test.go | 210 ++-
pkg/ccl/importccl/import_table_creation.go | 53 +-
pkg/ccl/importccl/read_import_mysql.go | 7 +-
pkg/ccl/importccl/read_import_mysql_test.go | 11 +-
pkg/ccl/importccl/read_import_pgdump.go | 433 ++++--
pkg/ccl/importccl/testdata/pgdump/schema.sql | 166 +++
pkg/ccl/importccl/testutils_test.go | 2 +-
pkg/jobs/jobspb/jobs.pb.go | 1273 +++++++++++-------
pkg/jobs/jobspb/jobs.proto | 7 +
pkg/sql/alter_schema.go | 2 +-
pkg/sql/authorization.go | 2 +-
pkg/sql/create_schema.go | 136 +-
pkg/sql/grant_revoke.go | 3 +-
pkg/sql/reassign_owned_by.go | 2 +-
pkg/sql/reparent_database.go | 2 +-
pkg/sql/schema.go | 7 +-
pkg/sql/sem/tree/name_resolution.go | 3 +-
pkg/sql/table.go | 2 +-
pkg/sql/user.go | 9 +-
22 files changed, 2208 insertions(+), 809 deletions(-)
create mode 100644 pkg/ccl/importccl/testdata/pgdump/schema.sql
diff --git a/pkg/ccl/importccl/BUILD.bazel b/pkg/ccl/importccl/BUILD.bazel
index 8b446466fd7e..4b408614b781 100644
--- a/pkg/ccl/importccl/BUILD.bazel
+++ b/pkg/ccl/importccl/BUILD.bazel
@@ -38,11 +38,14 @@ go_library(
"//pkg/settings/cluster",
"//pkg/sql",
"//pkg/sql/catalog",
+ "//pkg/sql/catalog/catalogkeys",
"//pkg/sql/catalog/catalogkv",
"//pkg/sql/catalog/colinfo",
+ "//pkg/sql/catalog/dbdesc",
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/descs",
"//pkg/sql/catalog/resolver",
+ "//pkg/sql/catalog/schemadesc",
"//pkg/sql/catalog/schemaexpr",
"//pkg/sql/catalog/tabledesc",
"//pkg/sql/catalog/typedesc",
diff --git a/pkg/ccl/importccl/import_processor_test.go b/pkg/ccl/importccl/import_processor_test.go
index 34e8dfa601e3..86ad25d5c749 100644
--- a/pkg/ccl/importccl/import_processor_test.go
+++ b/pkg/ccl/importccl/import_processor_test.go
@@ -937,8 +937,12 @@ func newTestSpec(
}
assert.True(t, numCols > 0)
+ fullTableName := "simple"
+ if format.Format == roachpb.IOFileFormat_PgDump {
+ fullTableName = "public.simple"
+ }
spec.tables = map[string]*execinfrapb.ReadImportDataSpec_ImportTable{
- "simple": {Desc: descr.TableDesc(), TargetCols: targetCols[0:numCols]},
+ fullTableName: {Desc: descr.TableDesc(), TargetCols: targetCols[0:numCols]},
}
for id, path := range inputs {
diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go
index cb67403d330f..3a7faea7916b 100644
--- a/pkg/ccl/importccl/import_stmt.go
+++ b/pkg/ccl/importccl/import_stmt.go
@@ -35,10 +35,13 @@ import (
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
@@ -782,8 +785,19 @@ func importPlanHook(
// If we target a single table, populate details with one entry of tableName.
if table != nil {
tableDetails = make([]jobspb.ImportDetails_Table, 1)
+ tableName := table.ObjectName.String()
+ // PGDUMP supports importing tables from non-public schemas, so we
+ // must prefix the target table name with the target schema name.
+ if format.Format == roachpb.IOFileFormat_PgDump {
+ if table.Schema() == "" {
+ return errors.Newf("expected schema for target table %s to be resolved",
+ tableName)
+ }
+ tableName = fmt.Sprintf("%s.%s", table.SchemaName.String(),
+ table.ObjectName.String())
+ }
tableDetails[0] = jobspb.ImportDetails_Table{
- Name: table.ObjectName.String(),
+ Name: tableName,
IsNew: true,
}
}
@@ -1095,6 +1109,7 @@ func prepareNewTableDescsForIngestion(
p sql.JobExecContext,
importTables []jobspb.ImportDetails_Table,
parentID descpb.ID,
+ schemaRewrites backupccl.DescRewriteMap,
) ([]*descpb.TableDescriptor, error) {
newMutableTableDescriptors := make([]*tabledesc.Mutable, len(importTables))
for i := range importTables {
@@ -1105,7 +1120,14 @@ func prepareNewTableDescsForIngestion(
// restoring. We do this last because we want to avoid calling
// GenerateUniqueDescID if there's any kind of error above.
// Reserving a table ID now means we can avoid the rekey work during restore.
- tableRewrites := make(backupccl.DescRewriteMap)
+ //
+ // schemaRewrites may contain information which is used in RewriteTableDescs
+ // to rewrite the parent schema ID in the table desc to point to the correct
+ // schema ID.
+ tableRewrites := schemaRewrites
+ if tableRewrites == nil {
+ tableRewrites = make(backupccl.DescRewriteMap)
+ }
seqVals := make(map[descpb.ID]int64, len(importTables))
for _, tableDesc := range importTables {
id, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
@@ -1215,89 +1237,303 @@ func prepareExistingTableDescForIngestion(
return importing.TableDesc(), nil
}
+func createNonDropDatabaseChangeJob(
+ user security.SQLUsername,
+ databaseID descpb.ID,
+ jobDesc string,
+ p sql.JobExecContext,
+ txn *kv.Txn,
+) (*jobs.Job, error) {
+ jobRecord := jobs.Record{
+ Description: jobDesc,
+ Username: user,
+ Details: jobspb.SchemaChangeDetails{
+ DescID: databaseID,
+ FormatVersion: jobspb.DatabaseJobFormatVersion,
+ },
+ Progress: jobspb.SchemaChangeProgress{},
+ }
+
+ jobID := p.ExecCfg().JobRegistry.MakeJobID()
+ return p.ExecCfg().JobRegistry.CreateJobWithTxn(
+ p.ExtendedEvalContext().Context,
+ jobRecord,
+ jobID,
+ txn,
+ )
+}
+
+func writeNonDropDatabaseChange(
+ ctx context.Context,
+ desc *dbdesc.Mutable,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+ p sql.JobExecContext,
+ jobDesc string,
+) ([]int64, error) {
+ var job *jobs.Job
+ var err error
+ if job, err = createNonDropDatabaseChangeJob(p.User(), desc.ID, jobDesc, p, txn); err != nil {
+ return nil, err
+ }
+
+ queuedJob := []int64{job.ID()}
+ b := txn.NewBatch()
+ dg := catalogkv.NewOneLevelUncachedDescGetter(txn, p.ExecCfg().Codec)
+ if err := desc.Validate(ctx, dg); err != nil {
+ return nil, err
+ }
+ err = descsCol.WriteDescToBatch(
+ ctx,
+ p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
+ desc,
+ b,
+ )
+ if err != nil {
+ return nil, err
+ }
+ return queuedJob, txn.Run(ctx, b)
+}
+
+func createSchemaDescriptorWithID(
+ ctx context.Context,
+ idKey roachpb.Key,
+ id descpb.ID,
+ descriptor catalog.Descriptor,
+ p sql.JobExecContext,
+ descsCol *descs.Collection,
+ txn *kv.Txn,
+) error {
+ if descriptor.GetID() == descpb.InvalidID {
+ return errors.AssertionFailedf("cannot create descriptor with an empty ID: %v", descriptor)
+ }
+ if descriptor.GetID() != id {
+ return errors.AssertionFailedf("cannot create descriptor with ID %v; expected ID %v; descriptor %v",
+ descriptor.GetID(), id, descriptor)
+ }
+ b := &kv.Batch{}
+ descID := descriptor.GetID()
+ if p.ExtendedEvalContext().Tracing.KVTracingEnabled() {
+ log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID)
+ }
+ b.CPut(idKey, descID, nil)
+ if err := catalogkv.WriteNewDescToBatch(
+ ctx,
+ p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
+ p.ExecCfg().Settings,
+ b,
+ p.ExecCfg().Codec,
+ descID,
+ descriptor,
+ ); err != nil {
+ return err
+ }
+
+ mutDesc, ok := descriptor.(catalog.MutableDescriptor)
+ if !ok {
+ return errors.Newf("unexpected type %T when creating descriptor", descriptor)
+ }
+ switch mutDesc.(type) {
+ case *schemadesc.Mutable:
+ if err := descsCol.AddUncommittedDescriptor(mutDesc); err != nil {
+ return err
+ }
+ default:
+ return errors.Newf("unexpected type %T when creating descriptor", mutDesc)
+ }
+
+ return txn.Run(ctx, b)
+}
+
+// prepareSchemasForIngestion is responsible for assigning the created schema
+// descriptors actual IDs, updating the parent DB with references to the new
+// schemas and writing the schema descriptors to disk.
+func (r *importResumer) prepareSchemasForIngestion(
+ ctx context.Context,
+ p sql.JobExecContext,
+ details jobspb.ImportDetails,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+) (*preparedSchemaMetadata, error) {
+ schemaMetadata := &preparedSchemaMetadata{
+ schemaPreparedDetails: details,
+ newSchemaIDToName: make(map[descpb.ID]string),
+ oldSchemaIDToName: make(map[descpb.ID]string),
+ }
+
+ schemaMetadata.schemaPreparedDetails.Schemas = make([]jobspb.ImportDetails_Schema,
+ len(details.Schemas))
+
+ desc, err := descsCol.GetMutableDescriptorByID(ctx, details.ParentID, txn)
+ if err != nil {
+ return nil, err
+ }
+
+ dbDesc, ok := desc.(*dbdesc.Mutable)
+ if !ok {
+ return nil, errors.Newf("expected ID %d to refer to the database being imported into",
+ details.ParentID)
+ }
+
+ if dbDesc.Schemas == nil {
+ dbDesc.Schemas = make(map[string]descpb.DatabaseDescriptor_SchemaInfo)
+ }
+
+ schemaMetadata.schemaRewrites = make(backupccl.DescRewriteMap)
+ mutableSchemaDescs := make([]*schemadesc.Mutable, 0)
+ for _, desc := range details.Schemas {
+ schemaMetadata.oldSchemaIDToName[desc.Desc.GetID()] = desc.Desc.GetName()
+ newMutableSchemaDescriptor := schemadesc.NewCreatedMutable(*desc.Desc)
+
+ // Verification steps have passed, generate a new schema ID. We do this
+ // last because we want to avoid calling GenerateUniqueDescID if there's
+ // any kind of error in the prior stages of import.
+ id, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
+ if err != nil {
+ return nil, err
+ }
+ newMutableSchemaDescriptor.Version = 1
+ newMutableSchemaDescriptor.ID = id
+ mutableSchemaDescs = append(mutableSchemaDescs, newMutableSchemaDescriptor)
+
+ schemaMetadata.newSchemaIDToName[id] = newMutableSchemaDescriptor.GetName()
+
+ // Update the parent database with this schema information.
+ dbDesc.Schemas[newMutableSchemaDescriptor.Name] =
+ descpb.DatabaseDescriptor_SchemaInfo{ID: newMutableSchemaDescriptor.ID, Dropped: false}
+
+ schemaMetadata.schemaRewrites[desc.Desc.ID] = &jobspb.RestoreDetails_DescriptorRewrite{
+ ID: id,
+ }
+ }
+
+ // Queue a job to write the updated database descriptor.
+ schemaMetadata.queuedSchemaJobs, err = writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p,
+ fmt.Sprintf("updating parent database %s when importing new schemas", dbDesc.GetName()))
+ if err != nil {
+ return nil, err
+ }
+
+ // Finally create the schemas on disk.
+ for i, mutDesc := range mutableSchemaDescs {
+ err = createSchemaDescriptorWithID(ctx, catalogkeys.NewSchemaKey(dbDesc.ID,
+ mutDesc.GetName()).Key(p.ExecCfg().Codec), mutDesc.ID, mutDesc, p, descsCol, txn)
+ if err != nil {
+ return nil, err
+ }
+ schemaMetadata.schemaPreparedDetails.Schemas[i] = jobspb.ImportDetails_Schema{
+ Desc: mutDesc.SchemaDesc(),
+ }
+ }
+
+ return schemaMetadata, err
+}
+
+func constructSchemaAndTableKey(
+ tableDesc *descpb.TableDescriptor, schemaIDToName map[descpb.ID]string,
+) (schemaAndTableName, error) {
+ var schemaName string
+ var ok bool
+ schemaName, ok = schemaIDToName[tableDesc.GetUnexposedParentSchemaID()]
+ if !ok && tableDesc.UnexposedParentSchemaID != keys.PublicSchemaID {
+ return schemaAndTableName{}, errors.Newf("invalid parent schema ID %d for table %s",
+ tableDesc.UnexposedParentSchemaID, tableDesc.GetName())
+ }
+
+ return schemaAndTableName{schema: schemaName, table: tableDesc.GetName()}, nil
+}
+
// prepareTableDescsForIngestion prepares table descriptors for the ingestion
// step of import. The descriptors are in an IMPORTING state (offline) on
// successful completion of this method.
func (r *importResumer) prepareTableDescsForIngestion(
- ctx context.Context, p sql.JobExecContext, details jobspb.ImportDetails,
-) error {
- err := descs.Txn(ctx, p.ExecCfg().Settings, p.ExecCfg().LeaseManager,
- p.ExecCfg().InternalExecutor, p.ExecCfg().DB, func(
- ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
- ) error {
-
- importDetails := details
- importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables))
-
- newTablenameToIdx := make(map[string]int, len(importDetails.Tables))
- var hasExistingTables bool
- var err error
- var newTableDescs []jobspb.ImportDetails_Table
- var desc *descpb.TableDescriptor
- for i, table := range details.Tables {
- if !table.IsNew {
- desc, err = prepareExistingTableDescForIngestion(ctx, txn, descsCol, table.Desc)
- if err != nil {
- return err
- }
- importDetails.Tables[i] = jobspb.ImportDetails_Table{Desc: desc, Name: table.Name,
- SeqVal: table.SeqVal,
- IsNew: table.IsNew,
- TargetCols: table.TargetCols}
-
- hasExistingTables = true
- } else {
- newTablenameToIdx[table.Desc.Name] = i
- // Make a deep copy of the table descriptor so that rewrites do not
- // partially clobber the descriptor stored in details.
- newTableDescs = append(newTableDescs,
- *protoutil.Clone(&table).(*jobspb.ImportDetails_Table))
- }
+ ctx context.Context,
+ p sql.JobExecContext,
+ details jobspb.ImportDetails,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+ schemaMetadata *preparedSchemaMetadata,
+) (jobspb.ImportDetails, error) {
+ importDetails := details
+ importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables))
+
+ newSchemaAndTableNameToIdx := make(map[string]int, len(importDetails.Tables))
+ var hasExistingTables bool
+ var err error
+ var newTableDescs []jobspb.ImportDetails_Table
+ var desc *descpb.TableDescriptor
+ for i, table := range details.Tables {
+ if !table.IsNew {
+ desc, err = prepareExistingTableDescForIngestion(ctx, txn, descsCol, table.Desc)
+ if err != nil {
+ return importDetails, err
}
+ importDetails.Tables[i] = jobspb.ImportDetails_Table{Desc: desc, Name: table.Name,
+ SeqVal: table.SeqVal,
+ IsNew: table.IsNew,
+ TargetCols: table.TargetCols}
- // Prepare the table descriptors for newly created tables being imported
- // into.
- //
- // TODO(adityamaru): This is still unnecessarily complicated. If we can get
- // the new table desc preparation to work on a per desc basis, rather than
- // requiring all the newly created descriptors, then this can look like the
- // call to prepareExistingTableDescForIngestion. Currently, FK references
- // misbehave when I tried to write the desc one at a time.
- if len(newTableDescs) != 0 {
- res, err := prepareNewTableDescsForIngestion(
- ctx, txn, descsCol, p, newTableDescs, importDetails.ParentID)
- if err != nil {
- return err
- }
-
- for _, desc := range res {
- i := newTablenameToIdx[desc.Name]
- table := details.Tables[i]
- importDetails.Tables[i] = jobspb.ImportDetails_Table{Desc: desc,
- Name: table.Name,
- SeqVal: table.SeqVal,
- IsNew: table.IsNew,
- TargetCols: table.TargetCols}
- }
+ hasExistingTables = true
+ } else {
+ // PGDUMP imports support non-public schemas.
+ // For the purpose of disambiguation we must take the schema into
+ // account when constructing the newSchemaAndTableNameToIdx map.
+ // At this point the table descriptor's parent schema ID has not been
+ // remapped to the newly generated schema ID.
+ key, err := constructSchemaAndTableKey(table.Desc, schemaMetadata.oldSchemaIDToName)
+ if err != nil {
+ return importDetails, err
}
+ newSchemaAndTableNameToIdx[key.String()] = i
+ // Make a deep copy of the table descriptor so that rewrites do not
+ // partially clobber the descriptor stored in details.
+ newTableDescs = append(newTableDescs,
+ *protoutil.Clone(&table).(*jobspb.ImportDetails_Table))
+ }
+ }
- importDetails.PrepareComplete = true
+ // Prepare the table descriptors for newly created tables being imported
+ // into.
+ //
+ // TODO(adityamaru): This is still unnecessarily complicated. If we can get
+ // the new table desc preparation to work on a per desc basis, rather than
+ // requiring all the newly created descriptors, then this can look like the
+ // call to prepareExistingTableDescForIngestion. Currently, FK references
+ // misbehave when I tried to write the desc one at a time.
+ if len(newTableDescs) != 0 {
+ res, err := prepareNewTableDescsForIngestion(
+ ctx, txn, descsCol, p, newTableDescs, importDetails.ParentID, schemaMetadata.schemaRewrites)
+ if err != nil {
+ return importDetails, err
+ }
- // If we do not have pending schema changes on existing descriptors we can
- // choose our Walltime (to IMPORT from) immediately. Otherwise, we have to
- // wait for all nodes to see the same descriptor version before doing so.
- if !hasExistingTables {
- importDetails.Walltime = p.ExecCfg().Clock.Now().WallTime
- } else {
- importDetails.Walltime = 0
+ for _, desc := range res {
+ key, err := constructSchemaAndTableKey(desc, schemaMetadata.newSchemaIDToName)
+ if err != nil {
+ return importDetails, err
}
+ i := newSchemaAndTableNameToIdx[key.String()]
+ table := details.Tables[i]
+ importDetails.Tables[i] = jobspb.ImportDetails_Table{Desc: desc,
+ Name: table.Name,
+ SeqVal: table.SeqVal,
+ IsNew: table.IsNew,
+ TargetCols: table.TargetCols}
+ }
+ }
- // Update the job once all descs have been prepared for ingestion.
- err = r.job.SetDetails(ctx, txn, importDetails)
+ importDetails.PrepareComplete = true
- return err
- })
- return err
+ // If we do not have pending schema changes on existing descriptors we can
+ // choose our Walltime (to IMPORT from) immediately. Otherwise, we have to
+ // wait for all nodes to see the same descriptor version before doing so.
+ if !hasExistingTables {
+ importDetails.Walltime = p.ExecCfg().Clock.Now().WallTime
+ } else {
+ importDetails.Walltime = 0
+ }
+
+ return importDetails, nil
}
// ReportResults implements JobResultsReporter interface.
@@ -1426,8 +1662,9 @@ func parseAndCreateBundleTableDescs(
format roachpb.IOFileFormat,
walltime int64,
owner security.SQLUsername,
-) ([]*tabledesc.Mutable, error) {
+) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
+ var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var tableName string
@@ -1440,54 +1677,59 @@ func parseAndCreateBundleTableDescs(
store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, files[0], p.User())
if err != nil {
- return tableDescs, err
+ return tableDescs, schemaDescs, err
}
defer store.Close()
raw, err := store.ReadFile(ctx, "")
if err != nil {
- return tableDescs, err
+ return tableDescs, schemaDescs, err
}
defer raw.Close()
reader, err := decompressingReader(raw, files[0], format.Compression)
if err != nil {
- return tableDescs, err
+ return tableDescs, schemaDescs, err
}
defer reader.Close()
- fks := fkHandler{skip: skipFKs, allowed: true, resolver: make(fkResolver)}
+ fks := fkHandler{skip: skipFKs, allowed: true, resolver: fkResolver{
+ tableNameToDesc: make(map[string]*tabledesc.Mutable),
+ }}
switch format.Format {
case roachpb.IOFileFormat_Mysqldump:
+ fks.resolver.format.Format = roachpb.IOFileFormat_Mysqldump
evalCtx := &p.ExtendedEvalContext().EvalContext
tableDescs, err = readMysqlCreateTable(ctx, reader, evalCtx, p, defaultCSVTableID, parentID, tableName, fks, seqVals, owner, walltime)
case roachpb.IOFileFormat_PgDump:
+ fks.resolver.format.Format = roachpb.IOFileFormat_PgDump
evalCtx := &p.ExtendedEvalContext().EvalContext
// Setup a logger to handle unsupported DDL statements in the PGDUMP file.
unsupportedStmtLogger := makeUnsupportedStmtLogger(format.PgDump.IgnoreUnsupported,
format.PgDump.IgnoreUnsupportedLog, schemaParsing, p.ExecCfg().DistSQLSrv.ExternalStorage)
- tableDescs, err = readPostgresCreateTable(ctx, reader, evalCtx, p, tableName, parentID,
- walltime, fks, int(format.PgDump.MaxRowSize), owner, unsupportedStmtLogger)
+ tableDescs, schemaDescs, err = readPostgresCreateTable(ctx, reader, evalCtx, p, tableName,
+ parentID, walltime, fks, int(format.PgDump.MaxRowSize), owner, unsupportedStmtLogger)
logErr := unsupportedStmtLogger.flush(ctx, p.User())
if logErr != nil {
- return nil, logErr
+ return nil, nil, logErr
}
default:
- return tableDescs, errors.Errorf("non-bundle format %q does not support reading schemas", format.Format.String())
+ return tableDescs, schemaDescs, errors.Errorf(
+ "non-bundle format %q does not support reading schemas", format.Format.String())
}
if err != nil {
- return tableDescs, err
+ return tableDescs, schemaDescs, err
}
if tableDescs == nil && len(details.Tables) > 0 {
- return tableDescs, errors.Errorf("table definition not found for %q", tableName)
+ return tableDescs, schemaDescs, errors.Errorf("table definition not found for %q", tableName)
}
- return tableDescs, err
+ return tableDescs, schemaDescs, err
}
func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs interface{}) error {
@@ -1508,19 +1750,27 @@ func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs inter
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID()))
}
+ var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var err error
walltime := p.ExecCfg().Clock.Now().WallTime
- if tableDescs, err = parseAndCreateBundleTableDescs(
+ if tableDescs, schemaDescs, err = parseAndCreateBundleTableDescs(
ctx, p, details, seqVals, skipFKs, parentID, files, format, walltime, owner); err != nil {
return err
}
+ schemaDetails := make([]jobspb.ImportDetails_Schema, len(schemaDescs))
+ for i, schemaDesc := range schemaDescs {
+ schemaDetails[i] = jobspb.ImportDetails_Schema{Desc: schemaDesc.SchemaDesc()}
+ }
+ details.Schemas = schemaDetails
+
tableDetails := make([]jobspb.ImportDetails_Table, len(tableDescs))
- for i := range tableDescs {
+ for i, tableDesc := range tableDescs {
tableDetails[i] = jobspb.ImportDetails_Table{
- Desc: tableDescs[i].TableDesc(),
+ Name: tableDesc.GetName(),
+ Desc: tableDesc.TableDesc(),
SeqVal: seqVals[tableDescs[i].ID],
IsNew: true,
}
@@ -1546,6 +1796,14 @@ func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs inter
return nil
}
+type preparedSchemaMetadata struct {
+ schemaPreparedDetails jobspb.ImportDetails
+ schemaRewrites backupccl.DescRewriteMap
+ newSchemaIDToName map[descpb.ID]string
+ oldSchemaIDToName map[descpb.ID]string
+ queuedSchemaJobs []int64
+}
+
// Resume is part of the jobs.Resumer interface.
func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
@@ -1573,7 +1831,47 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
if details.Tables != nil {
// Skip prepare stage on job resumption, if it has already been completed.
if !details.PrepareComplete {
- if err := r.prepareTableDescsForIngestion(ctx, p, details); err != nil {
+ var schemaMetadata *preparedSchemaMetadata
+ err := descs.Txn(ctx, p.ExecCfg().Settings, p.ExecCfg().LeaseManager,
+ p.ExecCfg().InternalExecutor, p.ExecCfg().DB, func(ctx context.Context, txn *kv.Txn,
+ descsCol *descs.Collection) error {
+ var preparedDetails jobspb.ImportDetails
+ schemaMetadata = &preparedSchemaMetadata{
+ newSchemaIDToName: make(map[descpb.ID]string),
+ oldSchemaIDToName: make(map[descpb.ID]string),
+ }
+ var err error
+ curDetails := details
+ if len(details.Schemas) != 0 {
+ schemaMetadata, err = r.prepareSchemasForIngestion(ctx, p, curDetails, txn, descsCol)
+ if err != nil {
+ return err
+ }
+ curDetails = schemaMetadata.schemaPreparedDetails
+ }
+
+ preparedDetails, err = r.prepareTableDescsForIngestion(ctx, p, curDetails, txn, descsCol,
+ schemaMetadata)
+ if err != nil {
+ return err
+ }
+
+ // Update the job details now that the schemas and table descs have
+ // been "prepared".
+ return r.job.SetDetails(ctx, txn, preparedDetails)
+ })
+ if err != nil {
+ return err
+ }
+
+ // Run the queued job which updates the database descriptor to contain the
+ // newly created schemas.
+ // NB: It seems the registry eventually adopts the job anyway, but this
+ // is in keeping with the semantics we use when creating a schema during
+ // sql execution. Namely, queue job in the txn which creates the schema
+ // desc and run once the txn has committed.
+ if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
+ schemaMetadata.queuedSchemaJobs); err != nil {
return err
}
@@ -1581,13 +1879,34 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
details = r.job.Details().(jobspb.ImportDetails)
}
+ // Create a mapping from schemaID to schemaName.
+ schemaIDToName := make(map[descpb.ID]string)
+ for _, i := range details.Schemas {
+ schemaIDToName[i.Desc.GetID()] = i.Desc.GetName()
+ }
+
for _, i := range details.Tables {
+ var tableName string
if i.Name != "" {
- tables[i.Name] = &execinfrapb.ReadImportDataSpec_ImportTable{Desc: i.Desc, TargetCols: i.TargetCols}
+ tableName = i.Name
} else if i.Desc != nil {
- tables[i.Desc.Name] = &execinfrapb.ReadImportDataSpec_ImportTable{Desc: i.Desc, TargetCols: i.TargetCols}
+ tableName = i.Desc.Name
} else {
- return errors.Errorf("invalid table specification")
+ return errors.New("invalid table specification")
+ }
+
+ // If we are importing from PGDUMP, qualify the table name with the schema
+ // name since we support non-public schemas.
+ if details.Format.Format == roachpb.IOFileFormat_PgDump {
+ schemaName := tree.PublicSchema
+ if schema, ok := schemaIDToName[i.Desc.GetUnexposedParentSchemaID()]; ok {
+ schemaName = schema
+ }
+ tableName = fmt.Sprintf("%s.%s", schemaName, tableName)
+ }
+ tables[tableName] = &execinfrapb.ReadImportDataSpec_ImportTable{
+ Desc: i.Desc,
+ TargetCols: i.TargetCols,
}
}
}
@@ -1653,6 +1972,10 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
}
}
+ if err := r.publishSchemas(ctx, p.ExecCfg()); err != nil {
+ return err
+ }
+
if err := r.publishTables(ctx, p.ExecCfg()); err != nil {
return err
}
@@ -1688,6 +2011,50 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
return nil
}
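+
+// publishSchemas updates the status of imported schemas from OFFLINE to
+// PUBLIC.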
+func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.ExecutorConfig) error {
+ details := r.job.Details().(jobspb.ImportDetails)
+ // Schemas should only be published once.
+ if details.SchemasPublished {
+ return nil
+ }
+ log.Event(ctx, "making schemas live")
+
+ lm, ie, db := execCfg.LeaseManager, execCfg.InternalExecutor, execCfg.DB
+ return descs.Txn(ctx, execCfg.Settings, lm, ie, db, func(
+ ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
+ ) error {
+ b := txn.NewBatch()
+ for _, schema := range details.Schemas {
+ newDesc, err := descsCol.GetMutableDescriptorByID(ctx, schema.Desc.GetID(), txn)
+ if err != nil {
+ return err
+ }
+ newSchemaDesc, ok := newDesc.(*schemadesc.Mutable)
+ if !ok {
+ return errors.Newf("expected schema descriptor with ID %v, got %v",
+ schema.Desc.GetID(), newDesc)
+ }
+ newSchemaDesc.SetPublic()
+ if err := descsCol.WriteDescToBatch(
+ ctx, false /* kvTrace */, newSchemaDesc, b,
+ ); err != nil {
+ return errors.Wrapf(err, "publishing schema %d", newSchemaDesc.ID)
+ }
+ }
+ if err := txn.Run(ctx, b); err != nil {
+ return errors.Wrap(err, "publishing schemas")
+ }
+
+		// Update the job record to mark the schemas as published.
+ details.SchemasPublished = true
+ err := r.job.SetDetails(ctx, txn, details)
+ if err != nil {
+ return errors.Wrap(err, "updating job details after publishing schemas")
+ }
+ return nil
+ })
+}
+
// publishTables updates the status of imported tables from OFFLINE to PUBLIC.
func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.ExecutorConfig) error {
details := r.job.Details().(jobspb.ImportDetails)
@@ -1771,18 +2138,46 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor
// by adding the table descriptors in DROP state, which causes the schema change
// stuff to delete the keys in the background.
func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
+ p := execCtx.(sql.JobExecContext)
details := r.job.Details().(jobspb.ImportDetails)
addToFileFormatTelemetry(details.Format.Format.String(), "failed")
cfg := execCtx.(sql.JobExecContext).ExecCfg()
lm, ie, db := cfg.LeaseManager, cfg.InternalExecutor, cfg.DB
- return descs.Txn(ctx, cfg.Settings, lm, ie, db, func(
+ var jobsToRunAfterTxnCommit []int64
+ if err := descs.Txn(ctx, cfg.Settings, lm, ie, db, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
if err := r.dropTables(ctx, txn, descsCol, cfg); err != nil {
return err
}
+
+ // Drop all the schemas which may have been created during a bundle import.
+ // These schemas should now be empty as all the tables in them would be new
+ // tables created during the import, and therefore dropped by the above
+ // dropTables method. This allows us to avoid "collecting" objects in the
+ // schema before dropping the descriptor.
+ var err error
+ jobsToRunAfterTxnCommit, err = r.dropSchemas(ctx, txn, descsCol, cfg, p)
+ if err != nil {
+ return err
+ }
+
return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider)
- })
+ }); err != nil {
+ return err
+ }
+
+ // Run any jobs which might have been queued when dropping the schemas.
+ // This would be a job to drop all the schemas, and a job to update the parent
+ // database descriptor.
+ if len(jobsToRunAfterTxnCommit) != 0 {
+ if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
+ jobsToRunAfterTxnCommit); err != nil {
+ return errors.Wrap(err, "failed to run jobs that drop the imported schemas")
+ }
+ }
+
+ return nil
}
func (r *importResumer) releaseProtectedTimestamp(
@@ -1804,6 +2199,101 @@ func (r *importResumer) releaseProtectedTimestamp(
return err
}
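+
+// dropSchemas marks the schemas created during the import as dropped, and
+// queues jobs to finalize the drop and to update the parent database
+// descriptor. It returns the IDs of the queued jobs so that they can be run
+// once the txn commits.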
+func (r *importResumer) dropSchemas(
+ ctx context.Context,
+ txn *kv.Txn,
+ descsCol *descs.Collection,
+ execCfg *sql.ExecutorConfig,
+ p sql.JobExecContext,
+) ([]int64, error) {
+ details := r.job.Details().(jobspb.ImportDetails)
+
+	// If the prepare step of the import job was not completed, then the
+	// descriptors do not need to be rolled back as the txn updating them never
+	// committed.
+ if !details.PrepareComplete || len(details.Schemas) == 0 {
+ return nil, nil
+ }
+
+ // Resolve the database descriptor.
+ desc, err := descsCol.GetMutableDescriptorByID(ctx, details.ParentID, txn)
+ if err != nil {
+ return nil, err
+ }
+
+ dbDesc, ok := desc.(*dbdesc.Mutable)
+ if !ok {
+ return nil, errors.Newf("expected ID %d to refer to the database being imported into",
+ details.ParentID)
+ }
+
+ droppedSchemaIDs := make([]descpb.ID, 0)
+ for _, schema := range details.Schemas {
+ desc, err := descsCol.GetMutableDescriptorByID(ctx, schema.Desc.ID, txn)
+ if err != nil {
+ return nil, err
+ }
+ var schemaDesc *schemadesc.Mutable
+ var ok bool
+ if schemaDesc, ok = desc.(*schemadesc.Mutable); !ok {
+ return nil, errors.Newf("unable to resolve schema desc with ID %d", schema.Desc.ID)
+ }
+
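+		// Queue the schema's name for draining so that its namespace entry is
+		// removed once the drop is finalized.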
+ schemaDesc.DrainingNames = append(schemaDesc.DrainingNames,
+ descpb.NameInfo{ParentID: details.ParentID, ParentSchemaID: keys.RootNamespaceID,
+ Name: schemaDesc.Name})
+
+ // Update the parent database with information about the dropped schema.
+ if dbDesc.Schemas == nil {
+ dbDesc.Schemas = make(map[string]descpb.DatabaseDescriptor_SchemaInfo)
+ }
+ dbDesc.Schemas[schema.Desc.Name] = descpb.DatabaseDescriptor_SchemaInfo{ID: dbDesc.ID,
+ Dropped: true}
+
+ // Mark the descriptor as dropped and write it to the batch.
+ schemaDesc.State = descpb.DescriptorState_DROP
+ droppedSchemaIDs = append(droppedSchemaIDs, schemaDesc.GetID())
+
+ b := txn.NewBatch()
+ if err := descsCol.WriteDescToBatch(ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
+ schemaDesc, b); err != nil {
+ return nil, err
+ }
+ err = txn.Run(ctx, b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Write out the change to the database. This only creates a job record to be
+ // run after the txn commits.
+ queuedJob, err := writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p, "")
+ if err != nil {
+ return nil, err
+ }
+
+	// Create the job to drop the schemas.
+ dropSchemaJobRecord := jobs.Record{
+ Description: "dropping schemas as part of an import job rollback",
+ Username: p.User(),
+ DescriptorIDs: droppedSchemaIDs,
+ Details: jobspb.SchemaChangeDetails{
+ DroppedSchemas: droppedSchemaIDs,
+ DroppedDatabaseID: descpb.InvalidID,
+ FormatVersion: jobspb.DatabaseJobFormatVersion,
+ },
+ Progress: jobspb.SchemaChangeProgress{},
+ }
+ jobID := p.ExecCfg().JobRegistry.MakeJobID()
+ job, err := execCfg.JobRegistry.CreateJobWithTxn(ctx, dropSchemaJobRecord, jobID, txn)
+ if err != nil {
+ return nil, err
+ }
+ queuedJob = append(queuedJob, job.ID())
+
+ return queuedJob, nil
+}
+
// dropTables implements the OnFailOrCancel logic.
func (r *importResumer) dropTables(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig,
diff --git a/pkg/ccl/importccl/import_stmt_test.go b/pkg/ccl/importccl/import_stmt_test.go
index bb2f86fdcd59..f19add2b194e 100644
--- a/pkg/ccl/importccl/import_stmt_test.go
+++ b/pkg/ccl/importccl/import_stmt_test.go
@@ -914,7 +914,7 @@ END;
typ: "TABLE weather FROM PGDUMP",
data: testPgdumpFk,
with: "WITH ignore_unsupported",
- err: `table "cities" not found`,
+ err: `table "public.cities" not found`,
},
{
name: "fk unreferenced skipped",
@@ -1022,8 +1022,12 @@ END;
{
name: "non-public schema",
typ: "PGDUMP",
- data: "create table s.t (i INT8)",
- err: `non-public schemas unsupported: s`,
+ data: `
+ create schema s;
+ create table s.t (i INT8)`,
+ query: map[string][][]string{
+ getTablesQuery: {{"s", "t", "table"}},
+ },
},
{
name: "many tables",
@@ -5959,6 +5963,206 @@ func TestImportPgDumpDropTable(t *testing.T) {
})
}
+func TestImportPgDumpSchemas(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ const nodes = 1
+ ctx := context.Background()
+ baseDir := filepath.Join("testdata", "pgdump")
+ args := base.TestServerArgs{ExternalIODir: baseDir}
+
+	// Simple schema test which creates 3 user-defined schemas, each containing
+	// a `test` table, along with some additional objects in the `bar` schema.
+ t.Run("schema.sql", func(t *testing.T) {
+ tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
+ defer tc.Stopper().Stop(ctx)
+ conn := tc.Conns[0]
+ sqlDB := sqlutils.MakeSQLRunner(conn)
+
+ sqlDB.Exec(t, `CREATE DATABASE schemadb; SET DATABASE = schemadb`)
+ sqlDB.Exec(t, "IMPORT PGDUMP 'nodelocal://0/schema.sql' WITH ignore_unsupported")
+
+		// Check that the 3 imported schemas are present, along with public.
+ expectedSchemaNames := [][]string{{"bar"}, {"baz"}, {"foo"}, {"public"}}
+ sqlDB.CheckQueryResults(t,
+ `SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner IS NOT NULL ORDER BY schema_name`,
+ expectedSchemaNames)
+
+ // Check that we have a test table in each schema with the expected content.
+ expectedContent := [][]string{{"1", "abc"}, {"2", "def"}}
+ expectedTableName := "test"
+ expectedTableName2 := "test2"
+ expectedSeqName := "testseq"
+ sqlDB.CheckQueryResults(t, `SELECT schema_name,
+table_name FROM [SHOW TABLES] ORDER BY (schema_name, table_name)`,
+ [][]string{{"bar", expectedTableName}, {"bar", expectedTableName2}, {"bar", expectedSeqName},
+ {"baz", expectedTableName}, {"foo", expectedTableName}, {"public", expectedTableName}})
+
+ for _, schemaCollection := range expectedSchemaNames {
+ for _, schema := range schemaCollection {
+ sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM %s.%s`, schema, expectedTableName),
+ expectedContent)
+ }
+ }
+
+ // There should be two jobs, the import and a job updating the parent
+ // database descriptor.
+ sqlDB.CheckQueryResults(t, `SELECT job_type, status FROM [SHOW JOBS] ORDER BY job_type`,
+ [][]string{{"IMPORT", "succeeded"}, {"SCHEMA CHANGE", "succeeded"}})
+
+		// Attempt to rename one of the imported schemas to verify that the
+		// parent database descriptor has been updated with information about the
+		// imported schemas.
+ sqlDB.Exec(t, `ALTER SCHEMA foo RENAME TO biz`)
+
+		// Ensure that FK relationships work across user-defined schemas.
+ sqlDB.Exec(t, `INSERT INTO bar.test VALUES (100, 'a')`)
+ sqlDB.ExpectErr(t, "violates foreign key constraint \"testfk\"", `INSERT INTO bar.test2 VALUES (101, 'a')`)
+ })
+
+ t.Run("target-table-schema.sql", func(t *testing.T) {
+ tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
+ defer tc.Stopper().Stop(ctx)
+ conn := tc.Conns[0]
+ sqlDB := sqlutils.MakeSQLRunner(conn)
+
+ sqlDB.Exec(t, `CREATE DATABASE schemadb; SET DATABASE = schemadb`)
+ sqlDB.ExpectErr(t, "does not exist: \"schemadb.bar.test\"",
+ "IMPORT TABLE schemadb.bar.test FROM PGDUMP ('nodelocal://0/schema.sql') WITH ignore_unsupported")
+
+		// Create the user-defined schema so that we can get past the "not found"
+		// error. We still expect an error, as we do not support importing a
+		// target table into a user-defined schema.
+ sqlDB.Exec(t, `CREATE SCHEMA bar`)
+ sqlDB.ExpectErr(t, "cannot use IMPORT with a user defined schema",
+ "IMPORT TABLE schemadb.bar.test FROM PGDUMP ('nodelocal://0/schema.sql') WITH ignore_unsupported")
+
+ // We expect the import of a target table in the public schema to work.
+ for _, target := range []string{"schemadb.public.test", "schemadb.test", "test"} {
+ sqlDB.Exec(t, fmt.Sprintf("IMPORT TABLE %s FROM PGDUMP ('nodelocal://0/schema.sql') WITH ignore_unsupported", target))
+
+			// Check that only the test table in the public schema was imported.
+ expectedContent := [][]string{{"1", "abc"}, {"2", "def"}}
+ expectedTableName := "test"
+ sqlDB.CheckQueryResults(t, `SELECT schema_name,
+table_name FROM [SHOW TABLES] ORDER BY (schema_name, table_name)`,
+ [][]string{{"public", expectedTableName}})
+
+ // Check that the target table in the public schema was imported correctly.
+ sqlDB.CheckQueryResults(t, fmt.Sprintf(`SELECT * FROM %s`, expectedTableName), expectedContent)
+
+ sqlDB.Exec(t, `DROP TABLE schemadb.public.test`)
+ }
+ sqlDB.CheckQueryResults(t,
+ `SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner <> 'NULL' ORDER BY schema_name`,
+ [][]string{{"bar"}, {"public"}})
+ })
+
+ t.Run("inject-error-ensure-cleanup", func(t *testing.T) {
+ defer gcjob.SetSmallMaxGCIntervalForTest()()
+ tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{ServerArgs: args})
+ defer tc.Stopper().Stop(ctx)
+ conn := tc.Conns[0]
+ sqlDB := sqlutils.MakeSQLRunner(conn)
+ kvDB := tc.Server(0).DB()
+
+ beforeImport, err := tree.MakeDTimestampTZ(tc.Server(0).Clock().Now().GoTime(), time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := range tc.Servers {
+ tc.Servers[i].JobRegistry().(*jobs.Registry).TestingResumerCreationKnobs =
+ map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
+ jobspb.TypeImport: func(raw jobs.Resumer) jobs.Resumer {
+ r := raw.(*importResumer)
+ r.testingKnobs.afterImport = func(_ backupccl.RowCount) error {
+ return errors.New("testing injected failure")
+ }
+ return r
+ },
+ }
+ }
+
+ sqlDB.Exec(t, `CREATE DATABASE failedimportpgdump; SET DATABASE = failedimportpgdump`)
+ // Hit a failure during import.
+ sqlDB.ExpectErr(
+ t, `testing injected failure`, `IMPORT PGDUMP 'nodelocal://0/schema.sql' WITH ignore_unsupported`,
+ )
+ // Nudge the registry to quickly adopt the job.
+ tc.Server(0).JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
+
+ dbID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "failedimportpgdump")
+		// In this test, the IDs of the 3 schemas that will be cleaned up due to
+		// the failed import are consecutive IDs after the ID of the empty
+		// database they were created in.
+ schemaIDs := []descpb.ID{descpb.ID(dbID + 1), descpb.ID(dbID + 2), descpb.ID(dbID + 3)}
+ // The table IDs are allocated after the schemas are created. There is one
+ // extra table in the "public" schema.
+ tableIDs := []descpb.ID{descpb.ID(dbID + 4), descpb.ID(dbID + 5), descpb.ID(dbID + 6),
+ descpb.ID(dbID + 7)}
+
+ // At this point we expect to see three jobs related to the cleanup.
+ // - SCHEMA CHANGE GC job for the table cleanup.
+ // - SCHEMA CHANGE job to drop the schemas.
+ // - SCHEMA CHANGE job to update the database descriptor with dropped
+ // schemas.
+
+ // Ensure that a GC job was created, and wait for it to finish.
+ doneGCQuery := fmt.Sprintf(
+ "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND created > %s",
+ "SCHEMA CHANGE GC", jobs.StatusSucceeded, beforeImport.String(),
+ )
+
+ doneSchemaDropQuery := fmt.Sprintf(
+ "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND description"+
+ " LIKE '%s'", "SCHEMA CHANGE", jobs.StatusSucceeded, "dropping schemas%")
+
+ doneDatabaseUpdateQuery := fmt.Sprintf(
+ "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = '%s' AND status = '%s' AND description"+
+ " LIKE '%s'", "SCHEMA CHANGE", jobs.StatusSucceeded, "updating parent database%")
+
+ sqlDB.CheckQueryResultsRetry(t, doneGCQuery, [][]string{{"1"}})
+ sqlDB.CheckQueryResultsRetry(t, doneSchemaDropQuery, [][]string{{"1"}})
+ sqlDB.CheckQueryResultsRetry(t, doneDatabaseUpdateQuery, [][]string{{"1"}})
+
+ for _, schemaID := range schemaIDs {
+ // Expect that the schema descriptor is deleted.
+ if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ _, err := catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, schemaID)
+ if !testutils.IsError(err, "descriptor not found") {
+ return err
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ for _, tableID := range tableIDs {
+ // Expect that the table descriptor is deleted.
+ if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ _, err := catalogkv.MustGetTableDescByID(ctx, txn, keys.SystemSQLCodec, tableID)
+ if !testutils.IsError(err, "descriptor not found") {
+ return err
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+		// As a final sanity check, verify that the schemas have been removed.
+ sqlDB.CheckQueryResults(t, `SELECT schema_name FROM [SHOW SCHEMAS] WHERE owner IS NOT NULL`,
+ [][]string{{"public"}})
+
+ // Check that the database descriptor has been updated with the removed schemas.
+ sqlDB.ExpectErr(t, "unknown schema \"foo\"", `ALTER SCHEMA foo RENAME TO biz`)
+ })
+}
+
func TestImportCockroachDump(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
diff --git a/pkg/ccl/importccl/import_table_creation.go b/pkg/ccl/importccl/import_table_creation.go
index 334cb87c9901..d9ebb246b7e6 100644
--- a/pkg/ccl/importccl/import_table_creation.go
+++ b/pkg/ccl/importccl/import_table_creation.go
@@ -16,6 +16,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
@@ -83,7 +84,9 @@ type fkHandler struct {
}
// NoFKs is used by formats that do not support FKs.
-var NoFKs = fkHandler{resolver: make(fkResolver)}
+var NoFKs = fkHandler{resolver: fkResolver{
+ tableNameToDesc: make(map[string]*tabledesc.Mutable),
+}}
// MakeSimpleTableDescriptor creates a Mutable from a CreateTable parse
// node without the full machinery. Many parts of the syntax are unsupported
@@ -155,7 +158,7 @@ func MakeSimpleTableDescriptor(
tableDesc, err := sql.NewTableDesc(
ctx,
nil, /* txn */
- fks.resolver,
+ &fks.resolver,
st,
create,
parentID,
@@ -289,59 +292,73 @@ func (so *importSequenceOperators) SetSequenceValueByID(
return errSequenceOperators
}
-type fkResolver map[string]*tabledesc.Mutable
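+// fkResolver maps the names of the tables being imported (schema-qualified,
+// for PGDUMP) to their descriptors so that FK references can be resolved.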
+type fkResolver struct {
+ tableNameToDesc map[string]*tabledesc.Mutable
+ format roachpb.IOFileFormat
+}
-var _ resolver.SchemaResolver = fkResolver{}
+var _ resolver.SchemaResolver = &fkResolver{}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) Txn() *kv.Txn {
+func (r *fkResolver) Txn() *kv.Txn {
return nil
}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) LogicalSchemaAccessor() catalog.Accessor {
+func (r *fkResolver) LogicalSchemaAccessor() catalog.Accessor {
return nil
}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) CurrentDatabase() string {
+func (r *fkResolver) CurrentDatabase() string {
return ""
}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) CurrentSearchPath() sessiondata.SearchPath {
+func (r *fkResolver) CurrentSearchPath() sessiondata.SearchPath {
return sessiondata.SearchPath{}
}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) CommonLookupFlags(required bool) tree.CommonLookupFlags {
+func (r *fkResolver) CommonLookupFlags(required bool) tree.CommonLookupFlags {
return tree.CommonLookupFlags{}
}
// Implements the sql.SchemaResolver interface.
-func (r fkResolver) ObjectLookupFlags(required bool, requireMutable bool) tree.ObjectLookupFlags {
+func (r *fkResolver) ObjectLookupFlags(required bool, requireMutable bool) tree.ObjectLookupFlags {
return tree.ObjectLookupFlags{
CommonLookupFlags: tree.CommonLookupFlags{Required: required, RequireMutable: requireMutable},
}
}
// Implements the tree.ObjectNameExistingResolver interface.
-func (r fkResolver) LookupObject(
- ctx context.Context, lookupFlags tree.ObjectLookupFlags, dbName, scName, obName string,
+func (r *fkResolver) LookupObject(
+ _ context.Context, _ tree.ObjectLookupFlags, catalogName, scName, obName string,
) (found bool, objMeta tree.NameResolutionResult, err error) {
- if scName != "" {
- obName = strings.TrimPrefix(obName, scName+".")
+	// PGDUMP supports non-public schemas, so respect the schema name.
+ var lookupName string
+ if r.format.Format == roachpb.IOFileFormat_PgDump {
+ if scName == "" || catalogName == "" {
+ return false, nil, errors.Errorf("expected catalog and schema name to be set when resolving"+
+ " table %q in PGDUMP", obName)
+ }
+ lookupName = fmt.Sprintf("%s.%s", scName, obName)
+ } else {
+		lookupName = obName
+		if scName != "" {
+			lookupName = strings.TrimPrefix(obName, scName+".")
+		}
}
- tbl, ok := r[obName]
+ tbl, ok := r.tableNameToDesc[lookupName]
if ok {
return true, tbl, nil
}
- names := make([]string, 0, len(r))
- for k := range r {
+ names := make([]string, 0, len(r.tableNameToDesc))
+ for k := range r.tableNameToDesc {
names = append(names, k)
}
suggestions := strings.Join(names, ",")
- return false, nil, errors.Errorf("referenced table %q not found in tables being imported (%s)", obName, suggestions)
+ return false, nil, errors.Errorf("referenced table %q not found in tables being imported (%s)",
+ lookupName, suggestions)
}
// Implements the tree.ObjectNameTargetResolver interface.
diff --git a/pkg/ccl/importccl/read_import_mysql.go b/pkg/ccl/importccl/read_import_mysql.go
index b1acee028de2..d61da8528cdd 100644
--- a/pkg/ccl/importccl/read_import_mysql.go
+++ b/pkg/ccl/importccl/read_import_mysql.go
@@ -451,7 +451,7 @@ func mysqlTableToCockroach(
if err != nil {
return nil, nil, err
}
- fks.resolver[seqName] = seqDesc
+ fks.resolver.tableNameToDesc[seqName] = seqDesc
id++
}
@@ -538,7 +538,7 @@ func mysqlTableToCockroach(
fkDefs = append(fkDefs, delayedFK{desc, d})
}
}
- fks.resolver[desc.Name] = desc
+ fks.resolver.tableNameToDesc[desc.Name] = desc
if seqDesc != nil {
return []*tabledesc.Mutable{seqDesc, desc}, fkDefs, nil
}
@@ -569,7 +569,8 @@ func addDelayedFKs(
) error {
for _, def := range defs {
if err := sql.ResolveFK(
- ctx, nil, resolver, def.tbl, def.def, map[descpb.ID]*tabledesc.Mutable{}, sql.NewTable, tree.ValidationDefault, evalCtx,
+ ctx, nil, &resolver, def.tbl, def.def, map[descpb.ID]*tabledesc.Mutable{}, sql.NewTable,
+ tree.ValidationDefault, evalCtx,
); err != nil {
return err
}
diff --git a/pkg/ccl/importccl/read_import_mysql_test.go b/pkg/ccl/importccl/read_import_mysql_test.go
index f1ac0daf8058..4cb34f42127e 100644
--- a/pkg/ccl/importccl/read_import_mysql_test.go
+++ b/pkg/ccl/importccl/read_import_mysql_test.go
@@ -138,9 +138,9 @@ func TestMysqldumpSchemaReader(t *testing.T) {
referencedSimple := descForTable(ctx, t, readFile(t, `simple.cockroach-schema.sql`), expectedParent, 52, NoFKs)
fks := fkHandler{
allowed: true,
- resolver: fkResolver(map[string]*tabledesc.Mutable{
- referencedSimple.Name: referencedSimple,
- }),
+ resolver: fkResolver{
+ tableNameToDesc: map[string]*tabledesc.Mutable{referencedSimple.Name: referencedSimple},
+			format:          mysqlDumpFormat(),
+		},
}
t.Run("simple", func(t *testing.T) {
@@ -169,7 +169,10 @@ func TestMysqldumpSchemaReader(t *testing.T) {
})
t.Run("third-in-multi", func(t *testing.T) {
- skip := fkHandler{allowed: true, skip: true, resolver: make(fkResolver)}
+ skip := fkHandler{allowed: true, skip: true, resolver: fkResolver{
+ tableNameToDesc: make(map[string]*tabledesc.Mutable),
+ format: mysqlDumpFormat(),
+ }}
expected := descForTable(ctx, t, readFile(t, `third.cockroach-schema.sql`), expectedParent, 52, skip)
got := readMysqlCreateFrom(t, files.wholeDB, "third", 51, skip)
compareTables(t, expected.TableDesc(), got)
diff --git a/pkg/ccl/importccl/read_import_pgdump.go b/pkg/ccl/importccl/read_import_pgdump.go
index 07af37520c87..20528c031400 100644
--- a/pkg/ccl/importccl/read_import_pgdump.go
+++ b/pkg/ccl/importccl/read_import_pgdump.go
@@ -23,8 +23,11 @@ import (
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
@@ -209,6 +212,207 @@ func removeDefaultRegclass(create *tree.CreateTable) {
}
}
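+
+// schemaAndTableName is an optionally schema-qualified table name as parsed
+// from a dump file, e.g. {schema: "bar", table: "test"}.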
+type schemaAndTableName struct {
+ schema string
+ table string
+}
+
+func (s *schemaAndTableName) String() string {
+ var ret string
+ if s.schema != "" {
+ ret += s.schema + "."
+ }
+ ret += s.table
+ return ret
+}
+
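+// schemaParsingObjects accumulates the CREATE SCHEMA, CREATE TABLE, CREATE
+// SEQUENCE, and foreign key constraint statements collected while parsing a
+// PGDUMP file, keyed by schema-qualified object name.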
+type schemaParsingObjects struct {
+ createSchema map[string]*tree.CreateSchema
+ createTbl map[schemaAndTableName]*tree.CreateTable
+ createSeq map[schemaAndTableName]*tree.CreateSequence
+ tableFKs map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef
+}
+
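+// createPostgresSchemas constructs a schema descriptor for every CREATE
+// SCHEMA statement in the dump. The descriptors are marked OFFLINE and are
+// given placeholder IDs that are rewritten later in the import.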
+func createPostgresSchemas(
+ ctx context.Context,
+ parentID descpb.ID,
+ createSchema map[string]*tree.CreateSchema,
+ execCfg *sql.ExecutorConfig,
+ user security.SQLUsername,
+) ([]*schemadesc.Mutable, error) {
+ var dbDesc *dbdesc.Immutable
+ if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ var err error
+ dbDesc, err = catalogkv.MustGetDatabaseDescByID(ctx, txn, execCfg.Codec, parentID)
+ return err
+ }); err != nil {
+ return nil, err
+ }
+ schemaDescs := make([]*schemadesc.Mutable, 0)
+ for _, schema := range createSchema {
+ if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ desc, _, err := sql.CreateUserDefinedSchemaDescriptor(ctx, user, schema, txn, execCfg,
+ dbDesc, false /* allocateID */)
+ if err != nil {
+ return err
+ }
+
+ // This is true when the schema exists and we are processing a
+ // CREATE SCHEMA IF NOT EXISTS statement.
+ if desc == nil {
+ return nil
+ }
+
+			// We didn't allocate an ID above, so we must assign it a placeholder ID
+			// until it is assigned an actual ID later in the import.
+ desc.ID = getNextPlaceholderDescID()
+ desc.State = descpb.DescriptorState_OFFLINE
+ desc.OfflineReason = "importing"
+ schemaDescs = append(schemaDescs, desc)
+ return err
+ }); err != nil {
+ return nil, err
+ }
+ }
+ return schemaDescs, nil
+}
+
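+// createPostgresSequences constructs a sequence descriptor for every CREATE
+// SEQUENCE statement in the dump, resolving the parent schema ID from
+// schemaNameToDesc for sequences in non-public schemas.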
+func createPostgresSequences(
+ ctx context.Context,
+ parentID descpb.ID,
+ createSeq map[schemaAndTableName]*tree.CreateSequence,
+ fks fkHandler,
+ walltime int64,
+ owner security.SQLUsername,
+ schemaNameToDesc map[string]*schemadesc.Mutable,
+) ([]*tabledesc.Mutable, error) {
+ ret := make([]*tabledesc.Mutable, 0)
+ for schemaAndTableName, seq := range createSeq {
+ schemaID := descpb.ID(keys.PublicSchemaID)
+ if schemaAndTableName.schema != "" && schemaAndTableName.schema != "public" {
+ var desc *schemadesc.Mutable
+ var ok bool
+ if desc, ok = schemaNameToDesc[schemaAndTableName.schema]; !ok {
+ return nil, errors.Newf("schema %s not found in the schemas created from the pgdump",
+ schemaAndTableName.schema)
+ }
+ schemaID = desc.ID
+ }
+ desc, err := sql.NewSequenceTableDesc(
+ ctx,
+ schemaAndTableName.table,
+ seq.Options,
+ parentID,
+ schemaID,
+ getNextPlaceholderDescID(),
+ hlc.Timestamp{WallTime: walltime},
+ descpb.NewDefaultPrivilegeDescriptor(owner),
+ tree.PersistencePermanent,
+ nil, /* params */
+ )
+ if err != nil {
+ return nil, err
+ }
+ fks.resolver.tableNameToDesc[schemaAndTableName.String()] = desc
+ ret = append(ret, desc)
+ }
+
+ return ret, nil
+}
+
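+// createPostgresTables constructs a table descriptor for every CREATE TABLE
+// statement in the dump, registering each descriptor with the FK resolver
+// under its schema-qualified name.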
+func createPostgresTables(
+ evalCtx *tree.EvalContext,
+ p sql.JobExecContext,
+ createTbl map[schemaAndTableName]*tree.CreateTable,
+ fks fkHandler,
+ backrefs map[descpb.ID]*tabledesc.Mutable,
+ parentID descpb.ID,
+ walltime int64,
+ schemaNameToDesc map[string]*schemadesc.Mutable,
+) ([]*tabledesc.Mutable, error) {
+ ret := make([]*tabledesc.Mutable, 0)
+ for schemaAndTableName, create := range createTbl {
+ if create == nil {
+ continue
+ }
+ schemaID := descpb.ID(keys.PublicSchemaID)
+ if schemaAndTableName.schema != "" && schemaAndTableName.schema != tree.PublicSchema {
+ var desc *schemadesc.Mutable
+ var ok bool
+ if desc, ok = schemaNameToDesc[schemaAndTableName.schema]; !ok {
+ return nil, errors.Newf("schema %s not found in the schemas created from the pgdump",
+ schemaAndTableName.schema)
+ }
+ schemaID = desc.ID
+ }
+ removeDefaultRegclass(create)
+ desc, err := MakeSimpleTableDescriptor(evalCtx.Ctx(), p.SemaCtx(), p.ExecCfg().Settings,
+ create, parentID, schemaID, getNextPlaceholderDescID(), fks, walltime)
+ if err != nil {
+ return nil, err
+ }
+ fks.resolver.tableNameToDesc[schemaAndTableName.String()] = desc
+ backrefs[desc.ID] = desc
+ ret = append(ret, desc)
+ }
+
+ return ret, nil
+}
+
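+// resolvePostgresFKs resolves the foreign key constraints collected during
+// parsing against the tables being imported, updating the referenced table
+// descriptors with back-references.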
+func resolvePostgresFKs(
+ evalCtx *tree.EvalContext,
+ tableFKs map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef,
+ fks fkHandler,
+ backrefs map[descpb.ID]*tabledesc.Mutable,
+) error {
+ for schemaAndTableName, constraints := range tableFKs {
+ desc := fks.resolver.tableNameToDesc[schemaAndTableName.String()]
+ if desc == nil {
+ continue
+ }
+ for _, constraint := range constraints {
+ if constraint.Table.Schema() == "" {
+ return errors.Errorf("schema expected to be non-empty when resolving postgres FK %s",
+ constraint.Name.String())
+ }
+ constraint.Table.ExplicitSchema = true
+ // Add a dummy catalog name to aid in object resolution.
+ if constraint.Table.Catalog() == "" {
+ constraint.Table.ExplicitCatalog = true
+ constraint.Table.CatalogName = "defaultdb"
+ }
+ if err := sql.ResolveFK(
+ evalCtx.Ctx(), nil /* txn */, &fks.resolver, desc, constraint, backrefs, sql.NewTable,
+ tree.ValidationDefault, evalCtx,
+ ); err != nil {
+ return err
+ }
+ }
+ if err := fixDescriptorFKState(desc); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+var placeholderDescID = defaultCSVTableID
+
+// getNextPlaceholderDescID returns a monotonically increasing placeholder ID
+// that is used when creating table, sequence and schema descriptors during the
+// schema parsing phase of a PGDUMP import.
+// We assign these descriptors "fake" IDs because it is early in the IMPORT
+// execution and we do not want to blow through GenerateUniqueDescID calls only
+// to fail during the verification phase before we actually begin ingesting
+// data. Thus, we pessimistically wait until all the verification steps in the
+// IMPORT have been completed, after which we rewrite the descriptor IDs with
+// "real" unique IDs.
+func getNextPlaceholderDescID() descpb.ID {
+ ret := placeholderDescID
+ placeholderDescID++
+ return ret
+}
+
// readPostgresCreateTable returns table descriptors for all tables or the
// matching table from SQL statements.
func readPostgresCreateTable(
@@ -223,90 +427,76 @@ func readPostgresCreateTable(
max int,
owner security.SQLUsername,
unsupportedStmtLogger *unsupportedStmtLogger,
-) ([]*tabledesc.Mutable, error) {
+) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
// Modify the CreateTable stmt with the various index additions. We do this
// instead of creating a full table descriptor first and adding indexes
// later because MakeSimpleTableDescriptor calls the sql package which calls
// AllocateIDs which adds the hidden rowid and default primary key. This means
// we'd have to delete the index and row and modify the column family. This
// is much easier and probably safer too.
- createTbl := make(map[string]*tree.CreateTable)
- createSeq := make(map[string]*tree.CreateSequence)
- tableFKs := make(map[string][]*tree.ForeignKeyConstraintTableDef)
+ schemaObjects := schemaParsingObjects{
+ createSchema: make(map[string]*tree.CreateSchema),
+ createTbl: make(map[schemaAndTableName]*tree.CreateTable),
+ createSeq: make(map[schemaAndTableName]*tree.CreateSequence),
+ tableFKs: make(map[schemaAndTableName][]*tree.ForeignKeyConstraintTableDef),
+ }
ps := newPostgreStream(ctx, input, max, unsupportedStmtLogger)
for {
stmt, err := ps.Next()
if err == io.EOF {
- ret := make([]*tabledesc.Mutable, 0, len(createTbl))
- for name, seq := range createSeq {
- id := descpb.ID(int(defaultCSVTableID) + len(ret))
- desc, err := sql.NewSequenceTableDesc(
- ctx,
- name,
- seq.Options,
- parentID,
- keys.PublicSchemaID,
- id,
- hlc.Timestamp{WallTime: walltime},
- descpb.NewDefaultPrivilegeDescriptor(owner),
- tree.PersistencePermanent,
- nil, /* params */
- )
- if err != nil {
- return nil, err
- }
- fks.resolver[desc.Name] = desc
- ret = append(ret, desc)
+ tables := make([]*tabledesc.Mutable, 0, len(schemaObjects.createTbl))
+ schemaNameToDesc := make(map[string]*schemadesc.Mutable)
+ schemaDescs, err := createPostgresSchemas(ctx, parentID, schemaObjects.createSchema,
+ p.ExecCfg(), p.User())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, schemaDesc := range schemaDescs {
+ schemaNameToDesc[schemaDesc.GetName()] = schemaDesc
+ }
+
+ // Construct sequence descriptors.
+ seqs, err := createPostgresSequences(ctx, parentID, schemaObjects.createSeq, fks,
+ walltime, owner, schemaNameToDesc)
+ if err != nil {
+ return nil, nil, err
}
+ tables = append(tables, seqs...)
+
+ // Construct table descriptors.
backrefs := make(map[descpb.ID]*tabledesc.Mutable)
- for _, create := range createTbl {
- if create == nil {
- continue
- }
- removeDefaultRegclass(create)
- id := descpb.ID(int(defaultCSVTableID) + len(ret))
- desc, err := MakeSimpleTableDescriptor(evalCtx.Ctx(), p.SemaCtx(), p.ExecCfg().Settings, create, parentID, keys.PublicSchemaID, id, fks, walltime)
- if err != nil {
- return nil, err
- }
- fks.resolver[desc.Name] = desc
- backrefs[desc.ID] = desc
- ret = append(ret, desc)
+ tableDescs, err := createPostgresTables(evalCtx, p, schemaObjects.createTbl, fks, backrefs,
+ parentID, walltime, schemaNameToDesc)
+ if err != nil {
+ return nil, nil, err
}
- for name, constraints := range tableFKs {
- desc := fks.resolver[name]
- if desc == nil {
- continue
- }
- for _, constraint := range constraints {
- if err := sql.ResolveFK(
- evalCtx.Ctx(), nil /* txn */, fks.resolver, desc, constraint, backrefs, sql.NewTable, tree.ValidationDefault, evalCtx,
- ); err != nil {
- return nil, err
- }
- }
- if err := fixDescriptorFKState(desc); err != nil {
- return nil, err
- }
+ tables = append(tables, tableDescs...)
+
+ // Resolve FKs.
+ err = resolvePostgresFKs(evalCtx, schemaObjects.tableFKs, fks, backrefs)
+ if err != nil {
+ return nil, nil, err
}
- if match != "" && len(ret) != 1 {
- found := make([]string, 0, len(createTbl))
- for name := range createTbl {
- found = append(found, name)
+ if match != "" && len(tables) != 1 {
+ found := make([]string, 0, len(schemaObjects.createTbl))
+ for schemaAndTableName := range schemaObjects.createTbl {
+ found = append(found, schemaAndTableName.String())
}
- return nil, errors.Errorf("table %q not found in file (found tables: %s)", match, strings.Join(found, ", "))
+ return nil, nil, errors.Errorf("table %q not found in file (found tables: %s)", match,
+ strings.Join(found, ", "))
}
- if len(ret) == 0 {
- return nil, errors.Errorf("no table definition found")
+ if len(tables) == 0 {
+ return nil, nil, errors.Errorf("no table definition found")
}
- return ret, nil
+ return tables, schemaDescs, nil
}
if err != nil {
- return nil, errors.Wrap(err, "postgres parse error")
+ return nil, nil, errors.Wrap(err, "postgres parse error")
}
- if err := readPostgresStmt(ctx, evalCtx, match, fks, createTbl, createSeq, tableFKs, stmt, p,
+ if err := readPostgresStmt(ctx, evalCtx, match, fks, &schemaObjects, stmt, p,
parentID, unsupportedStmtLogger); err != nil {
- return nil, err
+ return nil, nil, err
}
}
}
@@ -316,9 +506,7 @@ func readPostgresStmt(
evalCtx *tree.EvalContext,
match string,
fks fkHandler,
- createTbl map[string]*tree.CreateTable,
- createSeq map[string]*tree.CreateSequence,
- tableFKs map[string][]*tree.ForeignKeyConstraintTableDef,
+ schemaObjects *schemaParsingObjects,
stmt interface{},
p sql.JobExecContext,
parentID descpb.ID,
@@ -326,25 +514,38 @@ func readPostgresStmt(
) error {
ignoreUnsupportedStmts := unsupportedStmtLogger.ignoreUnsupported
switch stmt := stmt.(type) {
+ case *tree.CreateSchema:
+ name, err := getSchemaName(&stmt.Schema)
+ if err != nil {
+ return err
+ }
+		// If a target table is specified, we do not want to create any
+		// user-defined schemas. This is because we only allow specifying target
+		// tables in the public schema.
+ if match != "" {
+ break
+ }
+ schemaObjects.createSchema[name] = stmt
case *tree.CreateTable:
- name, err := getTableName(&stmt.Table)
+ schemaQualifiedName, err := getSchemaAndTableName(&stmt.Table)
if err != nil {
return err
}
- if match != "" && match != name {
- createTbl[name] = nil
+ isMatch := match == "" || match == schemaQualifiedName.String()
+ if isMatch {
+ schemaObjects.createTbl[schemaQualifiedName] = stmt
} else {
- createTbl[name] = stmt
+ schemaObjects.createTbl[schemaQualifiedName] = nil
}
case *tree.CreateIndex:
if stmt.Predicate != nil {
return unimplemented.NewWithIssue(50225, "cannot import a table with partial indexes")
}
- name, err := getTableName(&stmt.Table)
+ schemaQualifiedTableName, err := getSchemaAndTableName(&stmt.Table)
if err != nil {
return err
}
- create := createTbl[name]
+ create := schemaObjects.createTbl[schemaQualifiedTableName]
if create == nil {
break
}
@@ -360,12 +561,16 @@ func readPostgresStmt(
idx = &tree.UniqueConstraintTableDef{IndexTableDef: *idx.(*tree.IndexTableDef)}
}
create.Defs = append(create.Defs, idx)
+ case *tree.AlterSchema:
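+		// ALTER SCHEMA commands are currently ignored during schema extraction.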
+ switch stmt.Cmd {
+ default:
+ }
case *tree.AlterTable:
- name, err := getTableName2(stmt.Table)
+ schemaQualifiedTableName, err := getSchemaAndTableName2(stmt.Table)
if err != nil {
return err
}
- create := createTbl[name]
+ create := schemaObjects.createTbl[schemaQualifiedTableName]
if create == nil {
break
}
@@ -375,7 +580,10 @@ func readPostgresStmt(
switch con := cmd.ConstraintDef.(type) {
case *tree.ForeignKeyConstraintTableDef:
if !fks.skip {
- tableFKs[name] = append(tableFKs[name], con)
+ if con.Table.Schema() == "" {
+ con.Table.SchemaName = tree.PublicSchemaName
+ }
+					schemaObjects.tableFKs[schemaQualifiedTableName] =
+						append(schemaObjects.tableFKs[schemaQualifiedTableName], con)
}
default:
create.Defs = append(create.Defs, cmd.ConstraintDef)
@@ -438,12 +646,12 @@ func readPostgresStmt(
}
return errors.Errorf("unsupported statement: %s", stmt)
case *tree.CreateSequence:
- name, err := getTableName(&stmt.Name)
+ schemaQualifiedTableName, err := getSchemaAndTableName(&stmt.Name)
if err != nil {
return err
}
- if match == "" || match == name {
- createSeq[name] = stmt
+ if match == "" || match == schemaQualifiedTableName.String() {
+ schemaObjects.createSeq[schemaQualifiedTableName] = stmt
}
case *tree.AlterSequence:
if ignoreUnsupportedStmts {
@@ -507,8 +715,8 @@ func readPostgresStmt(
for _, fnStmt := range fnStmts {
switch ast := fnStmt.AST.(type) {
case *tree.AlterTable:
- if err := readPostgresStmt(ctx, evalCtx, match, fks, createTbl, createSeq,
- tableFKs, ast, p, parentID, unsupportedStmtLogger); err != nil {
+ if err := readPostgresStmt(ctx, evalCtx, match, fks, schemaObjects, ast, p,
+ parentID, unsupportedStmtLogger); err != nil {
return err
}
default:
@@ -592,27 +800,33 @@ func readPostgresStmt(
return nil
}
-func getTableName(tn *tree.TableName) (string, error) {
- if sc := tn.Schema(); sc != "" && sc != "public" {
- return "", unimplemented.NewWithIssueDetailf(
- 26443,
- "import non-public schema",
- "non-public schemas unsupported: %s", sc,
- )
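+// getSchemaName extracts the schema name from sc, rejecting schema names that
+// are qualified with an explicit catalog.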
+func getSchemaName(sc *tree.ObjectNamePrefix) (string, error) {
+ if sc.ExplicitCatalog {
+ return "", unimplemented.Newf("import into database specified in dump file",
+ "explicit catalog schemas unsupported: %s", sc.CatalogName.String()+sc.SchemaName.String())
+ }
+ return sc.SchemaName.String(), nil
+}
+
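+// getSchemaAndTableName returns the schema-qualified name for tn, defaulting
+// the schema to "public" when the dump does not specify one.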
+func getSchemaAndTableName(tn *tree.TableName) (schemaAndTableName, error) {
+ var ret schemaAndTableName
+ ret.schema = tree.PublicSchema
+ if tn.Schema() != "" {
+ ret.schema = tn.Schema()
}
- return tn.Table(), nil
+ ret.table = tn.Table()
+ return ret, nil
}
-// getTableName variant for UnresolvedObjectName.
+// getSchemaAndTableName variant for UnresolvedObjectName.
-func getTableName2(u *tree.UnresolvedObjectName) (string, error) {
- if u.NumParts >= 2 && u.Parts[1] != "public" {
- return "", unimplemented.NewWithIssueDetailf(
- 26443,
- "import non-public schema",
- "non-public schemas unsupported: %s", u.Parts[1],
- )
+func getSchemaAndTableName2(u *tree.UnresolvedObjectName) (schemaAndTableName, error) {
+ var ret schemaAndTableName
+ ret.schema = tree.PublicSchema
+ if u.NumParts >= 2 && u.Parts[1] != "" {
+ ret.schema = u.Parts[1]
}
- return u.Parts[0], nil
+ ret.table = u.Parts[0]
+ return ret, nil
}
type pgDumpReader struct {
@@ -730,11 +944,11 @@ func (m *pgDumpReader) readFile(
if !ok {
return errors.Errorf("unexpected: %T", i.Table)
}
- name, err := getTableName(n)
+ name, err := getSchemaAndTableName(n)
if err != nil {
return errors.Wrapf(err, "%s", i)
}
- conv, ok := m.tables[name]
+ conv, ok := m.tables[name.String()]
if !ok {
// not importing this table.
continue
@@ -785,11 +999,11 @@ func (m *pgDumpReader) readFile(
}
for _, tuple := range values.Rows {
count++
- tableNameToRowsProcessed[name]++
+ tableNameToRowsProcessed[name.String()]++
if count <= resumePos {
continue
}
- if rowLimit != 0 && tableNameToRowsProcessed[name] > rowLimit {
+ if rowLimit != 0 && tableNameToRowsProcessed[name.String()] > rowLimit {
break
}
if got := len(tuple); expectedColLen != got {
@@ -820,11 +1034,11 @@ func (m *pgDumpReader) readFile(
if !i.Stdin {
return errors.New("expected STDIN option on COPY FROM")
}
- name, err := getTableName(&i.Table)
+ name, err := getSchemaAndTableName(&i.Table)
if err != nil {
return errors.Wrapf(err, "%s", i)
}
- conv, importing := m.tables[name]
+ conv, importing := m.tables[name.String()]
if importing && conv == nil {
return errors.Errorf("missing schema info for requested table %q", name)
}
@@ -853,7 +1067,7 @@ func (m *pgDumpReader) readFile(
break
}
count++
- tableNameToRowsProcessed[name]++
+ tableNameToRowsProcessed[name.String()]++
if err != nil {
return wrapRowErr(err, "", count, pgcode.Uncategorized, "")
}
@@ -869,7 +1083,7 @@ func (m *pgDumpReader) readFile(
return makeRowErr("", count, pgcode.Syntax,
"expected %d values, got %d", expected, got)
}
- if rowLimit != 0 && tableNameToRowsProcessed[name] > rowLimit {
+ if rowLimit != 0 && tableNameToRowsProcessed[name.String()] > rowLimit {
break
}
for i, s := range row {
@@ -987,7 +1201,12 @@ func (m *pgDumpReader) readFile(
if err != nil {
break
}
- seq := m.tableDescs[name.Parts[0]]
+
+ seqName := name.Parts[0]
+ if name.Schema() != "" {
+ seqName = fmt.Sprintf("%s.%s", name.Schema(), name.Object())
+ }
+ seq := m.tableDescs[seqName]
if seq == nil {
break
}
@@ -1015,8 +1234,8 @@ func (m *pgDumpReader) readFile(
// handled during schema extraction.
case *tree.SetVar, *tree.BeginTransaction, *tree.CommitTransaction, *tree.Analyze:
// handled during schema extraction.
- case *tree.CreateTable, *tree.AlterTable, *tree.AlterTableOwner, *tree.CreateIndex,
- *tree.CreateSequence, *tree.DropTable:
+ case *tree.CreateTable, *tree.CreateSchema, *tree.AlterTable, *tree.AlterTableOwner,
+ *tree.CreateIndex, *tree.CreateSequence, *tree.DropTable:
// handled during schema extraction.
default:
err := errors.Errorf("unsupported %T statement: %v", i, i)
diff --git a/pkg/ccl/importccl/testdata/pgdump/schema.sql b/pkg/ccl/importccl/testdata/pgdump/schema.sql
new file mode 100644
index 000000000000..df864bc3b45f
--- /dev/null
+++ b/pkg/ccl/importccl/testdata/pgdump/schema.sql
@@ -0,0 +1,166 @@
+--
+-- PostgreSQL database dump
+--
+
+-- Dumped from database version 13.1
+-- Dumped by pg_dump version 13.1
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET idle_in_transaction_session_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
+SET check_function_bodies = false;
+SET xmloption = content;
+SET client_min_messages = warning;
+SET row_security = off;
+
+--
+-- Name: bar; Type: SCHEMA; Schema: -; Owner: postgres
+--
+
+CREATE SCHEMA bar;
+
+
+ALTER SCHEMA bar OWNER TO postgres;
+
+--
+-- Name: baz; Type: SCHEMA; Schema: -; Owner: adityamaru
+--
+
+CREATE SCHEMA baz;
+
+
+ALTER SCHEMA baz OWNER TO adityamaru;
+
+--
+-- Name: foo; Type: SCHEMA; Schema: -; Owner: adityamaru
+--
+
+CREATE SCHEMA foo;
+
+
+ALTER SCHEMA foo OWNER TO adityamaru;
+
+SET default_tablespace = '';
+
+SET default_table_access_method = heap;
+
+--
+-- Name: test; Type: TABLE; Schema: bar; Owner: adityamaru
+--
+
+CREATE TABLE bar.test (
+ id integer UNIQUE,
+ name character varying
+);
+
+--
+-- Name: test2; Type: TABLE; Schema: bar; Owner: adityamaru
+--
+
+CREATE TABLE bar.test2 (
+ id integer,
+ name character varying
+);
+
+--
+-- Name: test2 testfk; Type: FK CONSTRAINT; Schema: bar; Owner: adityamaru
+--
+
+ALTER TABLE ONLY bar.test2 ADD CONSTRAINT testfk FOREIGN KEY (id) REFERENCES bar.test(id) ON DELETE CASCADE;
+
+--
+-- Name: testseq; Type: SEQUENCE; Schema: bar; Owner: adityamaru
+--
+
+CREATE SEQUENCE bar.testseq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE;
+
+
+ALTER TABLE bar.test OWNER TO adityamaru;
+
+--
+-- Name: test; Type: TABLE; Schema: baz; Owner: adityamaru
+--
+
+CREATE TABLE baz.test (
+ id integer,
+ name character varying
+);
+
+
+ALTER TABLE baz.test OWNER TO adityamaru;
+
+--
+-- Name: test; Type: TABLE; Schema: foo; Owner: adityamaru
+--
+
+CREATE TABLE foo.test (
+ id integer,
+ name character varying
+);
+
+
+ALTER TABLE foo.test OWNER TO adityamaru;
+
+--
+-- Name: test; Type: TABLE; Schema: public; Owner: adityamaru
+--
+
+CREATE TABLE public.test (
+ id integer,
+ name character varying
+);
+
+
+ALTER TABLE public.test OWNER TO adityamaru;
+
+--
+-- Data for Name: test; Type: TABLE DATA; Schema: bar; Owner: adityamaru
+--
+
+COPY bar.test (id, name) FROM stdin;
+1 abc
+2 def
+\.
+
+
+--
+-- Data for Name: test; Type: TABLE DATA; Schema: baz; Owner: adityamaru
+--
+
+COPY baz.test (id, name) FROM stdin;
+1 abc
+2 def
+\.
+
+
+--
+-- Data for Name: test; Type: TABLE DATA; Schema: foo; Owner: adityamaru
+--
+
+COPY foo.test (id, name) FROM stdin;
+1 abc
+2 def
+\.
+
+
+--
+-- Data for Name: test; Type: TABLE DATA; Schema: public; Owner: adityamaru
+--
+
+COPY public.test (id, name) FROM stdin;
+1 abc
+2 def
+\.
+
+
+--
+-- PostgreSQL database dump complete
+--
+
diff --git a/pkg/ccl/importccl/testutils_test.go b/pkg/ccl/importccl/testutils_test.go
index 49808290807d..2c2dc243fbe0 100644
--- a/pkg/ccl/importccl/testutils_test.go
+++ b/pkg/ccl/importccl/testutils_test.go
@@ -69,7 +69,7 @@ func descForTable(
if err != nil {
t.Fatal(err)
}
- fks.resolver[name] = desc
+ fks.resolver.tableNameToDesc[name] = desc
} else {
stmt = parsed[0].AST.(*tree.CreateTable)
}
diff --git a/pkg/jobs/jobspb/jobs.pb.go b/pkg/jobs/jobspb/jobs.pb.go
index 7be22c5ceed6..a2051997e75c 100644
--- a/pkg/jobs/jobspb/jobs.pb.go
+++ b/pkg/jobs/jobspb/jobs.pb.go
@@ -58,7 +58,7 @@ func (x EncryptionMode) String() string {
return proto.EnumName(EncryptionMode_name, int32(x))
}
func (EncryptionMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{0}
}
type Status int32
@@ -87,7 +87,7 @@ func (x Status) String() string {
return proto.EnumName(Status_name, int32(x))
}
func (Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{1}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{1}
}
type Type int32
@@ -142,7 +142,7 @@ var Type_value = map[string]int32{
}
func (Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{2}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{2}
}
type EncryptionInfo_Scheme int32
@@ -162,7 +162,7 @@ func (x EncryptionInfo_Scheme) String() string {
return proto.EnumName(EncryptionInfo_Scheme_name, int32(x))
}
func (EncryptionInfo_Scheme) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{2, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{2, 0}
}
type SchemaChangeGCProgress_Status int32
@@ -192,7 +192,7 @@ func (x SchemaChangeGCProgress_Status) String() string {
return proto.EnumName(SchemaChangeGCProgress_Status_name, int32(x))
}
func (SchemaChangeGCProgress_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{22, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{22, 0}
}
type ResolvedSpan_BoundaryType int32
@@ -230,7 +230,7 @@ func (x ResolvedSpan_BoundaryType) String() string {
return proto.EnumName(ResolvedSpan_BoundaryType_name, int32(x))
}
func (ResolvedSpan_BoundaryType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{25, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{25, 0}
}
type Lease struct {
@@ -244,7 +244,7 @@ func (m *Lease) Reset() { *m = Lease{} }
func (m *Lease) String() string { return proto.CompactTextString(m) }
func (*Lease) ProtoMessage() {}
func (*Lease) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{0}
}
func (m *Lease) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -285,7 +285,7 @@ func (m *BackupEncryptionOptions) Reset() { *m = BackupEncryptionOptions
func (m *BackupEncryptionOptions) String() string { return proto.CompactTextString(m) }
func (*BackupEncryptionOptions) ProtoMessage() {}
func (*BackupEncryptionOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{1}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{1}
}
func (m *BackupEncryptionOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -319,7 +319,7 @@ func (m *BackupEncryptionOptions_KMSInfo) Reset() { *m = BackupEncryptio
func (m *BackupEncryptionOptions_KMSInfo) String() string { return proto.CompactTextString(m) }
func (*BackupEncryptionOptions_KMSInfo) ProtoMessage() {}
func (*BackupEncryptionOptions_KMSInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{1, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{1, 0}
}
func (m *BackupEncryptionOptions_KMSInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -359,7 +359,7 @@ func (m *EncryptionInfo) Reset() { *m = EncryptionInfo{} }
func (m *EncryptionInfo) String() string { return proto.CompactTextString(m) }
func (*EncryptionInfo) ProtoMessage() {}
func (*EncryptionInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{2}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{2}
}
func (m *EncryptionInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -402,7 +402,7 @@ func (m *StreamIngestionDetails) Reset() { *m = StreamIngestionDetails{}
func (m *StreamIngestionDetails) String() string { return proto.CompactTextString(m) }
func (*StreamIngestionDetails) ProtoMessage() {}
func (*StreamIngestionDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{3}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{3}
}
func (m *StreamIngestionDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -439,7 +439,7 @@ func (m *StreamIngestionProgress) Reset() { *m = StreamIngestionProgress
func (m *StreamIngestionProgress) String() string { return proto.CompactTextString(m) }
func (*StreamIngestionProgress) ProtoMessage() {}
func (*StreamIngestionProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{4}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{4}
}
func (m *StreamIngestionProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -494,7 +494,7 @@ func (m *BackupDetails) Reset() { *m = BackupDetails{} }
func (m *BackupDetails) String() string { return proto.CompactTextString(m) }
func (*BackupDetails) ProtoMessage() {}
func (*BackupDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{5}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{5}
}
func (m *BackupDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -526,7 +526,7 @@ func (m *BackupProgress) Reset() { *m = BackupProgress{} }
func (m *BackupProgress) String() string { return proto.CompactTextString(m) }
func (*BackupProgress) ProtoMessage() {}
func (*BackupProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{6}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{6}
}
func (m *BackupProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -594,7 +594,7 @@ func (m *RestoreDetails) Reset() { *m = RestoreDetails{} }
func (m *RestoreDetails) String() string { return proto.CompactTextString(m) }
func (*RestoreDetails) ProtoMessage() {}
func (*RestoreDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{7}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{7}
}
func (m *RestoreDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -631,7 +631,7 @@ func (m *RestoreDetails_DescriptorRewrite) Reset() { *m = RestoreDetails
func (m *RestoreDetails_DescriptorRewrite) String() string { return proto.CompactTextString(m) }
func (*RestoreDetails_DescriptorRewrite) ProtoMessage() {}
func (*RestoreDetails_DescriptorRewrite) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{7, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{7, 0}
}
func (m *RestoreDetails_DescriptorRewrite) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -664,7 +664,7 @@ func (m *RestoreDetails_BackupLocalityInfo) Reset() { *m = RestoreDetail
func (m *RestoreDetails_BackupLocalityInfo) String() string { return proto.CompactTextString(m) }
func (*RestoreDetails_BackupLocalityInfo) ProtoMessage() {}
func (*RestoreDetails_BackupLocalityInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{7, 1}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{7, 1}
}
func (m *RestoreDetails_BackupLocalityInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -697,7 +697,7 @@ func (m *RestoreProgress) Reset() { *m = RestoreProgress{} }
func (m *RestoreProgress) String() string { return proto.CompactTextString(m) }
func (*RestoreProgress) ProtoMessage() {}
func (*RestoreProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{8}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{8}
}
func (m *RestoreProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -723,12 +723,13 @@ func (m *RestoreProgress) XXX_DiscardUnknown() {
var xxx_messageInfo_RestoreProgress proto.InternalMessageInfo
type ImportDetails struct {
- Tables []ImportDetails_Table `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables"`
- URIs []string `protobuf:"bytes,2,rep,name=uris,proto3" json:"uris,omitempty"`
- Format roachpb.IOFileFormat `protobuf:"bytes,3,opt,name=format,proto3" json:"format"`
- SSTSize int64 `protobuf:"varint,4,opt,name=sst_size,json=sstSize,proto3" json:"sst_size,omitempty"`
- Oversample int64 `protobuf:"varint,9,opt,name=oversample,proto3" json:"oversample,omitempty"`
- SkipFKs bool `protobuf:"varint,10,opt,name=skip_fks,json=skipFks,proto3" json:"skip_fks,omitempty"`
+ Tables []ImportDetails_Table `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables"`
+ Schemas []ImportDetails_Schema `protobuf:"bytes,23,rep,name=schemas,proto3" json:"schemas"`
+ URIs []string `protobuf:"bytes,2,rep,name=uris,proto3" json:"uris,omitempty"`
+ Format roachpb.IOFileFormat `protobuf:"bytes,3,opt,name=format,proto3" json:"format"`
+ SSTSize int64 `protobuf:"varint,4,opt,name=sst_size,json=sstSize,proto3" json:"sst_size,omitempty"`
+ Oversample int64 `protobuf:"varint,9,opt,name=oversample,proto3" json:"oversample,omitempty"`
+ SkipFKs bool `protobuf:"varint,10,opt,name=skip_fks,json=skipFks,proto3" json:"skip_fks,omitempty"`
// walltime is the time at which an import job will write KVs.
Walltime int64 `protobuf:"varint,5,opt,name=walltime,proto3" json:"walltime,omitempty"`
ParentID github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ID `protobuf:"varint,6,opt,name=parent_id,json=parentId,proto3,casttype=github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID" json:"parent_id,omitempty"`
@@ -744,6 +745,7 @@ type ImportDetails struct {
// directly, many other fields like samples, oversample, sst_size are ignored.
IngestDirectly bool `protobuf:"varint,11,opt,name=ingest_directly,json=ingestDirectly,proto3" json:"ingest_directly,omitempty"`
PrepareComplete bool `protobuf:"varint,12,opt,name=prepare_complete,json=prepareComplete,proto3" json:"prepare_complete,omitempty"`
+ SchemasPublished bool `protobuf:"varint,24,opt,name=schemas_published,json=schemasPublished,proto3" json:"schemas_published,omitempty"`
TablesPublished bool `protobuf:"varint,13,opt,name=tables_published,json=tablesPublished,proto3" json:"tables_published,omitempty"`
ParseBundleSchema bool `protobuf:"varint,14,opt,name=parse_bundle_schema,json=parseBundleSchema,proto3" json:"parse_bundle_schema,omitempty"`
// ProtectedTimestampRecord is the ID of the protected timestamp record
@@ -758,7 +760,7 @@ func (m *ImportDetails) Reset() { *m = ImportDetails{} }
func (m *ImportDetails) String() string { return proto.CompactTextString(m) }
func (*ImportDetails) ProtoMessage() {}
func (*ImportDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{9}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{9}
}
func (m *ImportDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -796,7 +798,7 @@ func (m *ImportDetails_Table) Reset() { *m = ImportDetails_Table{} }
func (m *ImportDetails_Table) String() string { return proto.CompactTextString(m) }
func (*ImportDetails_Table) ProtoMessage() {}
func (*ImportDetails_Table) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{9, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{9, 0}
}
func (m *ImportDetails_Table) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -821,6 +823,39 @@ func (m *ImportDetails_Table) XXX_DiscardUnknown() {
var xxx_messageInfo_ImportDetails_Table proto.InternalMessageInfo
+type ImportDetails_Schema struct {
+ Desc *descpb.SchemaDescriptor `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"`
+}
+
+func (m *ImportDetails_Schema) Reset() { *m = ImportDetails_Schema{} }
+func (m *ImportDetails_Schema) String() string { return proto.CompactTextString(m) }
+func (*ImportDetails_Schema) ProtoMessage() {}
+func (*ImportDetails_Schema) Descriptor() ([]byte, []int) {
+ return fileDescriptor_jobs_dec159c6138441c3, []int{9, 1}
+}
+func (m *ImportDetails_Schema) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImportDetails_Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalTo(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (dst *ImportDetails_Schema) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImportDetails_Schema.Merge(dst, src)
+}
+func (m *ImportDetails_Schema) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImportDetails_Schema) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImportDetails_Schema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImportDetails_Schema proto.InternalMessageInfo
+
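ImportDetails_Schema gets the standard generated-message template: Reset, String, ProtoMessage, and Descriptor satisfy proto.Message, while the XXX_* wrappers route through the hand-rolled Unmarshal, MarshalTo, and Size methods added further down, so encoding uses gogoproto's generated fast path rather than reflection.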
// SequenceValChunk represents a single chunk of sequence values allocated
// during an IMPORT.
type SequenceValChunk struct {
@@ -838,7 +873,7 @@ func (m *SequenceValChunk) Reset() { *m = SequenceValChunk{} }
func (m *SequenceValChunk) String() string { return proto.CompactTextString(m) }
func (*SequenceValChunk) ProtoMessage() {}
func (*SequenceValChunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{10}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{10}
}
func (m *SequenceValChunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -874,7 +909,7 @@ func (m *SequenceDetails) Reset() { *m = SequenceDetails{} }
func (m *SequenceDetails) String() string { return proto.CompactTextString(m) }
func (*SequenceDetails) ProtoMessage() {}
func (*SequenceDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{11}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{11}
}
func (m *SequenceDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -909,7 +944,7 @@ func (m *SequenceDetails_SequenceChunks) Reset() { *m = SequenceDetails_
func (m *SequenceDetails_SequenceChunks) String() string { return proto.CompactTextString(m) }
func (*SequenceDetails_SequenceChunks) ProtoMessage() {}
func (*SequenceDetails_SequenceChunks) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{11, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{11, 0}
}
func (m *SequenceDetails_SequenceChunks) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -955,7 +990,7 @@ func (m *ImportProgress) Reset() { *m = ImportProgress{} }
func (m *ImportProgress) String() string { return proto.CompactTextString(m) }
func (*ImportProgress) ProtoMessage() {}
func (*ImportProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{12}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{12}
}
func (m *ImportProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -998,7 +1033,7 @@ func (m *TypeSchemaChangeDetails) Reset() { *m = TypeSchemaChangeDetails
func (m *TypeSchemaChangeDetails) String() string { return proto.CompactTextString(m) }
func (*TypeSchemaChangeDetails) ProtoMessage() {}
func (*TypeSchemaChangeDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{13}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{13}
}
func (m *TypeSchemaChangeDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1031,7 +1066,7 @@ func (m *TypeSchemaChangeProgress) Reset() { *m = TypeSchemaChangeProgre
func (m *TypeSchemaChangeProgress) String() string { return proto.CompactTextString(m) }
func (*TypeSchemaChangeProgress) ProtoMessage() {}
func (*TypeSchemaChangeProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{14}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{14}
}
func (m *TypeSchemaChangeProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1065,7 +1100,7 @@ func (m *NewSchemaChangeDetails) Reset() { *m = NewSchemaChangeDetails{}
func (m *NewSchemaChangeDetails) String() string { return proto.CompactTextString(m) }
func (*NewSchemaChangeDetails) ProtoMessage() {}
func (*NewSchemaChangeDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{15}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{15}
}
func (m *NewSchemaChangeDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1099,7 +1134,7 @@ func (m *NewSchemaChangeProgress) Reset() { *m = NewSchemaChangeProgress
func (m *NewSchemaChangeProgress) String() string { return proto.CompactTextString(m) }
func (*NewSchemaChangeProgress) ProtoMessage() {}
func (*NewSchemaChangeProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{16}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{16}
}
func (m *NewSchemaChangeProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1132,7 +1167,7 @@ func (m *ResumeSpanList) Reset() { *m = ResumeSpanList{} }
func (m *ResumeSpanList) String() string { return proto.CompactTextString(m) }
func (*ResumeSpanList) ProtoMessage() {}
func (*ResumeSpanList) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{17}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{17}
}
func (m *ResumeSpanList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1167,7 +1202,7 @@ func (m *DroppedTableDetails) Reset() { *m = DroppedTableDetails{} }
func (m *DroppedTableDetails) String() string { return proto.CompactTextString(m) }
func (*DroppedTableDetails) ProtoMessage() {}
func (*DroppedTableDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{18}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{18}
}
func (m *DroppedTableDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1230,7 +1265,7 @@ func (m *SchemaChangeGCDetails) Reset() { *m = SchemaChangeGCDetails{} }
func (m *SchemaChangeGCDetails) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCDetails) ProtoMessage() {}
func (*SchemaChangeGCDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{19}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{19}
}
func (m *SchemaChangeGCDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1264,7 +1299,7 @@ func (m *SchemaChangeGCDetails_DroppedIndex) Reset() { *m = SchemaChange
func (m *SchemaChangeGCDetails_DroppedIndex) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCDetails_DroppedIndex) ProtoMessage() {}
func (*SchemaChangeGCDetails_DroppedIndex) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{19, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{19, 0}
}
func (m *SchemaChangeGCDetails_DroppedIndex) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1298,7 +1333,7 @@ func (m *SchemaChangeGCDetails_DroppedID) Reset() { *m = SchemaChangeGCD
func (m *SchemaChangeGCDetails_DroppedID) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCDetails_DroppedID) ProtoMessage() {}
func (*SchemaChangeGCDetails_DroppedID) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{19, 1}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{19, 1}
}
func (m *SchemaChangeGCDetails_DroppedID) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1332,7 +1367,7 @@ func (m *SchemaChangeGCDetails_DroppedTenant) Reset() { *m = SchemaChang
func (m *SchemaChangeGCDetails_DroppedTenant) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCDetails_DroppedTenant) ProtoMessage() {}
func (*SchemaChangeGCDetails_DroppedTenant) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{19, 2}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{19, 2}
}
func (m *SchemaChangeGCDetails_DroppedTenant) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1396,7 +1431,7 @@ func (m *SchemaChangeDetails) Reset() { *m = SchemaChangeDetails{} }
func (m *SchemaChangeDetails) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeDetails) ProtoMessage() {}
func (*SchemaChangeDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{20}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{20}
}
func (m *SchemaChangeDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1428,7 +1463,7 @@ func (m *SchemaChangeProgress) Reset() { *m = SchemaChangeProgress{} }
func (m *SchemaChangeProgress) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeProgress) ProtoMessage() {}
func (*SchemaChangeProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{21}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{21}
}
func (m *SchemaChangeProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1466,7 +1501,7 @@ func (m *SchemaChangeGCProgress) Reset() { *m = SchemaChangeGCProgress{}
func (m *SchemaChangeGCProgress) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCProgress) ProtoMessage() {}
func (*SchemaChangeGCProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{22}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{22}
}
func (m *SchemaChangeGCProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1500,7 +1535,7 @@ func (m *SchemaChangeGCProgress_IndexProgress) Reset() { *m = SchemaChan
func (m *SchemaChangeGCProgress_IndexProgress) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCProgress_IndexProgress) ProtoMessage() {}
func (*SchemaChangeGCProgress_IndexProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{22, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{22, 0}
}
func (m *SchemaChangeGCProgress_IndexProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1534,7 +1569,7 @@ func (m *SchemaChangeGCProgress_TableProgress) Reset() { *m = SchemaChan
func (m *SchemaChangeGCProgress_TableProgress) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCProgress_TableProgress) ProtoMessage() {}
func (*SchemaChangeGCProgress_TableProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{22, 1}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{22, 1}
}
func (m *SchemaChangeGCProgress_TableProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1567,7 +1602,7 @@ func (m *SchemaChangeGCProgress_TenantProgress) Reset() { *m = SchemaCha
func (m *SchemaChangeGCProgress_TenantProgress) String() string { return proto.CompactTextString(m) }
func (*SchemaChangeGCProgress_TenantProgress) ProtoMessage() {}
func (*SchemaChangeGCProgress_TenantProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{22, 2}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{22, 2}
}
func (m *SchemaChangeGCProgress_TenantProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1600,7 +1635,7 @@ func (m *ChangefeedTarget) Reset() { *m = ChangefeedTarget{} }
func (m *ChangefeedTarget) String() string { return proto.CompactTextString(m) }
func (*ChangefeedTarget) ProtoMessage() {}
func (*ChangefeedTarget) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{23}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{23}
}
func (m *ChangefeedTarget) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1653,7 +1688,7 @@ func (m *ChangefeedDetails) Reset() { *m = ChangefeedDetails{} }
func (m *ChangefeedDetails) String() string { return proto.CompactTextString(m) }
func (*ChangefeedDetails) ProtoMessage() {}
func (*ChangefeedDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{24}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{24}
}
func (m *ChangefeedDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1693,7 +1728,7 @@ func (m *ResolvedSpan) Reset() { *m = ResolvedSpan{} }
func (m *ResolvedSpan) String() string { return proto.CompactTextString(m) }
func (*ResolvedSpan) ProtoMessage() {}
func (*ResolvedSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{25}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{25}
}
func (m *ResolvedSpan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1726,7 +1761,7 @@ func (m *ResolvedSpans) Reset() { *m = ResolvedSpans{} }
func (m *ResolvedSpans) String() string { return proto.CompactTextString(m) }
func (*ResolvedSpans) ProtoMessage() {}
func (*ResolvedSpans) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{26}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{26}
}
func (m *ResolvedSpans) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1769,7 +1804,7 @@ func (m *ChangefeedProgress) Reset() { *m = ChangefeedProgress{} }
func (m *ChangefeedProgress) String() string { return proto.CompactTextString(m) }
func (*ChangefeedProgress) ProtoMessage() {}
func (*ChangefeedProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{27}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{27}
}
func (m *ChangefeedProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1813,7 +1848,7 @@ func (m *CreateStatsDetails) Reset() { *m = CreateStatsDetails{} }
func (m *CreateStatsDetails) String() string { return proto.CompactTextString(m) }
func (*CreateStatsDetails) ProtoMessage() {}
func (*CreateStatsDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{28}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{28}
}
func (m *CreateStatsDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1854,7 +1889,7 @@ func (m *CreateStatsDetails_ColStat) Reset() { *m = CreateStatsDetails_C
func (m *CreateStatsDetails_ColStat) String() string { return proto.CompactTextString(m) }
func (*CreateStatsDetails_ColStat) ProtoMessage() {}
func (*CreateStatsDetails_ColStat) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{28, 0}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{28, 0}
}
func (m *CreateStatsDetails_ColStat) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1886,7 +1921,7 @@ func (m *CreateStatsProgress) Reset() { *m = CreateStatsProgress{} }
func (m *CreateStatsProgress) String() string { return proto.CompactTextString(m) }
func (*CreateStatsProgress) ProtoMessage() {}
func (*CreateStatsProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{29}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{29}
}
func (m *CreateStatsProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1919,7 +1954,7 @@ func (m *MigrationDetails) Reset() { *m = MigrationDetails{} }
func (m *MigrationDetails) String() string { return proto.CompactTextString(m) }
func (*MigrationDetails) ProtoMessage() {}
func (*MigrationDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{30}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{30}
}
func (m *MigrationDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1951,7 +1986,7 @@ func (m *MigrationProgress) Reset() { *m = MigrationProgress{} }
func (m *MigrationProgress) String() string { return proto.CompactTextString(m) }
func (*MigrationProgress) ProtoMessage() {}
func (*MigrationProgress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{31}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{31}
}
func (m *MigrationProgress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2021,7 +2056,7 @@ func (m *Payload) Reset() { *m = Payload{} }
func (m *Payload) String() string { return proto.CompactTextString(m) }
func (*Payload) ProtoMessage() {}
func (*Payload) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{32}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{32}
}
func (m *Payload) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2453,7 +2488,7 @@ func (m *Progress) Reset() { *m = Progress{} }
func (m *Progress) String() string { return proto.CompactTextString(m) }
func (*Progress) ProtoMessage() {}
func (*Progress) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{33}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{33}
}
func (m *Progress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2949,7 +2984,7 @@ func (m *Job) Reset() { *m = Job{} }
func (m *Job) String() string { return proto.CompactTextString(m) }
func (*Job) ProtoMessage() {}
func (*Job) Descriptor() ([]byte, []int) {
- return fileDescriptor_jobs_2bb17fe81255384c, []int{34}
+ return fileDescriptor_jobs_dec159c6138441c3, []int{34}
}
func (m *Job) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2994,6 +3029,7 @@ func init() {
proto.RegisterType((*RestoreProgress)(nil), "cockroach.sql.jobs.jobspb.RestoreProgress")
proto.RegisterType((*ImportDetails)(nil), "cockroach.sql.jobs.jobspb.ImportDetails")
proto.RegisterType((*ImportDetails_Table)(nil), "cockroach.sql.jobs.jobspb.ImportDetails.Table")
+ proto.RegisterType((*ImportDetails_Schema)(nil), "cockroach.sql.jobs.jobspb.ImportDetails.Schema")
proto.RegisterType((*SequenceValChunk)(nil), "cockroach.sql.jobs.jobspb.SequenceValChunk")
proto.RegisterType((*SequenceDetails)(nil), "cockroach.sql.jobs.jobspb.SequenceDetails")
proto.RegisterMapType((map[int32]*SequenceDetails_SequenceChunks)(nil), "cockroach.sql.jobs.jobspb.SequenceDetails.SeqIdToChunksEntry")
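Registering the nested message under its fully qualified name, cockroach.sql.jobs.jobspb.ImportDetails.Schema, lets the proto runtime look the type up by name (for example when unpacking an Any or formatting text protos); without this init entry, name-based resolution of the new type would fail at runtime.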
@@ -3972,6 +4008,32 @@ func (m *ImportDetails) MarshalTo(dAtA []byte) (int, error) {
}
i += n14
}
+ if len(m.Schemas) > 0 {
+ for _, msg := range m.Schemas {
+ dAtA[i] = 0xba
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintJobs(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.SchemasPublished {
+ dAtA[i] = 0xc0
+ i++
+ dAtA[i] = 0x1
+ i++
+ if m.SchemasPublished {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
return i, nil
}
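The hard-coded byte pairs in this hunk are the varint-encoded protobuf keys, (fieldNumber << 3) | wireType: (23<<3)|2 encodes as 0xba 0x01 for the length-delimited Schemas field, and (24<<3)|0 encodes as 0xc0 0x01 for the SchemasPublished varint. Fields numbered 16 and up no longer fit in one key byte, unlike the single 0xa key for Desc (field 1) in ImportDetails_Schema.MarshalTo below. A minimal sketch of the derivation (hand-written, not part of the generated file):

package main

import "fmt"

// tagBytes varint-encodes the protobuf key (fieldNum << 3) | wireType,
// i.e. the bytes the generator hard-codes into MarshalTo.
func tagBytes(fieldNum, wireType uint64) []byte {
	key := fieldNum<<3 | wireType
	var out []byte
	for key >= 0x80 {
		out = append(out, byte(key&0x7f)|0x80)
		key >>= 7
	}
	return append(out, byte(key))
}

func main() {
	fmt.Printf("% x\n", tagBytes(23, 2)) // ba 01 -> Schemas (length-delimited)
	fmt.Printf("% x\n", tagBytes(24, 0)) // c0 01 -> SchemasPublished (varint)
	fmt.Printf("% x\n", tagBytes(1, 2))  // 0a    -> Schema.Desc, single byte
}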
@@ -4059,6 +4121,34 @@ func (m *ImportDetails_Table) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *ImportDetails_Schema) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImportDetails_Schema) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Desc != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintJobs(dAtA, i, uint64(m.Desc.Size()))
+ n16, err := m.Desc.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ return i, nil
+}
+
func (m *SequenceValChunk) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -4136,11 +4226,11 @@ func (m *SequenceDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintJobs(dAtA, i, uint64(v.Size()))
- n16, err := v.MarshalTo(dAtA[i:])
+ n17, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n16
+ i += n17
}
}
}
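From here to the end of the MarshalTo section the diff is pure renumbering fallout: the generator allocates its temporaries (nN for nested-message byte counts, fN for floats, jN/dAtAN for packed-varint scratch buffers) sequentially across the whole file, so inserting n16 for Schema.Desc above bumps every later index by one. The hunks below change no behavior.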
@@ -4197,8 +4287,8 @@ func (m *ImportProgress) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintJobs(dAtA, i, uint64(len(m.SamplingProgress)*4))
for _, num := range m.SamplingProgress {
- f17 := math.Float32bits(float32(num))
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f17))
+ f18 := math.Float32bits(float32(num))
+ encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f18))
i += 4
}
}
@@ -4207,8 +4297,8 @@ func (m *ImportProgress) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintJobs(dAtA, i, uint64(len(m.ReadProgress)*4))
for _, num := range m.ReadProgress {
- f18 := math.Float32bits(float32(num))
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f18))
+ f19 := math.Float32bits(float32(num))
+ encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f19))
i += 4
}
}
@@ -4217,8 +4307,8 @@ func (m *ImportProgress) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintJobs(dAtA, i, uint64(len(m.WriteProgress)*4))
for _, num := range m.WriteProgress {
- f19 := math.Float32bits(float32(num))
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f19))
+ f20 := math.Float32bits(float32(num))
+ encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f20))
i += 4
}
}
@@ -4235,22 +4325,22 @@ func (m *ImportProgress) MarshalTo(dAtA []byte) (int, error) {
}
}
if len(m.ResumePos) > 0 {
- dAtA21 := make([]byte, len(m.ResumePos)*10)
- var j20 int
+ dAtA22 := make([]byte, len(m.ResumePos)*10)
+ var j21 int
for _, num1 := range m.ResumePos {
num := uint64(num1)
for num >= 1<<7 {
- dAtA21[j20] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j20++
+ j21++
}
- dAtA21[j20] = uint8(num)
- j20++
+ dAtA22[j21] = uint8(num)
+ j21++
}
dAtA[i] = 0x2a
i++
- i = encodeVarintJobs(dAtA, i, uint64(j20))
- i += copy(dAtA[i:], dAtA21[:j20])
+ i = encodeVarintJobs(dAtA, i, uint64(j21))
+ i += copy(dAtA[i:], dAtA22[:j21])
}
if len(m.SequenceDetails) > 0 {
for _, msg := range m.SequenceDetails {
@@ -4362,21 +4452,21 @@ func (m *NewSchemaChangeProgress) MarshalTo(dAtA []byte) (int, error) {
var l int
_ = l
if len(m.States) > 0 {
- dAtA23 := make([]byte, len(m.States)*10)
- var j22 int
+ dAtA24 := make([]byte, len(m.States)*10)
+ var j23 int
for _, num := range m.States {
for num >= 1<<7 {
- dAtA23[j22] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j22++
+ j23++
}
- dAtA23[j22] = uint8(num)
- j22++
+ dAtA24[j23] = uint8(num)
+ j23++
}
dAtA[i] = 0xa
i++
- i = encodeVarintJobs(dAtA, i, uint64(j22))
- i += copy(dAtA[i:], dAtA23[:j22])
+ i = encodeVarintJobs(dAtA, i, uint64(j23))
+ i += copy(dAtA[i:], dAtA24[:j23])
}
return i, nil
}
@@ -4493,11 +4583,11 @@ func (m *SchemaChangeGCDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintJobs(dAtA, i, uint64(m.InterleavedTable.Size()))
- n24, err := m.InterleavedTable.MarshalTo(dAtA[i:])
+ n25, err := m.InterleavedTable.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n24
+ i += n25
}
if len(m.InterleavedIndexes) > 0 {
for _, msg := range m.InterleavedIndexes {
@@ -4515,11 +4605,11 @@ func (m *SchemaChangeGCDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x32
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Tenant.Size()))
- n25, err := m.Tenant.MarshalTo(dAtA[i:])
+ n26, err := m.Tenant.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n25
+ i += n26
}
return i, nil
}
@@ -4668,38 +4758,38 @@ func (m *SchemaChangeDetails) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintJobs(dAtA, i, uint64(m.FormatVersion))
}
if len(m.DroppedTypes) > 0 {
- dAtA27 := make([]byte, len(m.DroppedTypes)*10)
- var j26 int
+ dAtA28 := make([]byte, len(m.DroppedTypes)*10)
+ var j27 int
for _, num := range m.DroppedTypes {
for num >= 1<<7 {
- dAtA27[j26] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA28[j27] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j26++
+ j27++
}
- dAtA27[j26] = uint8(num)
- j26++
+ dAtA28[j27] = uint8(num)
+ j27++
}
dAtA[i] = 0x42
i++
- i = encodeVarintJobs(dAtA, i, uint64(j26))
- i += copy(dAtA[i:], dAtA27[:j26])
+ i = encodeVarintJobs(dAtA, i, uint64(j27))
+ i += copy(dAtA[i:], dAtA28[:j27])
}
if len(m.DroppedSchemas) > 0 {
- dAtA29 := make([]byte, len(m.DroppedSchemas)*10)
- var j28 int
+ dAtA30 := make([]byte, len(m.DroppedSchemas)*10)
+ var j29 int
for _, num := range m.DroppedSchemas {
for num >= 1<<7 {
- dAtA29[j28] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA30[j29] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j28++
+ j29++
}
- dAtA29[j28] = uint8(num)
- j28++
+ dAtA30[j29] = uint8(num)
+ j29++
}
dAtA[i] = 0x4a
i++
- i = encodeVarintJobs(dAtA, i, uint64(j28))
- i += copy(dAtA[i:], dAtA29[:j28])
+ i = encodeVarintJobs(dAtA, i, uint64(j29))
+ i += copy(dAtA[i:], dAtA30[:j29])
}
return i, nil
}
@@ -4765,11 +4855,11 @@ func (m *SchemaChangeGCProgress) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Tenant.Size()))
- n30, err := m.Tenant.MarshalTo(dAtA[i:])
+ n31, err := m.Tenant.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n30
+ i += n31
}
return i, nil
}
@@ -4943,21 +5033,21 @@ func (m *ChangefeedDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintJobs(dAtA, i, uint64((&v).Size()))
- n31, err := (&v).MarshalTo(dAtA[i:])
+ n32, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n31
+ i += n32
}
}
dAtA[i] = 0x3a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.StatementTime.Size()))
- n32, err := m.StatementTime.MarshalTo(dAtA[i:])
+ n33, err := m.StatementTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n32
+ i += n33
return i, nil
}
@@ -4979,19 +5069,19 @@ func (m *ResolvedSpan) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Span.Size()))
- n33, err := m.Span.MarshalTo(dAtA[i:])
+ n34, err := m.Span.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n33
+ i += n34
dAtA[i] = 0x12
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Timestamp.Size()))
- n34, err := m.Timestamp.MarshalTo(dAtA[i:])
+ n35, err := m.Timestamp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n34
+ i += n35
if m.DeprecatedBoundaryReached {
dAtA[i] = 0x18
i++
@@ -5070,11 +5160,11 @@ func (m *ChangefeedProgress) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.ProtectedTimestampRecord.Size()))
- n35, err := m.ProtectedTimestampRecord.MarshalTo(dAtA[i:])
+ n36, err := m.ProtectedTimestampRecord.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n35
+ i += n36
return i, nil
}
@@ -5102,11 +5192,11 @@ func (m *CreateStatsDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Table.Size()))
- n36, err := m.Table.MarshalTo(dAtA[i:])
+ n37, err := m.Table.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n36
+ i += n37
if len(m.ColumnStats) > 0 {
for _, msg := range m.ColumnStats {
dAtA[i] = 0x1a
@@ -5129,11 +5219,11 @@ func (m *CreateStatsDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x2a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.AsOf.Size()))
- n37, err := m.AsOf.MarshalTo(dAtA[i:])
+ n38, err := m.AsOf.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n37
+ i += n38
}
if len(m.FQTableName) > 0 {
dAtA[i] = 0x32
@@ -5166,21 +5256,21 @@ func (m *CreateStatsDetails_ColStat) MarshalTo(dAtA []byte) (int, error) {
var l int
_ = l
if len(m.ColumnIDs) > 0 {
- dAtA39 := make([]byte, len(m.ColumnIDs)*10)
- var j38 int
+ dAtA40 := make([]byte, len(m.ColumnIDs)*10)
+ var j39 int
for _, num := range m.ColumnIDs {
for num >= 1<<7 {
- dAtA39[j38] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA40[j39] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j38++
+ j39++
}
- dAtA39[j38] = uint8(num)
- j38++
+ dAtA40[j39] = uint8(num)
+ j39++
}
dAtA[i] = 0xa
i++
- i = encodeVarintJobs(dAtA, i, uint64(j38))
- i += copy(dAtA[i:], dAtA39[:j38])
+ i = encodeVarintJobs(dAtA, i, uint64(j39))
+ i += copy(dAtA[i:], dAtA40[:j39])
}
if m.HasHistogram {
dAtA[i] = 0x10
@@ -5247,11 +5337,11 @@ func (m *MigrationDetails) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintJobs(dAtA, i, uint64(m.ClusterVersion.Size()))
- n40, err := m.ClusterVersion.MarshalTo(dAtA[i:])
+ n41, err := m.ClusterVersion.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n40
+ i += n41
}
return i, nil
}
@@ -5312,21 +5402,21 @@ func (m *Payload) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintJobs(dAtA, i, uint64(m.FinishedMicros))
}
if len(m.DescriptorIDs) > 0 {
- dAtA42 := make([]byte, len(m.DescriptorIDs)*10)
- var j41 int
+ dAtA43 := make([]byte, len(m.DescriptorIDs)*10)
+ var j42 int
for _, num := range m.DescriptorIDs {
for num >= 1<<7 {
- dAtA42[j41] = uint8(uint64(num)&0x7f | 0x80)
+ dAtA43[j42] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
- j41++
+ j42++
}
- dAtA42[j41] = uint8(num)
- j41++
+ dAtA43[j42] = uint8(num)
+ j42++
}
dAtA[i] = 0x32
i++
- i = encodeVarintJobs(dAtA, i, uint64(j41))
- i += copy(dAtA[i:], dAtA42[:j41])
+ i = encodeVarintJobs(dAtA, i, uint64(j42))
+ i += copy(dAtA[i:], dAtA43[:j42])
}
if len(m.Error) > 0 {
dAtA[i] = 0x42
@@ -5338,18 +5428,18 @@ func (m *Payload) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x4a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Lease.Size()))
- n43, err := m.Lease.MarshalTo(dAtA[i:])
+ n44, err := m.Lease.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n43
+ i += n44
}
if m.Details != nil {
- nn44, err := m.Details.MarshalTo(dAtA[i:])
+ nn45, err := m.Details.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += nn44
+ i += nn45
}
if len(m.Statement) > 0 {
dAtA[i] = 0x82
@@ -5393,11 +5483,11 @@ func (m *Payload) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.FinalResumeError.Size()))
- n45, err := m.FinalResumeError.MarshalTo(dAtA[i:])
+ n46, err := m.FinalResumeError.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n45
+ i += n46
}
if m.Noncancelable {
dAtA[i] = 0xa0
@@ -5420,11 +5510,11 @@ func (m *Payload_Backup) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Backup.Size()))
- n46, err := m.Backup.MarshalTo(dAtA[i:])
+ n47, err := m.Backup.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n46
+ i += n47
}
return i, nil
}
@@ -5434,11 +5524,11 @@ func (m *Payload_Restore) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x5a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Restore.Size()))
- n47, err := m.Restore.MarshalTo(dAtA[i:])
+ n48, err := m.Restore.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n47
+ i += n48
}
return i, nil
}
@@ -5448,11 +5538,11 @@ func (m *Payload_SchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x62
i++
i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChange.Size()))
- n48, err := m.SchemaChange.MarshalTo(dAtA[i:])
+ n49, err := m.SchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n48
+ i += n49
}
return i, nil
}
@@ -5462,11 +5552,11 @@ func (m *Payload_Import) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x6a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Import.Size()))
- n49, err := m.Import.MarshalTo(dAtA[i:])
+ n50, err := m.Import.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n49
+ i += n50
}
return i, nil
}
@@ -5476,11 +5566,11 @@ func (m *Payload_Changefeed) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x72
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Changefeed.Size()))
- n50, err := m.Changefeed.MarshalTo(dAtA[i:])
+ n51, err := m.Changefeed.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n50
+ i += n51
}
return i, nil
}
@@ -5490,11 +5580,11 @@ func (m *Payload_CreateStats) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x7a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.CreateStats.Size()))
- n51, err := m.CreateStats.MarshalTo(dAtA[i:])
+ n52, err := m.CreateStats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n51
+ i += n52
}
return i, nil
}
@@ -5506,11 +5596,11 @@ func (m *Payload_SchemaChangeGC) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChangeGC.Size()))
- n52, err := m.SchemaChangeGC.MarshalTo(dAtA[i:])
+ n53, err := m.SchemaChangeGC.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n52
+ i += n53
}
return i, nil
}
@@ -5522,11 +5612,11 @@ func (m *Payload_TypeSchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.TypeSchemaChange.Size()))
- n53, err := m.TypeSchemaChange.MarshalTo(dAtA[i:])
+ n54, err := m.TypeSchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n53
+ i += n54
}
return i, nil
}
@@ -5538,11 +5628,11 @@ func (m *Payload_StreamIngestion) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.StreamIngestion.Size()))
- n54, err := m.StreamIngestion.MarshalTo(dAtA[i:])
+ n55, err := m.StreamIngestion.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n54
+ i += n55
}
return i, nil
}
@@ -5554,11 +5644,11 @@ func (m *Payload_NewSchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.NewSchemaChange.Size()))
- n55, err := m.NewSchemaChange.MarshalTo(dAtA[i:])
+ n56, err := m.NewSchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n55
+ i += n56
}
return i, nil
}
@@ -5570,11 +5660,11 @@ func (m *Payload_Migration) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Migration.Size()))
- n56, err := m.Migration.MarshalTo(dAtA[i:])
+ n57, err := m.Migration.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n56
+ i += n57
}
return i, nil
}
@@ -5594,11 +5684,11 @@ func (m *Progress) MarshalTo(dAtA []byte) (int, error) {
var l int
_ = l
if m.Progress != nil {
- nn57, err := m.Progress.MarshalTo(dAtA[i:])
+ nn58, err := m.Progress.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += nn57
+ i += nn58
}
if m.ModifiedMicros != 0 {
dAtA[i] = 0x10
@@ -5612,11 +5702,11 @@ func (m *Progress) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], m.RunningStatus)
}
if m.Details != nil {
- nn58, err := m.Details.MarshalTo(dAtA[i:])
+ nn59, err := m.Details.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += nn58
+ i += nn59
}
return i, nil
}
@@ -5635,11 +5725,11 @@ func (m *Progress_HighWater) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.HighWater.Size()))
- n59, err := m.HighWater.MarshalTo(dAtA[i:])
+ n60, err := m.HighWater.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n59
+ i += n60
}
return i, nil
}
@@ -5649,11 +5739,11 @@ func (m *Progress_Backup) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x52
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Backup.Size()))
- n60, err := m.Backup.MarshalTo(dAtA[i:])
+ n61, err := m.Backup.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n60
+ i += n61
}
return i, nil
}
@@ -5663,11 +5753,11 @@ func (m *Progress_Restore) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x5a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Restore.Size()))
- n61, err := m.Restore.MarshalTo(dAtA[i:])
+ n62, err := m.Restore.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n61
+ i += n62
}
return i, nil
}
@@ -5677,11 +5767,11 @@ func (m *Progress_SchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x62
i++
i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChange.Size()))
- n62, err := m.SchemaChange.MarshalTo(dAtA[i:])
+ n63, err := m.SchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n62
+ i += n63
}
return i, nil
}
@@ -5691,11 +5781,11 @@ func (m *Progress_Import) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x6a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Import.Size()))
- n63, err := m.Import.MarshalTo(dAtA[i:])
+ n64, err := m.Import.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n63
+ i += n64
}
return i, nil
}
@@ -5705,11 +5795,11 @@ func (m *Progress_Changefeed) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x72
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Changefeed.Size()))
- n64, err := m.Changefeed.MarshalTo(dAtA[i:])
+ n65, err := m.Changefeed.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n64
+ i += n65
}
return i, nil
}
@@ -5719,11 +5809,11 @@ func (m *Progress_CreateStats) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x7a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.CreateStats.Size()))
- n65, err := m.CreateStats.MarshalTo(dAtA[i:])
+ n66, err := m.CreateStats.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n65
+ i += n66
}
return i, nil
}
@@ -5735,11 +5825,11 @@ func (m *Progress_SchemaChangeGC) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.SchemaChangeGC.Size()))
- n66, err := m.SchemaChangeGC.MarshalTo(dAtA[i:])
+ n67, err := m.SchemaChangeGC.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n66
+ i += n67
}
return i, nil
}
@@ -5751,11 +5841,11 @@ func (m *Progress_TypeSchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.TypeSchemaChange.Size()))
- n67, err := m.TypeSchemaChange.MarshalTo(dAtA[i:])
+ n68, err := m.TypeSchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n67
+ i += n68
}
return i, nil
}
@@ -5767,11 +5857,11 @@ func (m *Progress_StreamIngest) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.StreamIngest.Size()))
- n68, err := m.StreamIngest.MarshalTo(dAtA[i:])
+ n69, err := m.StreamIngest.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n68
+ i += n69
}
return i, nil
}
@@ -5783,11 +5873,11 @@ func (m *Progress_NewSchemaChange) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.NewSchemaChange.Size()))
- n69, err := m.NewSchemaChange.MarshalTo(dAtA[i:])
+ n70, err := m.NewSchemaChange.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n69
+ i += n70
}
return i, nil
}
@@ -5799,11 +5889,11 @@ func (m *Progress_Migration) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Migration.Size()))
- n70, err := m.Migration.MarshalTo(dAtA[i:])
+ n71, err := m.Migration.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n70
+ i += n71
}
return i, nil
}
@@ -5831,21 +5921,21 @@ func (m *Job) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Progress.Size()))
- n71, err := m.Progress.MarshalTo(dAtA[i:])
+ n72, err := m.Progress.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n71
+ i += n72
}
if m.Payload != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintJobs(dAtA, i, uint64(m.Payload.Size()))
- n72, err := m.Payload.MarshalTo(dAtA[i:])
+ n73, err := m.Payload.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n72
+ i += n73
}
return i, nil
}
@@ -6224,6 +6314,15 @@ func (m *ImportDetails) Size() (n int) {
l = m.ProtectedTimestampRecord.Size()
n += 2 + l + sovJobs(uint64(l))
}
+ if len(m.Schemas) > 0 {
+ for _, e := range m.Schemas {
+ l = e.Size()
+ n += 2 + l + sovJobs(uint64(l))
+ }
+ }
+ if m.SchemasPublished {
+ n += 3
+ }
return n
}
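The size accounting mirrors the marshalling above: each Schemas element costs a two-byte key (field 23) plus the varint-encoded length plus the payload, hence 2 + l + sovJobs(uint64(l)), and the bool is a flat three bytes (two key bytes plus one value byte). ImportDetails_Schema.Size below pays only 1 + l + sovJobs(uint64(l)) for Desc, since field 1 has a single-byte key. sovJobs is the generated varint-width helper; a stand-in with the same assumed semantics:

// sizeOfVarint mirrors what sovJobs computes: the number of bytes in
// the varint encoding of x, at seven payload bits per byte.
func sizeOfVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}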
@@ -6259,6 +6358,19 @@ func (m *ImportDetails_Table) Size() (n int) {
return n
}
+func (m *ImportDetails_Schema) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Desc != nil {
+ l = m.Desc.Size()
+ n += 1 + l + sovJobs(uint64(l))
+ }
+ return n
+}
+
func (m *SequenceValChunk) Size() (n int) {
if m == nil {
return 0
@@ -9877,6 +9989,57 @@ func (m *ImportDetails) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 23:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowJobs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthJobs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Schemas = append(m.Schemas, ImportDetails_Schema{})
+ if err := m.Schemas[len(m.Schemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 24:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemasPublished", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowJobs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SchemasPublished = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipJobs(dAtA[iNdEx:])
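Both new cases use the standard inlined varint loop: accumulate seven low bits per byte, least-significant group first, until a byte with a clear high bit, with the shift >= 64 guard rejecting oversized encodings. Field numbers this binary does not know fall through to the default skipJobs path shown above, which is what lets older nodes skip over, rather than choke on, payloads written by newer ones. A self-contained sketch of the loop:

// decodeVarint sketches the inlined loops above: it returns the decoded
// value, the number of bytes consumed, and whether decoding succeeded.
func decodeVarint(b []byte) (v uint64, n int, ok bool) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, false // truncated input (io.ErrUnexpectedEOF above)
		}
		c := b[n]
		n++
		v |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return v, n, true
		}
	}
	return 0, 0, false // more than ten bytes: ErrIntOverflowJobs above
}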
@@ -10098,6 +10261,89 @@ func (m *ImportDetails_Table) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *ImportDetails_Schema) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowJobs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Schema: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Schema: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowJobs
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthJobs
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Desc == nil {
+ m.Desc = &descpb.SchemaDescriptor{}
+ }
+ if err := m.Desc.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipJobs(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthJobs
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *SequenceValChunk) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -15490,319 +15736,322 @@ var (
ErrIntOverflowJobs = fmt.Errorf("proto: integer overflow")
)
-func init() { proto.RegisterFile("jobs/jobspb/jobs.proto", fileDescriptor_jobs_2bb17fe81255384c) }
-
-var fileDescriptor_jobs_2bb17fe81255384c = []byte{
- // 4967 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x7b, 0x4b, 0x6c, 0x23, 0x47,
- 0x7a, 0xbf, 0x9a, 0xa4, 0xc8, 0xe6, 0xc7, 0x87, 0x9a, 0x25, 0xcd, 0x0c, 0xcd, 0xb5, 0x87, 0x5a,
- 0xae, 0xed, 0x79, 0x78, 0x4d, 0xed, 0xca, 0xfb, 0xf2, 0xfc, 0xed, 0xb1, 0xf9, 0x92, 0x44, 0x69,
- 0xf4, 0x98, 0xa6, 0x34, 0x7e, 0xec, 0xdf, 0xdb, 0x69, 0x76, 0x97, 0xa4, 0x8e, 0xc8, 0x6e, 0x4e,
- 0x57, 0x73, 0x66, 0xb4, 0x01, 0x92, 0x60, 0x83, 0x00, 0x8b, 0x39, 0x25, 0x40, 0x36, 0x97, 0x64,
- 0x80, 0x00, 0xd9, 0x05, 0x72, 0xc8, 0x25, 0x8b, 0x20, 0xc9, 0x21, 0xb7, 0x5c, 0x7c, 0x48, 0x80,
- 0xbd, 0x04, 0x30, 0x72, 0xe0, 0x26, 0xf2, 0x25, 0xc7, 0x20, 0x01, 0x82, 0xc5, 0x5c, 0x12, 0xd4,
- 0xa3, 0x9b, 0x4d, 0xea, 0x45, 0x8d, 0xec, 0xcd, 0x45, 0xea, 0xfa, 0xea, 0xab, 0x5f, 0xbd, 0xbe,
- 0xfa, 0x5e, 0x55, 0x84, 0xab, 0xbf, 0xe9, 0xb4, 0xc9, 0x02, 0xfd, 0xd3, 0x6b, 0xb3, 0x7f, 0xe5,
- 0x9e, 0xeb, 0x78, 0x0e, 0x7a, 0xc9, 0x70, 0x8c, 0x03, 0xd7, 0xd1, 0x8d, 0xfd, 0x32, 0x79, 0xd8,
- 0x29, 0xb3, 0x1a, 0xce, 0x55, 0xb8, 0x82, 0x5d, 0xd7, 0x71, 0x29, 0x3f, 0xff, 0xe0, 0x2d, 0x0a,
- 0x73, 0x7b, 0xce, 0x9e, 0xc3, 0x3e, 0x17, 0xe8, 0x97, 0xa0, 0x22, 0x86, 0xd1, 0x6b, 0x2f, 0x98,
- 0xba, 0xa7, 0x0b, 0x5a, 0xde, 0xa7, 0x59, 0xce, 0x9b, 0xbb, 0x8e, 0xdb, 0xd5, 0x3d, 0x1f, 0xe3,
- 0x6b, 0xe4, 0x61, 0x67, 0xc1, 0xd0, 0x3d, 0xbd, 0xe3, 0xec, 0x2d, 0x98, 0x98, 0x18, 0xbd, 0xf6,
- 0x02, 0xf1, 0xdc, 0xbe, 0xe1, 0xf5, 0x5d, 0x6c, 0x0a, 0xa6, 0xe2, 0x09, 0x4c, 0x1e, 0xb6, 0x75,
- 0xdb, 0xf3, 0xf1, 0xfb, 0x9e, 0xd5, 0x59, 0xd8, 0xef, 0x18, 0x0b, 0x9e, 0xd5, 0xc5, 0xc4, 0xd3,
- 0xbb, 0x3d, 0x51, 0xf3, 0x55, 0xda, 0x94, 0x18, 0xfb, 0xb8, 0xab, 0x1b, 0xfb, 0xba, 0xbd, 0x87,
- 0xdd, 0x05, 0xde, 0x87, 0xd1, 0x6b, 0x0b, 0x96, 0x57, 0x8d, 0x4e, 0x9f, 0x78, 0xd8, 0x7d, 0x84,
- 0x5d, 0x62, 0x39, 0xf6, 0x82, 0x28, 0x6a, 0xa2, 0xcc, 0xb9, 0x4a, 0xbf, 0x03, 0xd3, 0xf7, 0xb0,
- 0x4e, 0x30, 0xfa, 0x18, 0x12, 0xb6, 0x63, 0x62, 0xcd, 0x32, 0xf3, 0xd2, 0xbc, 0x74, 0x33, 0x53,
- 0xad, 0x1c, 0x0d, 0x8a, 0xf1, 0x0d, 0xc7, 0xc4, 0xcd, 0xfa, 0xf3, 0x41, 0xf1, 0xad, 0x3d, 0xcb,
- 0xdb, 0xef, 0xb7, 0xcb, 0x86, 0xd3, 0x5d, 0x08, 0x56, 0xd4, 0x6c, 0x0f, 0xbf, 0x17, 0x7a, 0x07,
- 0x7b, 0x0b, 0x62, 0x3d, 0xca, 0xbc, 0x99, 0x1a, 0xa7, 0x88, 0x4d, 0x13, 0xcd, 0xc1, 0x34, 0xee,
- 0x39, 0xc6, 0x7e, 0x3e, 0x32, 0x2f, 0xdd, 0x8c, 0xaa, 0xbc, 0x70, 0x27, 0xf6, 0xef, 0x7f, 0x56,
- 0x94, 0x4a, 0x3f, 0x8b, 0xc0, 0xb5, 0xaa, 0x6e, 0x1c, 0xf4, 0x7b, 0x0d, 0xdb, 0x70, 0x0f, 0x7b,
- 0x9e, 0xe5, 0xd8, 0x9b, 0xec, 0x2f, 0x41, 0x0a, 0x44, 0x0f, 0xf0, 0x21, 0x1b, 0x4f, 0x5a, 0xa5,
- 0x9f, 0xe8, 0x5d, 0x88, 0x75, 0x1d, 0x13, 0x33, 0xa0, 0xec, 0xe2, 0xad, 0xf2, 0xa9, 0x9b, 0x5b,
- 0x1e, 0xa2, 0xad, 0x3b, 0x26, 0x56, 0x59, 0x33, 0xd4, 0x06, 0xf9, 0xa0, 0x4b, 0x34, 0xcb, 0xde,
- 0x75, 0xf2, 0xd1, 0x79, 0xe9, 0x66, 0x6a, 0xf1, 0xce, 0x19, 0x10, 0xa7, 0x0c, 0xab, 0xbc, 0xb6,
- 0xde, 0x6a, 0xda, 0xbb, 0x4e, 0x35, 0x75, 0x34, 0x28, 0x26, 0x44, 0x41, 0x4d, 0x1c, 0x74, 0x09,
- 0xfd, 0x28, 0x6c, 0x82, 0x4f, 0xa3, 0xe3, 0xef, 0xbb, 0x16, 0x1b, 0x7f, 0x52, 0xa5, 0x9f, 0xe8,
- 0xeb, 0x80, 0x30, 0xc7, 0xc3, 0xa6, 0x46, 0x25, 0x49, 0xa3, 0x13, 0x8c, 0xb0, 0x09, 0x2a, 0x41,
- 0x4d, 0x5d, 0xf7, 0xf4, 0x35, 0x7c, 0xc8, 0x57, 0x48, 0xac, 0xd3, 0xef, 0x46, 0x21, 0x3b, 0x1c,
- 0x0a, 0x83, 0x5f, 0x81, 0x38, 0x13, 0x01, 0xcc, 0x7a, 0xc8, 0x2e, 0x7e, 0x63, 0xa2, 0xe5, 0xa0,
- 0x4d, 0xcb, 0x2d, 0xd6, 0x4e, 0x15, 0xed, 0x11, 0x82, 0x18, 0xd1, 0x3b, 0x9e, 0x18, 0x08, 0xfb,
- 0x46, 0x7f, 0x22, 0xc1, 0xfc, 0xf8, 0x88, 0xaa, 0x87, 0x6b, 0xeb, 0xad, 0x75, 0x9d, 0xca, 0xd1,
- 0x1a, 0x3e, 0x6c, 0xd6, 0xf3, 0xd1, 0xf9, 0xe8, 0xcd, 0xd4, 0xe2, 0xe6, 0xe4, 0x1d, 0x37, 0xce,
- 0x41, 0x6c, 0xd8, 0x9e, 0x7b, 0xa8, 0x9e, 0xdb, 0x71, 0xa1, 0x05, 0xaf, 0x4d, 0x04, 0x15, 0x96,
- 0xa1, 0x24, 0x97, 0xa1, 0x39, 0x98, 0x7e, 0xa4, 0x77, 0xfa, 0x58, 0xcc, 0x96, 0x17, 0xee, 0x44,
- 0xbe, 0x27, 0x95, 0xae, 0x41, 0x9c, 0x2f, 0x0c, 0xca, 0x40, 0xb2, 0xd2, 0x68, 0x2d, 0x7e, 0xfb,
- 0x3b, 0xcb, 0xb5, 0x75, 0x65, 0x4a, 0x6c, 0xc1, 0xff, 0x48, 0x70, 0xb5, 0xe5, 0xb9, 0x58, 0xef,
- 0x36, 0xed, 0x3d, 0x4c, 0xe8, 0x9c, 0xea, 0xd8, 0xd3, 0xad, 0x0e, 0x41, 0x36, 0x64, 0x09, 0xab,
- 0xd1, 0x74, 0xd3, 0x74, 0x31, 0x21, 0xbc, 0xc3, 0xea, 0xf2, 0xf3, 0x41, 0xb1, 0x36, 0xd1, 0xd1,
- 0x31, 0x8c, 0xce, 0x02, 0x87, 0xb0, 0xec, 0x3d, 0xc3, 0xe8, 0x94, 0x79, 0x4f, 0x15, 0x0e, 0xa7,
- 0x66, 0x48, 0xb8, 0x88, 0xbe, 0x09, 0x31, 0xd2, 0xd3, 0x6d, 0x36, 0x85, 0xd4, 0xe2, 0xb5, 0xd0,
- 0xfa, 0xfb, 0x47, 0xb0, 0xd5, 0xd3, 0xed, 0x6a, 0xec, 0xd3, 0x41, 0x71, 0x4a, 0x65, 0xac, 0xa8,
- 0x0a, 0x40, 0x3c, 0xdd, 0xf5, 0x34, 0xaa, 0x4b, 0x84, 0xf4, 0xbf, 0x12, 0x6a, 0x48, 0x75, 0x4d,
- 0x79, 0xbf, 0x63, 0x94, 0xb7, 0x7d, 0x5d, 0x23, 0x9a, 0x27, 0x59, 0x33, 0x4a, 0x2d, 0xe9, 0x70,
- 0x6d, 0x6c, 0x01, 0xb6, 0x5c, 0x67, 0x8f, 0x8d, 0x68, 0x09, 0xd2, 0x46, 0xdf, 0x73, 0x1e, 0x61,
- 0x97, 0x77, 0x20, 0x4d, 0xde, 0x41, 0x4a, 0x34, 0x64, 0x5d, 0xfc, 0x7d, 0x1c, 0x32, 0xfc, 0xe0,
- 0xf9, 0x6b, 0x3b, 0x3a, 0x70, 0xe9, 0x45, 0x06, 0x8e, 0xee, 0x82, 0x8c, 0x6d, 0x93, 0x23, 0x44,
- 0x26, 0x47, 0x48, 0x60, 0xdb, 0x64, 0xed, 0x5f, 0xe2, 0x27, 0x39, 0xca, 0x36, 0x35, 0x71, 0x34,
- 0x28, 0x46, 0x77, 0xd4, 0x26, 0x3f, 0xd2, 0xef, 0x40, 0xc1, 0xc4, 0x3d, 0x17, 0x1b, 0x3a, 0x3d,
- 0xd3, 0x6d, 0x36, 0x74, 0xad, 0xab, 0xdb, 0xd6, 0x2e, 0x26, 0x5e, 0x3e, 0xc6, 0x64, 0x2c, 0x3f,
- 0xe4, 0xe0, 0x73, 0x5b, 0x17, 0xf5, 0xe8, 0xf7, 0x24, 0x98, 0xed, 0xbb, 0x16, 0xd1, 0xda, 0x87,
- 0x5a, 0xc7, 0x31, 0xf4, 0x8e, 0xe5, 0x1d, 0x6a, 0x07, 0x8f, 0xf2, 0xd3, 0xec, 0x60, 0xdd, 0x3d,
- 0x57, 0x3b, 0x89, 0x45, 0x2a, 0xef, 0xb8, 0x16, 0xa9, 0x1e, 0xde, 0x13, 0x08, 0x6b, 0x8f, 0x98,
- 0xf0, 0x57, 0xe7, 0x8e, 0x06, 0x45, 0x65, 0x47, 0x6d, 0x86, 0xab, 0x1e, 0xa8, 0x4a, 0x7f, 0x8c,
- 0x19, 0xe9, 0x81, 0x5a, 0xb2, 0x1c, 0x5b, 0x73, 0xb8, 0x9e, 0xcb, 0xc7, 0xd9, 0x42, 0x2d, 0x5e,
- 0x5c, 0x43, 0xaa, 0x39, 0x7c, 0x4c, 0x97, 0xff, 0xa1, 0x04, 0x05, 0x6a, 0x72, 0xb0, 0x41, 0x97,
- 0x29, 0xb0, 0x67, 0x9a, 0x8b, 0x0d, 0xc7, 0x35, 0xf3, 0x09, 0xba, 0x4e, 0xd5, 0xd6, 0xbf, 0x4c,
- 0x6a, 0x69, 0x98, 0x65, 0xec, 0xf7, 0x2d, 0xb3, 0xbc, 0xb3, 0xd3, 0xac, 0x1f, 0x0d, 0x8a, 0xf9,
- 0x2d, 0x1f, 0x3c, 0xd8, 0x44, 0x95, 0x41, 0xab, 0xf9, 0xde, 0x29, 0x35, 0xe8, 0x7b, 0x90, 0x35,
- 0x9c, 0x4e, 0x07, 0x1b, 0x6c, 0xda, 0x3b, 0x6a, 0x33, 0x2f, 0xb3, 0x0d, 0xce, 0x1d, 0x0d, 0x8a,
- 0x99, 0x5a, 0x50, 0x43, 0xb7, 0x3a, 0x63, 0x84, 0x8b, 0x48, 0x85, 0x99, 0xd0, 0x82, 0x31, 0x7b,
- 0x92, 0x64, 0xab, 0x75, 0x6b, 0x62, 0x55, 0xa8, 0x66, 0xf1, 0x48, 0xb9, 0x50, 0x83, 0x2b, 0x27,
- 0xee, 0xe2, 0x79, 0x2a, 0x2c, 0x19, 0x56, 0x61, 0x0a, 0x64, 0xf9, 0xa6, 0xf8, 0x07, 0xb3, 0xf4,
- 0xdf, 0x33, 0x90, 0x55, 0x31, 0xf1, 0x1c, 0x17, 0xfb, 0x27, 0xea, 0xe7, 0x12, 0xcc, 0x52, 0x7f,
- 0xc3, 0xb5, 0x7a, 0x9e, 0xe3, 0x6a, 0x2e, 0x7e, 0xec, 0x5a, 0x1e, 0x26, 0xf9, 0x08, 0x13, 0xba,
- 0xca, 0x19, 0x53, 0x18, 0x05, 0x2a, 0xd7, 0x03, 0x10, 0x55, 0x60, 0x70, 0xb9, 0xbb, 0xfb, 0xa3,
- 0x5f, 0x16, 0xef, 0x4c, 0xb4, 0x8f, 0xc7, 0x5d, 0xa0, 0x72, 0xb3, 0xae, 0x22, 0xf3, 0x18, 0x30,
- 0x7a, 0x19, 0x62, 0x54, 0x6e, 0x99, 0xc9, 0x49, 0x56, 0xe5, 0xa3, 0x41, 0x31, 0x46, 0x25, 0x5b,
- 0x65, 0xd4, 0x91, 0x03, 0x1e, 0x7b, 0x81, 0x03, 0xbe, 0x0c, 0x29, 0x4f, 0x6f, 0x77, 0xb0, 0x46,
- 0x7b, 0x26, 0xe2, 0xf8, 0xbd, 0x3e, 0xb6, 0x12, 0xe4, 0x61, 0xa7, 0xad, 0x13, 0x5c, 0xde, 0xa6,
- 0x9c, 0xa1, 0xb9, 0x83, 0xe7, 0x13, 0x08, 0x5a, 0x80, 0x14, 0xd5, 0x65, 0xae, 0x65, 0x62, 0xcd,
- 0x6c, 0xb3, 0x33, 0x94, 0xac, 0x66, 0x8f, 0x06, 0x45, 0xd8, 0x14, 0xe4, 0x7a, 0x55, 0x05, 0x9f,
- 0xa5, 0xde, 0x46, 0x1e, 0xcc, 0x09, 0xa5, 0x11, 0x9c, 0x7f, 0x26, 0x4f, 0x09, 0x36, 0x84, 0x77,
- 0x26, 0xdf, 0x0c, 0xbe, 0xef, 0xbe, 0xf0, 0x30, 0x0f, 0x85, 0x4f, 0x12, 0xb5, 0x8f, 0xd5, 0xa0,
- 0x37, 0x20, 0xd7, 0x73, 0x71, 0x4f, 0x77, 0xb1, 0x66, 0x38, 0xdd, 0x5e, 0x07, 0x7b, 0xd8, 0x64,
- 0xd2, 0x2f, 0xab, 0x8a, 0xa8, 0xa8, 0xf9, 0x74, 0xf4, 0x1a, 0xb5, 0x6e, 0xba, 0x47, 0x1d, 0x27,
- 0x82, 0x5d, 0xca, 0x99, 0x64, 0x9c, 0x19, 0x46, 0x6d, 0x0a, 0x22, 0x7a, 0x0b, 0xae, 0x0c, 0xf7,
- 0x8d, 0x68, 0xbd, 0x7e, 0xbb, 0x63, 0x91, 0x7d, 0x6c, 0xe6, 0x81, 0x71, 0xcf, 0x85, 0x2a, 0xb7,
- 0xfc, 0x3a, 0x74, 0x38, 0x22, 0x8a, 0x06, 0x5d, 0x18, 0x7d, 0x0f, 0xe7, 0x53, 0xf3, 0xd2, 0xcd,
- 0xe9, 0xea, 0xca, 0xf3, 0x41, 0xb1, 0x3e, 0xb1, 0x1c, 0x11, 0xdc, 0x5d, 0xf0, 0x5c, 0x8c, 0x43,
- 0x62, 0x59, 0x13, 0x78, 0x61, 0x89, 0xf2, 0x69, 0x48, 0x05, 0x18, 0x1e, 0xc1, 0x7c, 0xfa, 0x85,
- 0xb5, 0x5d, 0x08, 0x05, 0x55, 0x20, 0xc1, 0x5d, 0x78, 0x92, 0xcf, 0xb0, 0x0d, 0xfc, 0xea, 0x69,
- 0x32, 0xc4, 0xb8, 0x42, 0xbb, 0xe4, 0xb7, 0x43, 0x75, 0x00, 0xef, 0xb0, 0xe7, 0x4b, 0x62, 0x96,
- 0xa1, 0xbc, 0x76, 0x1a, 0xca, 0x61, 0x2f, 0x2c, 0x88, 0x49, 0x4f, 0x94, 0x09, 0x5a, 0x85, 0x34,
- 0x8f, 0x0f, 0x04, 0xce, 0x0c, 0xc3, 0xb9, 0x71, 0x0a, 0x0e, 0x73, 0x7b, 0xf4, 0x10, 0x52, 0x8a,
- 0x04, 0x14, 0x82, 0xb6, 0x20, 0x4b, 0x7d, 0x55, 0xca, 0x29, 0xd0, 0x14, 0x86, 0x76, 0xeb, 0x14,
- 0xb4, 0xba, 0x60, 0x0e, 0xe1, 0x65, 0xcc, 0x10, 0x8d, 0xa0, 0x43, 0xb8, 0x4a, 0x0e, 0x89, 0x87,
- 0xbb, 0x1a, 0x3b, 0x3a, 0x44, 0x73, 0xb9, 0x2c, 0x9b, 0xf9, 0x1c, 0x43, 0xae, 0x4d, 0x2e, 0xf6,
- 0x2d, 0x86, 0xc3, 0x8e, 0x24, 0x11, 0x55, 0x26, 0xf7, 0x22, 0xe7, 0xc8, 0x09, 0x55, 0x85, 0xff,
- 0x92, 0x20, 0x77, 0x4c, 0x6f, 0xa1, 0x6d, 0x88, 0x04, 0x91, 0x0f, 0x35, 0x27, 0x11, 0x16, 0xf5,
- 0x5c, 0x46, 0x87, 0x45, 0x2c, 0x13, 0xed, 0x41, 0x92, 0x9e, 0x24, 0xdb, 0xa3, 0x61, 0x55, 0x84,
- 0x81, 0xaf, 0x1e, 0x0d, 0x8a, 0xf2, 0x16, 0x23, 0x5e, 0xba, 0x0b, 0x99, 0x83, 0x37, 0x4d, 0x54,
- 0x84, 0x94, 0xe7, 0x68, 0xf8, 0x89, 0x45, 0x3c, 0xcb, 0xde, 0x63, 0x7e, 0x8a, 0xac, 0x82, 0xe7,
- 0x34, 0x04, 0xa5, 0xf0, 0xa7, 0x11, 0x40, 0xc7, 0x15, 0x04, 0xfa, 0x3b, 0x09, 0x5e, 0xf6, 0xdd,
- 0x0f, 0xc7, 0xb5, 0xf6, 0x2c, 0x5b, 0xef, 0x8c, 0xf8, 0x21, 0x12, 0xdb, 0x8e, 0x8f, 0x2f, 0xa3,
- 0x85, 0x84, 0x6f, 0xb2, 0x29, 0xe0, 0xc7, 0x7d, 0x94, 0x97, 0xa9, 0xf1, 0xe6, 0x3e, 0xca, 0x31,
- 0x96, 0x07, 0x6a, 0xbe, 0x7f, 0x4a, 0xe3, 0xc2, 0x1a, 0xbc, 0x72, 0x26, 0xf0, 0x45, 0xcc, 0x66,
- 0xe1, 0x47, 0x12, 0x5c, 0x3b, 0xc5, 0x98, 0x85, 0x71, 0x32, 0x1c, 0xe7, 0x7e, 0x18, 0x27, 0xb5,
- 0xf8, 0xff, 0x2e, 0x61, 0x30, 0xc3, 0x83, 0x58, 0x86, 0x97, 0x4e, 0x15, 0xe6, 0xf3, 0x66, 0x23,
- 0x87, 0x80, 0x56, 0x63, 0xb2, 0xa4, 0x44, 0x4a, 0xdf, 0x80, 0x19, 0x01, 0x11, 0x38, 0xe9, 0xaf,
- 0x00, 0xec, 0x5b, 0x7b, 0xfb, 0xda, 0x63, 0xdd, 0xc3, 0xae, 0x88, 0xab, 0x93, 0x94, 0xf2, 0x01,
- 0x25, 0x94, 0xfe, 0x59, 0x86, 0x4c, 0xb3, 0xdb, 0x73, 0x5c, 0xcf, 0xf7, 0x14, 0xee, 0x41, 0x9c,
- 0x1f, 0x50, 0x21, 0x08, 0xe5, 0x33, 0xa6, 0x3a, 0xd2, 0x92, 0xdb, 0x48, 0xa1, 0xda, 0x04, 0x46,
- 0x60, 0xc2, 0x23, 0x27, 0x9a, 0xf0, 0x77, 0x21, 0xce, 0x93, 0x28, 0x22, 0x38, 0x29, 0x9e, 0x10,
- 0xd5, 0x34, 0x37, 0x97, 0xac, 0x0e, 0x5e, 0x62, 0x6c, 0x3e, 0x38, 0x6f, 0x84, 0x5e, 0x07, 0x99,
- 0x10, 0x4f, 0x23, 0xd6, 0x0f, 0xb9, 0x07, 0x10, 0xe5, 0xf1, 0x79, 0xab, 0xb5, 0xdd, 0xb2, 0x7e,
- 0x88, 0xd5, 0x04, 0x21, 0x1e, 0xfd, 0x40, 0x05, 0x90, 0x1f, 0xeb, 0x9d, 0x0e, 0xf3, 0x14, 0xa6,
- 0x59, 0x3e, 0x22, 0x28, 0x8f, 0x9e, 0xd7, 0xf8, 0x97, 0x7b, 0x5e, 0x85, 0xd1, 0xef, 0xe9, 0xde,
- 0x3e, 0xf3, 0x7e, 0x93, 0x2a, 0x70, 0xd2, 0x96, 0xee, 0xed, 0xa3, 0x3c, 0x24, 0x88, 0x4e, 0xed,
- 0x2f, 0xc9, 0xcb, 0xf3, 0xd1, 0x9b, 0x69, 0xd5, 0x2f, 0xa2, 0xeb, 0xc0, 0xbc, 0x07, 0x5e, 0x64,
- 0x86, 0x38, 0xaa, 0x86, 0x28, 0x6c, 0x1d, 0x0e, 0xac, 0x9e, 0xb6, 0x7b, 0x40, 0xb8, 0xe1, 0x15,
- 0xeb, 0x70, 0x60, 0xf5, 0x96, 0xd6, 0x88, 0x9a, 0xa0, 0x95, 0x4b, 0x07, 0x04, 0xdd, 0x80, 0x19,
- 0x8b, 0x45, 0x71, 0x9a, 0x69, 0xb9, 0xd8, 0xf0, 0x3a, 0x87, 0xcc, 0xe8, 0xca, 0x6a, 0x96, 0x93,
- 0xeb, 0x82, 0x8a, 0x6e, 0x81, 0x32, 0xee, 0x2a, 0x30, 0x63, 0x29, 0xab, 0x33, 0x63, 0x9e, 0x02,
- 0x65, 0x15, 0xfa, 0x7c, 0x68, 0xfc, 0x33, 0x9c, 0x95, 0xd3, 0x87, 0x76, 0xbf, 0x0c, 0xb3, 0x3d,
- 0xdd, 0x25, 0x58, 0x6b, 0xf7, 0x6d, 0xb3, 0x83, 0x35, 0x6e, 0x6f, 0xf2, 0x59, 0xc6, 0x9d, 0x63,
- 0x55, 0x55, 0x56, 0xc3, 0x4d, 0xd3, 0x79, 0xf1, 0xc3, 0xd5, 0xff, 0x83, 0xf8, 0xa1, 0xf0, 0xb3,
- 0x08, 0x4c, 0x33, 0x39, 0x47, 0x77, 0x20, 0x46, 0xb7, 0x59, 0x44, 0xa7, 0x93, 0xfa, 0x8d, 0xac,
- 0x0d, 0x42, 0x10, 0xb3, 0xf5, 0x2e, 0xce, 0x23, 0x26, 0x04, 0xec, 0x1b, 0x5d, 0x83, 0x04, 0xc1,
- 0x0f, 0xb5, 0x47, 0x7a, 0x27, 0x3f, 0xcb, 0x76, 0x38, 0x4e, 0xf0, 0xc3, 0x07, 0x7a, 0x07, 0x5d,
- 0x81, 0xb8, 0x45, 0x34, 0x1b, 0x3f, 0xce, 0xcf, 0xf1, 0x53, 0x6f, 0x91, 0x0d, 0xfc, 0x98, 0xe9,
- 0x7f, 0xdd, 0xdd, 0xc3, 0x9e, 0x66, 0x38, 0x1d, 0x92, 0xbf, 0x42, 0x0f, 0x18, 0x75, 0x4b, 0x29,
- 0xa9, 0xe6, 0x74, 0x08, 0xfa, 0x0a, 0x24, 0x1f, 0xeb, 0x44, 0xc3, 0xdd, 0x9e, 0x77, 0xc8, 0x16,
- 0x4b, 0xa6, 0x62, 0x4f, 0x1a, 0xb4, 0xbc, 0x1a, 0x93, 0x23, 0x4a, 0x74, 0x35, 0x26, 0x47, 0x95,
- 0xd8, 0x6a, 0x4c, 0x8e, 0x29, 0xd3, 0xab, 0x31, 0x79, 0x5a, 0x89, 0xaf, 0xc6, 0xe4, 0xb8, 0x92,
- 0x58, 0x8d, 0xc9, 0x09, 0x45, 0x5e, 0x8d, 0xc9, 0xb2, 0x92, 0x5c, 0x8d, 0xc9, 0x49, 0x05, 0x56,
- 0x63, 0x32, 0x28, 0xa9, 0xd5, 0x98, 0x9c, 0x52, 0xd2, 0xab, 0x31, 0x39, 0xad, 0x64, 0x56, 0x63,
- 0x72, 0x46, 0xc9, 0xae, 0xc6, 0xe4, 0xac, 0x32, 0xb3, 0x1a, 0x93, 0x67, 0x14, 0x65, 0x35, 0x26,
- 0x2b, 0x4a, 0x6e, 0x35, 0x26, 0xe7, 0x14, 0x54, 0xfa, 0xb9, 0x04, 0x4a, 0x0b, 0x3f, 0xec, 0x63,
- 0xdb, 0xc0, 0x0f, 0xf4, 0x4e, 0x6d, 0xbf, 0x6f, 0x1f, 0xa0, 0xd7, 0x61, 0xc6, 0xa0, 0x1f, 0x1a,
- 0x0f, 0xee, 0xe9, 0x54, 0x25, 0x36, 0xd5, 0x0c, 0x23, 0xb7, 0x28, 0x95, 0xce, 0xf8, 0x15, 0x00,
- 0xc1, 0x47, 0x4f, 0x36, 0xcf, 0x20, 0x26, 0x39, 0x0b, 0x3d, 0xce, 0x63, 0x30, 0xae, 0xf3, 0x98,
- 0xa9, 0x8f, 0x11, 0x18, 0xd5, 0x79, 0x8c, 0x16, 0x60, 0xce, 0xc6, 0x4f, 0x3c, 0x6d, 0x9c, 0x99,
- 0xa9, 0x0a, 0x35, 0x47, 0xeb, 0x6a, 0xe1, 0x06, 0xa5, 0x7f, 0x8a, 0xc0, 0x8c, 0x3f, 0x68, 0x5f,
- 0x1d, 0xee, 0x82, 0x42, 0xb7, 0xc5, 0x32, 0x35, 0xcf, 0xe1, 0x48, 0xbe, 0x62, 0x7c, 0xf7, 0x0c,
- 0xc5, 0x38, 0x86, 0x42, 0xcb, 0x4d, 0x73, 0xdb, 0x61, 0xdd, 0x71, 0x1b, 0xa3, 0x66, 0x48, 0x98,
- 0x56, 0xd8, 0x81, 0xac, 0xdf, 0x88, 0x53, 0x50, 0x0d, 0xe2, 0x23, 0xfd, 0xbd, 0x31, 0x41, 0x7f,
- 0xfe, 0x52, 0xab, 0xa2, 0x69, 0xe1, 0xb7, 0x00, 0x1d, 0xef, 0x3b, 0x6c, 0x59, 0xa6, 0xb9, 0x65,
- 0xd9, 0x1c, 0xb5, 0x6f, 0x6f, 0x5f, 0x6c, 0x6e, 0xa1, 0x61, 0x87, 0x23, 0xd3, 0x7f, 0x88, 0x40,
- 0x96, 0x9b, 0x88, 0xc0, 0x1c, 0xbd, 0x01, 0x39, 0xa6, 0xb4, 0x2c, 0x7b, 0x4f, 0xeb, 0x09, 0x22,
- 0x9b, 0x5f, 0x44, 0x55, 0xfc, 0x8a, 0x80, 0xf9, 0x6b, 0x90, 0x71, 0xb1, 0x6e, 0x0e, 0x19, 0x23,
- 0x8c, 0x31, 0x4d, 0x89, 0x01, 0xd3, 0x6b, 0x90, 0x65, 0x66, 0x75, 0xc8, 0x15, 0x65, 0x5c, 0x19,
- 0x46, 0x0d, 0xd8, 0xaa, 0x90, 0x21, 0x3d, 0xdd, 0x1e, 0x72, 0xc5, 0xd8, 0xa2, 0x9e, 0x93, 0x47,
- 0x4b, 0xd3, 0x36, 0x61, 0x5b, 0xea, 0x62, 0xd2, 0xef, 0x62, 0xad, 0xe7, 0xf0, 0x80, 0x31, 0xaa,
- 0x26, 0x39, 0x65, 0xcb, 0x21, 0x68, 0x87, 0x89, 0x0a, 0x5b, 0x0b, 0xcd, 0xe4, 0x8b, 0x93, 0x8f,
- 0xb3, 0x5e, 0x6e, 0x4f, 0xbe, 0x9c, 0xea, 0x0c, 0x19, 0x25, 0x94, 0xfe, 0x4a, 0x82, 0x6b, 0xd4,
- 0xe9, 0xe7, 0x5a, 0xb1, 0xc6, 0x72, 0xff, 0xbe, 0x74, 0xea, 0x90, 0x60, 0x81, 0x43, 0xe0, 0xc8,
- 0xae, 0x1c, 0x0d, 0x8a, 0x71, 0xca, 0x7d, 0x69, 0xcb, 0x15, 0xa7, 0xc0, 0x4d, 0x16, 0xe2, 0x79,
- 0xae, 0x6e, 0x13, 0x8b, 0x06, 0x3b, 0x74, 0xdb, 0xba, 0xb8, 0xdb, 0xc6, 0x2e, 0xdf, 0x8c, 0xb4,
- 0x3a, 0x37, 0x52, 0xb9, 0xce, 0xeb, 0x4a, 0x05, 0xc8, 0x8f, 0x0f, 0x39, 0xc8, 0x4e, 0xfc, 0x7f,
- 0xb8, 0xba, 0x81, 0x1f, 0x9f, 0x34, 0x9b, 0x2a, 0x24, 0xb8, 0xfe, 0xf2, 0x45, 0xfe, 0xe6, 0xb8,
- 0x56, 0x0d, 0x5f, 0x7f, 0x94, 0xd9, 0x48, 0xb7, 0x59, 0x03, 0xd5, 0x6f, 0x58, 0xfa, 0x18, 0xae,
- 0x8d, 0xa1, 0x07, 0xdb, 0xf7, 0x1e, 0xc4, 0x69, 0xf4, 0x2a, 0x3c, 0x9b, 0xec, 0xf1, 0xc8, 0xe8,
- 0x38, 0x7a, 0x8b, 0xf2, 0xab, 0xa2, 0x59, 0x49, 0x65, 0x69, 0x95, 0x7e, 0x17, 0x53, 0x09, 0xb9,
- 0x67, 0x11, 0x0f, 0xbd, 0x0f, 0x69, 0x21, 0x11, 0x54, 0x50, 0xfc, 0x61, 0x9f, 0x23, 0x54, 0x29,
- 0x37, 0x00, 0x21, 0xa5, 0xbf, 0x96, 0x60, 0xb6, 0xee, 0x3a, 0xbd, 0x1e, 0x36, 0x85, 0xad, 0xe0,
- 0x6b, 0xe1, 0x9b, 0x08, 0x29, 0x64, 0x22, 0x36, 0x20, 0xd2, 0xac, 0x8b, 0xa0, 0xe2, 0xee, 0x65,
- 0x63, 0x95, 0x66, 0x1d, 0xbd, 0xcd, 0x17, 0xa4, 0x4f, 0x98, 0xfe, 0xcc, 0x1e, 0x0b, 0x5c, 0x47,
- 0xc4, 0x94, 0x31, 0xaa, 0xa2, 0x41, 0xe9, 0xa7, 0x09, 0xb8, 0x12, 0x5e, 0xe4, 0xe5, 0x9a, 0x3f,
- 0xf0, 0x4f, 0x20, 0x61, 0xd9, 0x26, 0x7e, 0x82, 0x27, 0xd2, 0x93, 0x27, 0x41, 0x94, 0xc5, 0x7a,
- 0x34, 0x29, 0x8c, 0x1f, 0x2a, 0x0b, 0x4c, 0xf4, 0x61, 0xe0, 0x9e, 0xf2, 0xd4, 0xd5, 0x9d, 0x17,
- 0x46, 0xaf, 0x8f, 0xb9, 0xaa, 0x23, 0x9e, 0x20, 0x33, 0x28, 0x5f, 0x92, 0x27, 0xd8, 0x82, 0x9c,
- 0x65, 0x7b, 0xd8, 0xed, 0x60, 0xfd, 0x11, 0x75, 0x6c, 0x68, 0xf7, 0x22, 0x83, 0x35, 0xa9, 0x1b,
- 0xa1, 0x84, 0x00, 0xb8, 0x3b, 0xf2, 0x09, 0xcc, 0x86, 0x41, 0xfd, 0x2d, 0x38, 0x3b, 0xab, 0xc5,
- 0x56, 0x78, 0x08, 0xeb, 0x27, 0x8f, 0x42, 0x40, 0x4d, 0xb1, 0xec, 0x0f, 0x20, 0xce, 0x93, 0x15,
- 0x22, 0x45, 0x7c, 0xf7, 0x45, 0x97, 0x9d, 0x27, 0x41, 0x54, 0x81, 0x56, 0xf8, 0x63, 0x09, 0xd2,
- 0xe1, 0xed, 0x46, 0x16, 0xc8, 0x6c, 0xec, 0xbe, 0x4a, 0x8b, 0x56, 0x37, 0xa8, 0x2f, 0xcb, 0x2a,
- 0xd9, 0x1e, 0xbc, 0xf7, 0xc2, 0x7b, 0xc0, 0x21, 0x84, 0x28, 0x35, 0x4d, 0xea, 0x20, 0x99, 0xae,
- 0xd3, 0x1b, 0x5e, 0x11, 0x44, 0x55, 0x99, 0x12, 0xa8, 0xcf, 0x57, 0xf8, 0x6d, 0x48, 0x06, 0x82,
- 0x12, 0x4a, 0x15, 0x44, 0xbf, 0xc0, 0x54, 0xc1, 0x99, 0xfd, 0xd7, 0x21, 0x33, 0xb2, 0x62, 0xe8,
- 0x6a, 0x30, 0x86, 0x58, 0x35, 0xce, 0xc7, 0x70, 0x2e, 0x4a, 0xe9, 0x97, 0x71, 0x98, 0x3d, 0x49,
- 0xd3, 0x7e, 0x04, 0x4a, 0x48, 0x6f, 0x69, 0x1d, 0x8b, 0x78, 0xe2, 0x3c, 0xdd, 0x3a, 0x3b, 0xb2,
- 0x0d, 0x29, 0x3f, 0x21, 0x2d, 0x59, 0x77, 0x54, 0x25, 0x7e, 0x1f, 0xb2, 0x26, 0x1f, 0xb8, 0x48,
- 0xf4, 0x88, 0x1b, 0xc3, 0xb3, 0xe2, 0xc8, 0x13, 0x14, 0xa0, 0x40, 0xcf, 0x98, 0xa1, 0x2a, 0xc2,
- 0xee, 0x4e, 0x7c, 0xf4, 0x20, 0x3f, 0x65, 0x99, 0xec, 0xf4, 0x64, 0xaa, 0xad, 0xa3, 0x41, 0x31,
- 0x27, 0xb0, 0xfc, 0x84, 0xd4, 0xa5, 0x77, 0x2a, 0x67, 0x8e, 0x01, 0x9a, 0xd4, 0xea, 0xd2, 0x7a,
- 0xda, 0xf1, 0xf4, 0xd0, 0xea, 0xd2, 0x73, 0x74, 0x79, 0xab, 0x4b, 0x3f, 0x9b, 0x26, 0xfa, 0x7d,
- 0x09, 0x72, 0x3c, 0x3b, 0xdd, 0xed, 0x7b, 0x3a, 0xbf, 0x72, 0xf0, 0xe3, 0xd3, 0x8f, 0x8e, 0x06,
- 0xc5, 0x19, 0xb6, 0x20, 0xeb, 0xa2, 0x8e, 0x75, 0x5b, 0x7d, 0xd1, 0x6e, 0x87, 0x28, 0x22, 0x66,
- 0x0b, 0x08, 0x26, 0x5a, 0x83, 0x2c, 0x0f, 0xb6, 0xfd, 0x47, 0x04, 0x2c, 0x70, 0xcd, 0x54, 0x5f,
- 0x7d, 0x3e, 0x28, 0xce, 0x9f, 0x20, 0x59, 0x3c, 0x4e, 0x7f, 0xc0, 0x79, 0xd5, 0xcc, 0x6e, 0xb8,
- 0x88, 0x0c, 0xc8, 0x04, 0xa2, 0x71, 0xd8, 0x13, 0x71, 0xee, 0xe5, 0x4d, 0x59, 0xda, 0x97, 0x11,
- 0x8a, 0x89, 0xf6, 0x60, 0xc6, 0xef, 0x84, 0x1b, 0x74, 0x92, 0x4f, 0x7e, 0x21, 0xdd, 0xf8, 0x62,
- 0xcd, 0x67, 0x4d, 0x44, 0xca, 0xe5, 0x2a, 0xcc, 0x9d, 0xe8, 0xe5, 0x7c, 0x16, 0x87, 0xab, 0xa3,
- 0x8a, 0x30, 0xf0, 0x43, 0xb4, 0x71, 0x0b, 0xf9, 0xde, 0xc4, 0xca, 0xd4, 0xc7, 0xe0, 0xca, 0xcc,
- 0x2f, 0x8d, 0xdb, 0xc8, 0x4f, 0xc6, 0x6c, 0xe4, 0x0b, 0xe0, 0x33, 0xf1, 0x1a, 0xc3, 0xf7, 0x0d,
- 0xe5, 0x87, 0x81, 0x2d, 0xe0, 0x59, 0x9b, 0xf7, 0x5f, 0x00, 0x9e, 0xb5, 0xf7, 0x8b, 0x81, 0x35,
- 0xf8, 0x47, 0x09, 0x32, 0x23, 0x33, 0xfb, 0x75, 0x9a, 0x83, 0xad, 0xc0, 0x1b, 0xe2, 0x4f, 0x4d,
- 0xbe, 0x77, 0xf1, 0x69, 0x8d, 0x3a, 0x49, 0x85, 0xbf, 0x95, 0x20, 0x33, 0xb2, 0x90, 0x5f, 0x92,
- 0x21, 0xf9, 0xe2, 0x47, 0xde, 0x86, 0xec, 0xe8, 0x16, 0x85, 0xfa, 0x90, 0xbe, 0x98, 0x3e, 0x4a,
- 0xdf, 0x85, 0x38, 0xa7, 0x20, 0x04, 0xd9, 0x0f, 0x2a, 0xcd, 0xed, 0xe6, 0xc6, 0xb2, 0xb6, 0xb4,
- 0xa9, 0x6a, 0xcb, 0x35, 0x65, 0x0a, 0xa5, 0x41, 0xae, 0x37, 0xee, 0x35, 0x28, 0x51, 0x91, 0x50,
- 0x0a, 0x12, 0xac, 0xd4, 0xa8, 0x2b, 0x91, 0x52, 0x15, 0x14, 0x8e, 0xbd, 0x8b, 0xa9, 0x61, 0xa0,
- 0x7e, 0x3f, 0x2a, 0xc3, 0x2c, 0x73, 0xd2, 0xbb, 0xd4, 0x7f, 0xa3, 0xa6, 0x50, 0x0b, 0x79, 0xcf,
- 0xb9, 0xa0, 0x8a, 0x1a, 0xc5, 0x0d, 0xbd, 0x8b, 0x4b, 0x7f, 0x13, 0x83, 0xdc, 0x10, 0xc4, 0x37,
- 0x8b, 0xaf, 0x83, 0x4c, 0x2c, 0xfb, 0x40, 0x1b, 0x5e, 0xfc, 0xf3, 0x44, 0x9a, 0x65, 0x1f, 0xec,
- 0xa8, 0x4d, 0x35, 0x41, 0x2b, 0x77, 0x5c, 0x0b, 0xad, 0x42, 0xcc, 0xe9, 0x79, 0x7e, 0x0c, 0xf9,
- 0x9d, 0x33, 0x96, 0xe2, 0x58, 0x1f, 0xe5, 0xcd, 0x9e, 0x27, 0x32, 0x00, 0x0c, 0x03, 0xfd, 0xa5,
- 0x34, 0x8c, 0x7a, 0x78, 0xb4, 0xf8, 0xf6, 0x85, 0xf0, 0xf8, 0x02, 0x88, 0x5b, 0xd8, 0x0f, 0xe8,
- 0x41, 0x7d, 0x3e, 0x28, 0xe6, 0xc6, 0x17, 0x88, 0x5c, 0xf2, 0x7a, 0xd6, 0x1f, 0x22, 0x5a, 0xe5,
- 0x17, 0x83, 0xc3, 0x85, 0x66, 0x06, 0x61, 0xc2, 0xbb, 0xd7, 0xcc, 0xc8, 0x46, 0x14, 0xf6, 0x20,
- 0x1d, 0x1e, 0xfd, 0x09, 0x69, 0xf7, 0xca, 0x68, 0x5a, 0xe2, 0x8d, 0x89, 0x56, 0x46, 0x84, 0x84,
- 0xa1, 0x34, 0xfb, 0x77, 0x21, 0x19, 0x2c, 0xfb, 0x45, 0x2e, 0x09, 0xb8, 0x8e, 0x0f, 0x92, 0x65,
- 0xd3, 0x4a, 0xbc, 0x34, 0x88, 0x40, 0x5a, 0xc5, 0xc4, 0xe9, 0x3c, 0xc2, 0x26, 0xf5, 0x79, 0x82,
- 0x77, 0x39, 0xd2, 0xe4, 0xef, 0x72, 0x2a, 0x90, 0x0c, 0xb2, 0x99, 0x17, 0x79, 0x9b, 0x32, 0x6c,
- 0x85, 0xee, 0xc2, 0x57, 0xc2, 0x4f, 0x50, 0x9c, 0xbe, 0x6d, 0xea, 0xee, 0xa1, 0xe6, 0x62, 0xdd,
- 0xd8, 0xc7, 0xa6, 0xb8, 0x0d, 0x7a, 0x29, 0xf4, 0x06, 0x45, 0x70, 0xa8, 0x9c, 0x01, 0x7d, 0x04,
- 0x99, 0xa0, 0x11, 0xb5, 0xc5, 0xcc, 0x83, 0xca, 0x2e, 0x7e, 0xeb, 0x6c, 0xef, 0x2f, 0x98, 0x75,
- 0xd9, 0xc7, 0xa3, 0x36, 0x57, 0x4d, 0xb7, 0x43, 0xa5, 0xd2, 0xbb, 0x90, 0x0e, 0xd7, 0x22, 0x19,
- 0x62, 0x1b, 0x9b, 0x1b, 0x0d, 0x7e, 0xa6, 0xab, 0x95, 0xda, 0xda, 0x52, 0xf3, 0xde, 0x3d, 0x45,
- 0xa2, 0xf4, 0xc6, 0x87, 0xcd, 0x6d, 0x25, 0x42, 0x4f, 0xb7, 0xda, 0x68, 0x6d, 0x57, 0xd4, 0x6d,
- 0x25, 0x5a, 0xc2, 0x90, 0x09, 0xf7, 0x44, 0x75, 0x26, 0x75, 0x31, 0x19, 0x61, 0x24, 0xca, 0xbe,
- 0x31, 0xe1, 0x58, 0x7d, 0xd9, 0x73, 0xc3, 0xa8, 0xa5, 0x1f, 0x47, 0x00, 0x0d, 0x45, 0x26, 0xa4,
- 0xa0, 0xc7, 0x3b, 0x8b, 0x5c, 0xbe, 0x33, 0xf4, 0x93, 0xb3, 0x33, 0xd9, 0x51, 0x96, 0xc9, 0x66,
- 0x67, 0xf7, 0xd7, 0x9a, 0xcd, 0x16, 0x2e, 0xcc, 0x7f, 0xc6, 0x00, 0xd5, 0x5c, 0xac, 0x7b, 0x98,
- 0xea, 0x63, 0x72, 0x56, 0x06, 0xa2, 0x0a, 0xd3, 0x3c, 0x5c, 0x8d, 0x5c, 0x24, 0x5c, 0x15, 0x8b,
- 0xc2, 0x9b, 0xa2, 0x1f, 0x40, 0xda, 0x70, 0x3a, 0xfd, 0xae, 0xad, 0xb1, 0xb7, 0x04, 0x22, 0x3c,
- 0xf8, 0xf6, 0x59, 0x47, 0xfb, 0xd8, 0xe0, 0xca, 0x35, 0xa7, 0x43, 0xcb, 0xc1, 0x73, 0x32, 0x06,
- 0xc8, 0x38, 0xd0, 0xcb, 0x90, 0x0c, 0xd4, 0x0c, 0x13, 0xeb, 0xa4, 0x3a, 0x24, 0xa0, 0x45, 0x98,
- 0xd6, 0x89, 0xe6, 0xec, 0x32, 0xcf, 0xfd, 0xbc, 0x73, 0xa7, 0xc6, 0x74, 0xb2, 0xb9, 0x8b, 0xde,
- 0x82, 0xcc, 0xee, 0x43, 0x1e, 0xcd, 0x70, 0xb3, 0xc2, 0x9f, 0x78, 0xcc, 0x1c, 0x0d, 0x8a, 0xa9,
- 0xa5, 0xfb, 0x6c, 0xb2, 0xd4, 0xa8, 0xa8, 0xa9, 0xdd, 0x87, 0x41, 0x01, 0xdd, 0x86, 0x5c, 0x57,
- 0x7f, 0xa2, 0xed, 0xba, 0xba, 0x21, 0xdc, 0xf7, 0x0e, 0xd7, 0x95, 0x92, 0x3a, 0xd3, 0xd5, 0x9f,
- 0x2c, 0x09, 0x7a, 0xd3, 0xec, 0xe0, 0xc2, 0x7f, 0x48, 0x90, 0x10, 0x33, 0x42, 0x3d, 0x00, 0xb1,
- 0x3c, 0x96, 0xc9, 0x45, 0x3d, 0x53, 0xbd, 0x7f, 0x34, 0x28, 0x26, 0x6b, 0x8c, 0xda, 0xac, 0x93,
- 0xe7, 0x83, 0xe2, 0xfb, 0x2f, 0xaa, 0xca, 0x7d, 0x10, 0x35, 0xc9, 0x3b, 0x69, 0x9a, 0x2c, 0xcd,
- 0xba, 0xaf, 0x13, 0x6d, 0xdf, 0x22, 0x9e, 0xb3, 0xe7, 0xea, 0x5d, 0x71, 0xbb, 0x98, 0xde, 0xd7,
- 0xc9, 0x8a, 0x4f, 0x43, 0x05, 0xea, 0x88, 0x3d, 0xe2, 0x4f, 0x41, 0xb8, 0x76, 0x09, 0xca, 0x68,
- 0x11, 0xae, 0x04, 0x8d, 0x35, 0x3a, 0xe9, 0x76, 0xdf, 0x38, 0xc0, 0xcc, 0x3e, 0x52, 0x4d, 0x3e,
- 0x1b, 0x54, 0xae, 0xeb, 0x4f, 0xaa, 0xbc, 0xaa, 0x74, 0x05, 0x66, 0x43, 0xdb, 0x1a, 0xb8, 0xcd,
- 0x18, 0x94, 0x75, 0x6b, 0xcf, 0xd5, 0xc3, 0x2f, 0x2d, 0xef, 0xc3, 0xcc, 0xd8, 0x4b, 0x66, 0xa1,
- 0x6c, 0xc3, 0xe9, 0xc1, 0xd1, 0xa7, 0xcf, 0xe5, 0x1a, 0x2f, 0xfa, 0x81, 0x48, 0xd6, 0x18, 0x29,
- 0x97, 0x66, 0x21, 0x17, 0x74, 0x13, 0xf4, 0xfd, 0xab, 0x34, 0x24, 0xb6, 0xf4, 0xc3, 0x8e, 0xa3,
- 0x9b, 0x68, 0x1e, 0x52, 0xfe, 0xf3, 0x11, 0xbf, 0xbf, 0xa4, 0x1a, 0x26, 0x21, 0x0b, 0xb2, 0x7d,
- 0x82, 0x5d, 0x2a, 0x0f, 0x1a, 0x7b, 0x58, 0xcd, 0xad, 0x47, 0xb5, 0xfa, 0x7c, 0x50, 0xbc, 0x3b,
- 0xd9, 0xf6, 0x60, 0xa3, 0xef, 0x5a, 0xde, 0x61, 0xb9, 0x75, 0xff, 0xde, 0x8e, 0x80, 0xa2, 0x87,
- 0xd8, 0x51, 0x33, 0xfd, 0x70, 0x51, 0x3c, 0xc6, 0xa1, 0x4b, 0xad, 0x75, 0x2d, 0xc3, 0x75, 0x88,
- 0x7f, 0xdf, 0x21, 0xa8, 0xeb, 0x8c, 0x88, 0x6e, 0xc0, 0xcc, 0xae, 0x65, 0xb3, 0xbb, 0x36, 0x9f,
- 0x8f, 0x5f, 0x75, 0x64, 0x7d, 0xb2, 0x60, 0x7c, 0x04, 0xd9, 0xd0, 0x03, 0x1c, 0x2a, 0x66, 0x71,
- 0x26, 0x66, 0x9b, 0x47, 0x83, 0x62, 0x66, 0x78, 0x6c, 0xb9, 0xa8, 0x5d, 0xc6, 0x6b, 0xc8, 0x0c,
- 0xbb, 0xa1, 0x82, 0x36, 0x07, 0xd3, 0xec, 0xd9, 0x3d, 0x7f, 0x73, 0xa7, 0xf2, 0x02, 0xfa, 0x0e,
- 0x4c, 0x77, 0xb0, 0x4e, 0xb0, 0x78, 0x4e, 0x37, 0x7f, 0x86, 0x22, 0x60, 0xef, 0xd6, 0x55, 0xce,
- 0x8e, 0xaa, 0x10, 0xe7, 0xb7, 0xa7, 0xec, 0xce, 0xf3, 0x78, 0xb2, 0xf8, 0xd4, 0x97, 0x93, 0x2b,
- 0x53, 0xaa, 0x68, 0x89, 0x1a, 0x90, 0x10, 0xcf, 0x50, 0xd8, 0x4d, 0xe8, 0xb9, 0xe9, 0x8f, 0xd0,
- 0xc5, 0xfe, 0xca, 0x94, 0xea, 0xb7, 0x45, 0xdb, 0xfe, 0xcb, 0x1b, 0x6e, 0x51, 0xc4, 0xc3, 0xa2,
- 0xf2, 0x84, 0x2e, 0xf2, 0x10, 0x70, 0x04, 0x85, 0x4e, 0xd0, 0x62, 0xb7, 0x27, 0xec, 0x42, 0xf5,
- 0xec, 0x09, 0x8e, 0xdc, 0xc4, 0xd3, 0x09, 0xf2, 0x96, 0x68, 0x03, 0xc0, 0x08, 0xac, 0x1c, 0xbb,
- 0x6a, 0x4d, 0x2d, 0x7e, 0xfd, 0x22, 0xfe, 0xe5, 0xca, 0x94, 0x1a, 0x42, 0x40, 0xf7, 0x21, 0x65,
- 0x0c, 0x8f, 0x6d, 0x7e, 0x86, 0x01, 0xbe, 0x79, 0x21, 0xdd, 0xbd, 0x42, 0xf5, 0xf5, 0x90, 0x3a,
- 0xaa, 0xaf, 0x95, 0x71, 0x7d, 0xdd, 0x80, 0x8c, 0xc8, 0x54, 0xf1, 0x5f, 0x6c, 0x88, 0xd7, 0x42,
- 0x61, 0x29, 0xf1, 0x7f, 0xd3, 0x51, 0x6e, 0xd8, 0x86, 0x63, 0x62, 0xb3, 0x41, 0xcb, 0xaa, 0x48,
- 0xcc, 0xb3, 0x02, 0x41, 0xcb, 0x90, 0x35, 0x3a, 0x58, 0xb7, 0xfb, 0x3d, 0x1f, 0x07, 0x4d, 0x88,
- 0x93, 0x11, 0xed, 0x04, 0xd0, 0x06, 0xa0, 0x5d, 0xf6, 0x64, 0x26, 0x3c, 0x2a, 0x76, 0x63, 0x3b,
- 0x09, 0x98, 0xc2, 0xda, 0xaa, 0xc3, 0x91, 0xa1, 0x57, 0x21, 0x63, 0x3b, 0xb6, 0xa1, 0xdb, 0x06,
- 0xee, 0x30, 0xcb, 0xca, 0x2f, 0x79, 0x47, 0x89, 0xe8, 0x63, 0xc8, 0x92, 0x91, 0xa0, 0x2a, 0x7f,
- 0x85, 0xf5, 0xf8, 0x8d, 0x8b, 0xa6, 0x61, 0x57, 0xa6, 0xd4, 0x31, 0x24, 0xf4, 0x1b, 0xa0, 0x78,
- 0x63, 0x77, 0x35, 0xec, 0xba, 0xf8, 0xec, 0x97, 0x71, 0xa7, 0xdc, 0x48, 0xad, 0x4c, 0xa9, 0xc7,
- 0xd0, 0xd0, 0x27, 0x30, 0x43, 0x46, 0xdf, 0x90, 0xe7, 0xaf, 0xb1, 0x0e, 0xbe, 0x79, 0xe6, 0x85,
- 0xc3, 0x49, 0xcf, 0xee, 0x57, 0xa6, 0xd4, 0x71, 0x2c, 0x0a, 0x6f, 0x8f, 0x5e, 0xf9, 0xe4, 0xf3,
- 0xe7, 0xc2, 0x9f, 0x7c, 0x05, 0x45, 0xe1, 0xc7, 0xb0, 0xd0, 0x1a, 0x24, 0xbb, 0xbe, 0xad, 0xc8,
- 0xbf, 0x74, 0x6e, 0x1c, 0x32, 0x6e, 0xbe, 0x56, 0xa6, 0xd4, 0x61, 0xfb, 0x6a, 0x12, 0x12, 0xe2,
- 0x6a, 0x30, 0xb8, 0x70, 0x4f, 0x28, 0x72, 0xe9, 0x57, 0x32, 0xc8, 0x81, 0x0f, 0xba, 0x00, 0x28,
- 0xf0, 0x1a, 0x86, 0x2f, 0x35, 0xa9, 0x09, 0x8a, 0xac, 0x4c, 0xa9, 0x39, 0xbf, 0x6e, 0xf8, 0x58,
- 0xf3, 0x06, 0xcc, 0x74, 0x1d, 0xd3, 0xda, 0xb5, 0x86, 0x8a, 0x9f, 0x27, 0x82, 0xb3, 0x3e, 0x59,
- 0x28, 0xfe, 0xbb, 0x23, 0x8f, 0x81, 0x26, 0xf9, 0x41, 0x00, 0x1d, 0x7d, 0xf0, 0x5a, 0x88, 0x1a,
- 0x22, 0xb7, 0x6f, 0xb3, 0x5b, 0x40, 0x91, 0x0c, 0xe0, 0xfe, 0x55, 0x46, 0x50, 0x45, 0x3c, 0x5f,
- 0x1b, 0xd3, 0xcc, 0xb7, 0xce, 0xd5, 0xcc, 0xfe, 0xdc, 0x57, 0xa4, 0x40, 0x35, 0x2f, 0x8d, 0xab,
- 0xe6, 0xdb, 0xe7, 0xab, 0xe6, 0x10, 0x4c, 0xa0, 0x9b, 0x77, 0x4e, 0xd4, 0xcd, 0x0b, 0x13, 0x1e,
- 0x9c, 0x10, 0xe2, 0xa8, 0x72, 0xae, 0x8d, 0x29, 0xe7, 0x5b, 0xe7, 0x2a, 0xe7, 0xf0, 0x1c, 0x85,
- 0x76, 0xde, 0x3c, 0x41, 0x3b, 0xbf, 0x39, 0x91, 0x76, 0x0e, 0x81, 0x85, 0xd5, 0xb3, 0x7a, 0x92,
- 0x7a, 0x2e, 0x4f, 0xa6, 0x9e, 0x43, 0x90, 0x23, 0xfa, 0xf9, 0xfb, 0xc7, 0x74, 0x8f, 0x72, 0xfe,
- 0xe1, 0x3d, 0x31, 0x03, 0xb4, 0x22, 0x1d, 0x53, 0x3e, 0xfa, 0x09, 0xca, 0x27, 0xc7, 0xe0, 0xdf,
- 0xba, 0x80, 0xf2, 0x09, 0x75, 0x70, 0x5c, 0xfb, 0x7c, 0x08, 0xe9, 0xb0, 0xc6, 0x60, 0x8f, 0x6e,
- 0xce, 0xd6, 0x6d, 0xa7, 0xfc, 0xe0, 0x85, 0xc9, 0x40, 0xa8, 0x0a, 0xfd, 0xe0, 0xb8, 0xe2, 0x99,
- 0x3d, 0x17, 0xfc, 0x94, 0xdb, 0xe9, 0x15, 0xe9, 0xb8, 0xe6, 0xb9, 0x17, 0xd6, 0x3c, 0x73, 0xe7,
- 0xda, 0xee, 0x63, 0x1e, 0xed, 0x8a, 0x14, 0x56, 0x3d, 0x00, 0xb2, 0xff, 0xf8, 0x21, 0xa4, 0x86,
- 0x4a, 0x7f, 0x24, 0x41, 0x74, 0xd5, 0x69, 0xa3, 0xec, 0x30, 0x35, 0xc9, 0x92, 0x8a, 0xef, 0x0d,
- 0xd9, 0x45, 0xb4, 0xf7, 0xb5, 0x33, 0xfa, 0x0e, 0x52, 0xb9, 0x41, 0x23, 0xf4, 0x0e, 0x24, 0x7a,
- 0xdc, 0x9b, 0x16, 0x9a, 0xa6, 0x74, 0x56, 0x7b, 0xce, 0xa9, 0xfa, 0x4d, 0x6e, 0xdf, 0x0a, 0xff,
- 0xf6, 0x6d, 0xdd, 0x31, 0x31, 0xca, 0x02, 0x6c, 0xe9, 0x84, 0xf4, 0xf6, 0x5d, 0x9d, 0x60, 0x65,
- 0x0a, 0x25, 0x20, 0xba, 0xb6, 0xde, 0x52, 0xa4, 0xdb, 0x1f, 0x86, 0x13, 0x89, 0x75, 0xb5, 0xd2,
- 0xdc, 0x68, 0x6e, 0x2c, 0x6b, 0x1b, 0x95, 0xf5, 0x46, 0x4b, 0x99, 0x42, 0x79, 0x98, 0xfb, 0xa0,
- 0xd2, 0xdc, 0x16, 0x99, 0x45, 0xad, 0xb9, 0xb1, 0xdd, 0x50, 0x1f, 0x54, 0xee, 0x29, 0x12, 0xba,
- 0x0a, 0x48, 0xdd, 0xac, 0xad, 0xb5, 0xea, 0x55, 0xad, 0xb6, 0xb9, 0xbe, 0x55, 0xa9, 0x6d, 0x37,
- 0x37, 0x37, 0x94, 0x08, 0x92, 0x21, 0x56, 0xdf, 0xdc, 0x68, 0x28, 0x70, 0xfb, 0x27, 0x31, 0x88,
- 0xb1, 0x1c, 0xc6, 0xab, 0x90, 0xda, 0xd9, 0x68, 0x6d, 0x35, 0x6a, 0xcd, 0xa5, 0x66, 0xa3, 0xae,
- 0x4c, 0x15, 0x66, 0x9f, 0x3e, 0x9b, 0x9f, 0xa1, 0x55, 0x3b, 0x36, 0xe9, 0x61, 0x83, 0x29, 0x59,
- 0x54, 0x80, 0x78, 0xb5, 0x52, 0x5b, 0xdb, 0xd9, 0x52, 0xa4, 0x42, 0xf6, 0xe9, 0xb3, 0x79, 0xa0,
- 0x0c, 0x5c, 0xc1, 0xa1, 0x97, 0x79, 0x8e, 0x63, 0x53, 0x6d, 0x28, 0x91, 0xc2, 0xcc, 0xd3, 0x67,
- 0xf3, 0x29, 0x96, 0x3a, 0x11, 0x4a, 0xea, 0x06, 0x64, 0x5a, 0xb5, 0x95, 0xc6, 0x7a, 0x45, 0xab,
- 0xad, 0x54, 0x36, 0x96, 0x1b, 0x4a, 0xb4, 0x30, 0xf7, 0xf4, 0xd9, 0xbc, 0x32, 0x2e, 0xe8, 0xb4,
- 0x8b, 0xe6, 0xfa, 0xd6, 0xa6, 0xba, 0xad, 0xc4, 0x86, 0x5d, 0x70, 0xfd, 0x82, 0x4a, 0x00, 0xbc,
- 0xf5, 0x52, 0xa3, 0x51, 0x57, 0xa6, 0x0b, 0xe8, 0xe9, 0xb3, 0xf9, 0x2c, 0xad, 0x1f, 0xaa, 0x0d,
- 0xf4, 0x1a, 0xa4, 0x6b, 0x6a, 0xa3, 0xb2, 0xdd, 0xd0, 0x5a, 0xdb, 0x95, 0xed, 0x96, 0x12, 0x1f,
- 0xce, 0x24, 0xa4, 0x0a, 0x50, 0x19, 0x72, 0x95, 0x9d, 0xed, 0x4d, 0x6d, 0x84, 0x37, 0x51, 0xb8,
- 0xf6, 0xf4, 0xd9, 0xfc, 0x2c, 0xe5, 0xad, 0xf4, 0x3d, 0x27, 0xcc, 0xff, 0x75, 0x50, 0x46, 0xc6,
- 0xaf, 0x2d, 0xd7, 0x14, 0xb9, 0x70, 0xf5, 0xe9, 0xb3, 0x79, 0x34, 0x3e, 0x85, 0xe5, 0x1a, 0xfa,
- 0x16, 0x5c, 0xdd, 0xfe, 0x68, 0xab, 0x51, 0x6f, 0xb4, 0x6a, 0xda, 0xe8, 0xb4, 0x93, 0x85, 0xfc,
- 0xd3, 0x67, 0xf3, 0x73, 0xb4, 0xcd, 0xb1, 0xa9, 0xbf, 0x09, 0x4a, 0x6b, 0x5b, 0x6d, 0x54, 0xd6,
- 0xb5, 0xe6, 0xc6, 0x72, 0xa3, 0xc5, 0x36, 0x0b, 0x86, 0x43, 0x1a, 0x3b, 0xb4, 0x74, 0x0a, 0x1b,
- 0x8d, 0x0f, 0xc6, 0xf0, 0x53, 0x43, 0xfe, 0xb1, 0x73, 0x88, 0xe6, 0x21, 0xb9, 0xde, 0x5c, 0x56,
- 0x2b, 0x0c, 0x37, 0x5d, 0xc8, 0x3d, 0x7d, 0x36, 0x9f, 0xa1, 0x7c, 0xc1, 0xa9, 0x2a, 0xc8, 0x3f,
- 0xfe, 0xf3, 0xeb, 0x53, 0x7f, 0xf1, 0xd3, 0xeb, 0x53, 0xd5, 0x9b, 0x9f, 0xfe, 0xdb, 0xf5, 0xa9,
- 0x4f, 0x8f, 0xae, 0x4b, 0xbf, 0x38, 0xba, 0x2e, 0x7d, 0x76, 0x74, 0x5d, 0xfa, 0xd7, 0xa3, 0xeb,
- 0xd2, 0x1f, 0x7c, 0x7e, 0x7d, 0xea, 0x17, 0x9f, 0x5f, 0x9f, 0xfa, 0xec, 0xf3, 0xeb, 0x53, 0x1f,
- 0xc7, 0xb9, 0x5c, 0xb7, 0xe3, 0x2c, 0x34, 0x7c, 0xeb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc2,
- 0x1a, 0xd6, 0xda, 0xac, 0x3c, 0x00, 0x00,
+func init() { proto.RegisterFile("jobs/jobspb/jobs.proto", fileDescriptor_jobs_dec159c6138441c3) }
+
+var fileDescriptor_jobs_dec159c6138441c3 = []byte{
+ // 5016 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5b, 0x4d, 0x6c, 0x23, 0x47,
+ 0x76, 0x56, 0x93, 0x14, 0xd9, 0x7c, 0xfc, 0x51, 0xab, 0xa4, 0x99, 0xa1, 0xb9, 0xb6, 0xa8, 0xe5,
+ 0xda, 0x9e, 0x9f, 0xb5, 0x29, 0xaf, 0xbc, 0xeb, 0xb5, 0x67, 0xed, 0xb1, 0xf9, 0x27, 0x89, 0xd2,
+ 0xe8, 0x67, 0x9a, 0xd2, 0xf8, 0x67, 0xe3, 0xed, 0x34, 0xbb, 0x4b, 0x52, 0x47, 0x64, 0x37, 0xa7,
+ 0xab, 0x39, 0x33, 0xda, 0x00, 0x49, 0xb0, 0x41, 0x80, 0xc5, 0x9c, 0x12, 0x20, 0x9b, 0x43, 0x92,
+ 0x01, 0x02, 0x64, 0x17, 0xc8, 0x21, 0x97, 0x2c, 0x82, 0x24, 0x87, 0xdc, 0x72, 0xf1, 0x21, 0x01,
+ 0xf6, 0x68, 0xe4, 0xc0, 0x4d, 0xe4, 0x4b, 0x8e, 0x41, 0x02, 0x04, 0x8b, 0xb9, 0x24, 0xa8, 0x9f,
+ 0x6e, 0x36, 0xa9, 0x3f, 0x6a, 0x64, 0x6f, 0x2e, 0x52, 0xd7, 0xab, 0x57, 0x5f, 0x55, 0xbd, 0x7a,
+ 0xf5, 0xde, 0xab, 0x57, 0x45, 0xb8, 0xfa, 0x5b, 0x4e, 0x8b, 0x2c, 0xd0, 0x3f, 0xdd, 0x16, 0xfb,
+ 0x57, 0xea, 0xba, 0x8e, 0xe7, 0xa0, 0x17, 0x0c, 0xc7, 0x38, 0x70, 0x1d, 0xdd, 0xd8, 0x2f, 0x91,
+ 0x07, 0xed, 0x12, 0xab, 0xe1, 0x5c, 0xf9, 0x2b, 0xd8, 0x75, 0x1d, 0x97, 0xf2, 0xf3, 0x0f, 0xde,
+ 0x22, 0x3f, 0xbb, 0xe7, 0xec, 0x39, 0xec, 0x73, 0x81, 0x7e, 0x09, 0x2a, 0x62, 0x18, 0xdd, 0xd6,
+ 0x82, 0xa9, 0x7b, 0xba, 0xa0, 0xe5, 0x7c, 0x9a, 0xe5, 0xbc, 0xbe, 0xeb, 0xb8, 0x1d, 0xdd, 0xf3,
+ 0x31, 0xbe, 0x41, 0x1e, 0xb4, 0x17, 0x0c, 0xdd, 0xd3, 0xdb, 0xce, 0xde, 0x82, 0x89, 0x89, 0xd1,
+ 0x6d, 0x2d, 0x10, 0xcf, 0xed, 0x19, 0x5e, 0xcf, 0xc5, 0xa6, 0x60, 0x2a, 0x9c, 0xc0, 0xe4, 0x61,
+ 0x5b, 0xb7, 0x3d, 0x1f, 0xbf, 0xe7, 0x59, 0xed, 0x85, 0xfd, 0xb6, 0xb1, 0xe0, 0x59, 0x1d, 0x4c,
+ 0x3c, 0xbd, 0xd3, 0x15, 0x35, 0x5f, 0xa7, 0x4d, 0x89, 0xb1, 0x8f, 0x3b, 0xba, 0xb1, 0xaf, 0xdb,
+ 0x7b, 0xd8, 0x5d, 0xe0, 0x7d, 0x18, 0xdd, 0x96, 0x60, 0x79, 0xd9, 0x68, 0xf7, 0x88, 0x87, 0xdd,
+ 0x87, 0xd8, 0x25, 0x96, 0x63, 0x2f, 0x88, 0xa2, 0x26, 0xca, 0x9c, 0xab, 0xf8, 0xbb, 0x30, 0x79,
+ 0x17, 0xeb, 0x04, 0xa3, 0x4f, 0x20, 0x61, 0x3b, 0x26, 0xd6, 0x2c, 0x33, 0x27, 0xcd, 0x4b, 0x37,
+ 0x32, 0x95, 0xf2, 0x51, 0xbf, 0x10, 0xdf, 0x70, 0x4c, 0xdc, 0xa8, 0x3d, 0xeb, 0x17, 0xde, 0xdc,
+ 0xb3, 0xbc, 0xfd, 0x5e, 0xab, 0x64, 0x38, 0x9d, 0x85, 0x40, 0xa2, 0x66, 0x6b, 0xf0, 0xbd, 0xd0,
+ 0x3d, 0xd8, 0x5b, 0x10, 0xf2, 0x28, 0xf1, 0x66, 0x6a, 0x9c, 0x22, 0x36, 0x4c, 0x34, 0x0b, 0x93,
+ 0xb8, 0xeb, 0x18, 0xfb, 0xb9, 0xc8, 0xbc, 0x74, 0x23, 0xaa, 0xf2, 0xc2, 0xed, 0xd8, 0x7f, 0xfc,
+ 0x45, 0x41, 0x2a, 0xfe, 0x2c, 0x02, 0xd7, 0x2a, 0xba, 0x71, 0xd0, 0xeb, 0xd6, 0x6d, 0xc3, 0x3d,
+ 0xec, 0x7a, 0x96, 0x63, 0x6f, 0xb2, 0xbf, 0x04, 0x29, 0x10, 0x3d, 0xc0, 0x87, 0x6c, 0x3c, 0x69,
+ 0x95, 0x7e, 0xa2, 0xf7, 0x20, 0xd6, 0x71, 0x4c, 0xcc, 0x80, 0xb2, 0x8b, 0x37, 0x4b, 0xa7, 0x2e,
+ 0x6e, 0x69, 0x80, 0xb6, 0xee, 0x98, 0x58, 0x65, 0xcd, 0x50, 0x0b, 0xe4, 0x83, 0x0e, 0xd1, 0x2c,
+ 0x7b, 0xd7, 0xc9, 0x45, 0xe7, 0xa5, 0x1b, 0xa9, 0xc5, 0xdb, 0x67, 0x40, 0x9c, 0x32, 0xac, 0xd2,
+ 0xda, 0x7a, 0xb3, 0x61, 0xef, 0x3a, 0x95, 0xd4, 0x51, 0xbf, 0x90, 0x10, 0x05, 0x35, 0x71, 0xd0,
+ 0x21, 0xf4, 0x23, 0xbf, 0x09, 0x3e, 0x8d, 0x8e, 0xbf, 0xe7, 0x5a, 0x6c, 0xfc, 0x49, 0x95, 0x7e,
+ 0xa2, 0xd7, 0x00, 0x61, 0x8e, 0x87, 0x4d, 0x8d, 0x6a, 0x92, 0x46, 0x27, 0x18, 0x61, 0x13, 0x54,
+ 0x82, 0x9a, 0x9a, 0xee, 0xe9, 0x6b, 0xf8, 0x90, 0x4b, 0x48, 0xc8, 0xe9, 0xf7, 0xa2, 0x90, 0x1d,
+ 0x0c, 0x85, 0xc1, 0xaf, 0x40, 0x9c, 0xa9, 0x00, 0x66, 0x3d, 0x64, 0x17, 0xdf, 0x18, 0x4b, 0x1c,
+ 0xb4, 0x69, 0xa9, 0xc9, 0xda, 0xa9, 0xa2, 0x3d, 0x42, 0x10, 0x23, 0x7a, 0xdb, 0x13, 0x03, 0x61,
+ 0xdf, 0xe8, 0xcf, 0x24, 0x98, 0x1f, 0x1d, 0x51, 0xe5, 0x70, 0x6d, 0xbd, 0xb9, 0xae, 0x53, 0x3d,
+ 0x5a, 0xc3, 0x87, 0x8d, 0x5a, 0x2e, 0x3a, 0x1f, 0xbd, 0x91, 0x5a, 0xdc, 0x1c, 0xbf, 0xe3, 0xfa,
+ 0x39, 0x88, 0x75, 0xdb, 0x73, 0x0f, 0xd5, 0x73, 0x3b, 0xce, 0x37, 0xe1, 0x95, 0xb1, 0xa0, 0xc2,
+ 0x3a, 0x94, 0xe4, 0x3a, 0x34, 0x0b, 0x93, 0x0f, 0xf5, 0x76, 0x0f, 0x8b, 0xd9, 0xf2, 0xc2, 0xed,
+ 0xc8, 0xdb, 0x52, 0xf1, 0x1a, 0xc4, 0xb9, 0x60, 0x50, 0x06, 0x92, 0xe5, 0x7a, 0x73, 0xf1, 0x3b,
+ 0x6f, 0x2d, 0x57, 0xd7, 0x95, 0x09, 0xb1, 0x04, 0xff, 0x2b, 0xc1, 0xd5, 0xa6, 0xe7, 0x62, 0xbd,
+ 0xd3, 0xb0, 0xf7, 0x30, 0xa1, 0x73, 0xaa, 0x61, 0x4f, 0xb7, 0xda, 0x04, 0xd9, 0x90, 0x25, 0xac,
+ 0x46, 0xd3, 0x4d, 0xd3, 0xc5, 0x84, 0xf0, 0x0e, 0x2b, 0xcb, 0xcf, 0xfa, 0x85, 0xea, 0x58, 0x5b,
+ 0xc7, 0x30, 0xda, 0x0b, 0x1c, 0xc2, 0xb2, 0xf7, 0x0c, 0xa3, 0x5d, 0xe2, 0x3d, 0x95, 0x39, 0x9c,
+ 0x9a, 0x21, 0xe1, 0x22, 0xfa, 0x16, 0xc4, 0x48, 0x57, 0xb7, 0xd9, 0x14, 0x52, 0x8b, 0xd7, 0x42,
+ 0xf2, 0xf7, 0xb7, 0x60, 0xb3, 0xab, 0xdb, 0x95, 0xd8, 0x67, 0xfd, 0xc2, 0x84, 0xca, 0x58, 0x51,
+ 0x05, 0x80, 0x78, 0xba, 0xeb, 0x69, 0xd4, 0x96, 0x08, 0xed, 0x7f, 0x29, 0xd4, 0x90, 0xda, 0x9a,
+ 0xd2, 0x7e, 0xdb, 0x28, 0x6d, 0xfb, 0xb6, 0x46, 0x34, 0x4f, 0xb2, 0x66, 0x94, 0x5a, 0xd4, 0xe1,
+ 0xda, 0x88, 0x00, 0xb6, 0x5c, 0x67, 0x8f, 0x8d, 0x68, 0x09, 0xd2, 0x46, 0xcf, 0x73, 0x1e, 0x62,
+ 0x97, 0x77, 0x20, 0x8d, 0xdf, 0x41, 0x4a, 0x34, 0x64, 0x5d, 0xfc, 0x63, 0x1c, 0x32, 0x7c, 0xe3,
+ 0xf9, 0xb2, 0x1d, 0x1e, 0xb8, 0xf4, 0x3c, 0x03, 0x47, 0x77, 0x40, 0xc6, 0xb6, 0xc9, 0x11, 0x22,
+ 0xe3, 0x23, 0x24, 0xb0, 0x6d, 0xb2, 0xf6, 0x2f, 0xf0, 0x9d, 0x1c, 0x65, 0x8b, 0x9a, 0x38, 0xea,
+ 0x17, 0xa2, 0x3b, 0x6a, 0x83, 0x6f, 0xe9, 0x77, 0x21, 0x6f, 0xe2, 0xae, 0x8b, 0x0d, 0x9d, 0xee,
+ 0xe9, 0x16, 0x1b, 0xba, 0xd6, 0xd1, 0x6d, 0x6b, 0x17, 0x13, 0x2f, 0x17, 0x63, 0x3a, 0x96, 0x1b,
+ 0x70, 0xf0, 0xb9, 0xad, 0x8b, 0x7a, 0xf4, 0xfb, 0x12, 0xcc, 0xf4, 0x5c, 0x8b, 0x68, 0xad, 0x43,
+ 0xad, 0xed, 0x18, 0x7a, 0xdb, 0xf2, 0x0e, 0xb5, 0x83, 0x87, 0xb9, 0x49, 0xb6, 0xb1, 0xee, 0x9c,
+ 0x6b, 0x9d, 0x84, 0x90, 0x4a, 0x3b, 0xae, 0x45, 0x2a, 0x87, 0x77, 0x05, 0xc2, 0xda, 0x43, 0xa6,
+ 0xfc, 0x95, 0xd9, 0xa3, 0x7e, 0x41, 0xd9, 0x51, 0x1b, 0xe1, 0xaa, 0xfb, 0xaa, 0xd2, 0x1b, 0x61,
+ 0x46, 0x7a, 0x60, 0x96, 0x2c, 0xc7, 0xd6, 0x1c, 0x6e, 0xe7, 0x72, 0x71, 0x26, 0xa8, 0xc5, 0x8b,
+ 0x5b, 0x48, 0x75, 0x1a, 0x1f, 0xb3, 0xe5, 0x7f, 0x24, 0x41, 0x9e, 0xba, 0x1c, 0x6c, 0x50, 0x31,
+ 0x05, 0xfe, 0x4c, 0x73, 0xb1, 0xe1, 0xb8, 0x66, 0x2e, 0x41, 0xe5, 0x54, 0x69, 0xfe, 0xeb, 0xb8,
+ 0x9e, 0x86, 0x79, 0xc6, 0x5e, 0xcf, 0x32, 0x4b, 0x3b, 0x3b, 0x8d, 0xda, 0x51, 0xbf, 0x90, 0xdb,
+ 0xf2, 0xc1, 0x83, 0x45, 0x54, 0x19, 0xb4, 0x9a, 0xeb, 0x9e, 0x52, 0x83, 0xde, 0x86, 0xac, 0xe1,
+ 0xb4, 0xdb, 0xd8, 0x60, 0xd3, 0xde, 0x51, 0x1b, 0x39, 0x99, 0x2d, 0xf0, 0xf4, 0x51, 0xbf, 0x90,
+ 0xa9, 0x06, 0x35, 0x74, 0xa9, 0x33, 0x46, 0xb8, 0x88, 0x54, 0x98, 0x0a, 0x09, 0x8c, 0xf9, 0x93,
+ 0x24, 0x93, 0xd6, 0xcd, 0xb1, 0x4d, 0xa1, 0x9a, 0xc5, 0x43, 0xe5, 0x7c, 0x15, 0xae, 0x9c, 0xb8,
+ 0x8a, 0xe7, 0x99, 0xb0, 0x64, 0xd8, 0x84, 0x29, 0x90, 0xe5, 0x8b, 0xe2, 0x6f, 0xcc, 0xe2, 0xff,
+ 0x4c, 0x41, 0x56, 0xc5, 0xc4, 0x73, 0x5c, 0xec, 0xef, 0xa8, 0x9f, 0x4b, 0x30, 0x43, 0xe3, 0x0d,
+ 0xd7, 0xea, 0x7a, 0x8e, 0xab, 0xb9, 0xf8, 0x91, 0x6b, 0x79, 0x98, 0xe4, 0x22, 0x4c, 0xe9, 0xca,
+ 0x67, 0x4c, 0x61, 0x18, 0xa8, 0x54, 0x0b, 0x40, 0x54, 0x81, 0xc1, 0xf5, 0xee, 0xce, 0x8f, 0x7e,
+ 0x59, 0xb8, 0x3d, 0xd6, 0x3a, 0x1e, 0x0f, 0x81, 0x4a, 0x8d, 0x9a, 0x8a, 0xcc, 0x63, 0xc0, 0xe8,
+ 0x45, 0x88, 0x51, 0xbd, 0x65, 0x2e, 0x27, 0x59, 0x91, 0x8f, 0xfa, 0x85, 0x18, 0xd5, 0x6c, 0x95,
+ 0x51, 0x87, 0x36, 0x78, 0xec, 0x39, 0x36, 0xf8, 0x32, 0xa4, 0x3c, 0xbd, 0xd5, 0xc6, 0x1a, 0xed,
+ 0x99, 0x88, 0xed, 0xf7, 0xea, 0x88, 0x24, 0xc8, 0x83, 0x76, 0x4b, 0x27, 0xb8, 0xb4, 0x4d, 0x39,
+ 0x43, 0x73, 0x07, 0xcf, 0x27, 0x10, 0xb4, 0x00, 0x29, 0x6a, 0xcb, 0x5c, 0xcb, 0xc4, 0x9a, 0xd9,
+ 0x62, 0x7b, 0x28, 0x59, 0xc9, 0x1e, 0xf5, 0x0b, 0xb0, 0x29, 0xc8, 0xb5, 0x8a, 0x0a, 0x3e, 0x4b,
+ 0xad, 0x85, 0x3c, 0x98, 0x15, 0x46, 0x23, 0xd8, 0xff, 0x4c, 0x9f, 0x12, 0x6c, 0x08, 0xef, 0x8e,
+ 0xbf, 0x18, 0x7c, 0xdd, 0x7d, 0xe5, 0x61, 0x11, 0x0a, 0x9f, 0x24, 0x6a, 0x1d, 0xab, 0x41, 0xdf,
+ 0x84, 0xe9, 0xae, 0x8b, 0xbb, 0xba, 0x8b, 0x35, 0xc3, 0xe9, 0x74, 0xdb, 0xd8, 0xc3, 0x26, 0xd3,
+ 0x7e, 0x59, 0x55, 0x44, 0x45, 0xd5, 0xa7, 0xa3, 0x57, 0xa8, 0x77, 0xd3, 0x3d, 0x1a, 0x38, 0x11,
+ 0xec, 0x52, 0xce, 0x24, 0xe3, 0xcc, 0x30, 0x6a, 0x43, 0x10, 0xd1, 0x9b, 0x70, 0x65, 0xb0, 0x6e,
+ 0x44, 0xeb, 0xf6, 0x5a, 0x6d, 0x8b, 0xec, 0x63, 0x33, 0x07, 0x8c, 0x7b, 0x36, 0x54, 0xb9, 0xe5,
+ 0xd7, 0xa1, 0xc3, 0x21, 0x55, 0x34, 0xa8, 0x60, 0xf4, 0x3d, 0x9c, 0x4b, 0xcd, 0x4b, 0x37, 0x26,
+ 0x2b, 0x2b, 0xcf, 0xfa, 0x85, 0xda, 0xd8, 0x7a, 0x44, 0x70, 0x67, 0xc1, 0x73, 0x31, 0x0e, 0xa9,
+ 0x65, 0x55, 0xe0, 0x85, 0x35, 0xca, 0xa7, 0x21, 0x15, 0x60, 0xb0, 0x05, 0x73, 0xe9, 0xe7, 0xb6,
+ 0x76, 0x21, 0x14, 0x54, 0x86, 0x04, 0x0f, 0xe1, 0x49, 0x2e, 0xc3, 0x16, 0xf0, 0xeb, 0xa7, 0xe9,
+ 0x10, 0xe3, 0x0a, 0xad, 0x92, 0xdf, 0x0e, 0xd5, 0x00, 0xbc, 0xc3, 0xae, 0xaf, 0x89, 0x59, 0x86,
+ 0xf2, 0xca, 0x69, 0x28, 0x87, 0xdd, 0xb0, 0x22, 0x26, 0x3d, 0x51, 0x26, 0x68, 0x15, 0xd2, 0xfc,
+ 0x7c, 0x20, 0x70, 0xa6, 0x18, 0xce, 0xf5, 0x53, 0x70, 0x58, 0xd8, 0xa3, 0x87, 0x90, 0x52, 0x24,
+ 0xa0, 0x10, 0xb4, 0x05, 0x59, 0x1a, 0xab, 0x52, 0x4e, 0x81, 0xa6, 0x30, 0xb4, 0x9b, 0xa7, 0xa0,
+ 0xd5, 0x04, 0x73, 0x08, 0x2f, 0x63, 0x86, 0x68, 0x04, 0x1d, 0xc2, 0x55, 0x72, 0x48, 0x3c, 0xdc,
+ 0xd1, 0xd8, 0xd6, 0x21, 0x9a, 0xcb, 0x75, 0xd9, 0xcc, 0x4d, 0x33, 0xe4, 0xea, 0xf8, 0x6a, 0xdf,
+ 0x64, 0x38, 0x6c, 0x4b, 0x12, 0x51, 0x65, 0xf2, 0x28, 0x72, 0x96, 0x9c, 0x50, 0x95, 0xff, 0x6f,
+ 0x09, 0xa6, 0x8f, 0xd9, 0x2d, 0xb4, 0x0d, 0x91, 0xe0, 0xe4, 0x43, 0xdd, 0x49, 0x84, 0x9d, 0x7a,
+ 0x2e, 0x63, 0xc3, 0x22, 0x96, 0x89, 0xf6, 0x20, 0x49, 0x77, 0x92, 0xed, 0xd1, 0x63, 0x55, 0x84,
+ 0x81, 0xaf, 0x1e, 0xf5, 0x0b, 0xf2, 0x16, 0x23, 0x5e, 0xba, 0x0b, 0x99, 0x83, 0x37, 0x4c, 0x54,
+ 0x80, 0x94, 0xe7, 0x68, 0xf8, 0xb1, 0x45, 0x3c, 0xcb, 0xde, 0x63, 0x71, 0x8a, 0xac, 0x82, 0xe7,
+ 0xd4, 0x05, 0x25, 0xff, 0xe7, 0x11, 0x40, 0xc7, 0x0d, 0x04, 0xfa, 0x07, 0x09, 0x5e, 0xf4, 0xc3,
+ 0x0f, 0xc7, 0xb5, 0xf6, 0x2c, 0x5b, 0x6f, 0x0f, 0xc5, 0x21, 0x12, 0x5b, 0x8e, 0x4f, 0x2e, 0x63,
+ 0x85, 0x44, 0x6c, 0xb2, 0x29, 0xe0, 0x47, 0x63, 0x94, 0x17, 0xa9, 0xf3, 0xe6, 0x31, 0xca, 0x31,
+ 0x96, 0xfb, 0x6a, 0xae, 0x77, 0x4a, 0xe3, 0xfc, 0x1a, 0xbc, 0x74, 0x26, 0xf0, 0x45, 0xdc, 0x66,
+ 0xfe, 0x47, 0x12, 0x5c, 0x3b, 0xc5, 0x99, 0x85, 0x71, 0x32, 0x1c, 0xe7, 0x5e, 0x18, 0x27, 0xb5,
+ 0xf8, 0xbd, 0x4b, 0x38, 0xcc, 0xf0, 0x20, 0x96, 0xe1, 0x85, 0x53, 0x95, 0xf9, 0xbc, 0xd9, 0xc8,
+ 0x21, 0xa0, 0xd5, 0x98, 0x2c, 0x29, 0x91, 0xe2, 0x1b, 0x30, 0x25, 0x20, 0x82, 0x20, 0xfd, 0x25,
+ 0x80, 0x7d, 0x6b, 0x6f, 0x5f, 0x7b, 0xa4, 0x7b, 0xd8, 0x15, 0xe7, 0xea, 0x24, 0xa5, 0x7c, 0x48,
+ 0x09, 0xc5, 0x3f, 0x05, 0xc8, 0x34, 0x3a, 0x5d, 0xc7, 0xf5, 0xfc, 0x48, 0xe1, 0x2e, 0xc4, 0xf9,
+ 0x06, 0x15, 0x8a, 0x50, 0x3a, 0x63, 0xaa, 0x43, 0x2d, 0xb9, 0x8f, 0x14, 0xa6, 0x4d, 0x60, 0x04,
+ 0x2e, 0x3c, 0x72, 0xa2, 0x0b, 0x7f, 0x0f, 0xe2, 0x3c, 0x89, 0x22, 0x0e, 0x27, 0x85, 0x13, 0x4e,
+ 0x35, 0x8d, 0xcd, 0x25, 0xab, 0x8d, 0x97, 0x18, 0x9b, 0x0f, 0xce, 0x1b, 0xa1, 0x57, 0x41, 0x26,
+ 0xc4, 0xd3, 0x88, 0xf5, 0x43, 0x1e, 0x01, 0x44, 0xf9, 0xf9, 0xbc, 0xd9, 0xdc, 0x6e, 0x5a, 0x3f,
+ 0xc4, 0x6a, 0x82, 0x10, 0x8f, 0x7e, 0xa0, 0x3c, 0xc8, 0x8f, 0xf4, 0x76, 0x9b, 0x45, 0x0a, 0x93,
+ 0x2c, 0x1f, 0x11, 0x94, 0x87, 0xf7, 0x6b, 0xfc, 0xab, 0xdd, 0xaf, 0xc2, 0xe9, 0x77, 0x75, 0x6f,
+ 0x9f, 0x45, 0xbf, 0x49, 0x15, 0x38, 0x69, 0x4b, 0xf7, 0xf6, 0x51, 0x0e, 0x12, 0x44, 0xa7, 0xfe,
+ 0x97, 0xe4, 0xe4, 0xf9, 0xe8, 0x8d, 0xb4, 0xea, 0x17, 0xd1, 0x1c, 0xb0, 0xe8, 0x81, 0x17, 0x99,
+ 0x23, 0x8e, 0xaa, 0x21, 0x0a, 0x93, 0xc3, 0x81, 0xd5, 0xd5, 0x76, 0x0f, 0x08, 0x77, 0xbc, 0x42,
+ 0x0e, 0x07, 0x56, 0x77, 0x69, 0x8d, 0xa8, 0x09, 0x5a, 0xb9, 0x74, 0x40, 0xd0, 0x75, 0x98, 0xb2,
+ 0xd8, 0x29, 0x4e, 0x33, 0x2d, 0x17, 0x1b, 0x5e, 0xfb, 0x90, 0x39, 0x5d, 0x59, 0xcd, 0x72, 0x72,
+ 0x4d, 0x50, 0xd1, 0x4d, 0x50, 0x46, 0x43, 0x05, 0xe6, 0x2c, 0x65, 0x75, 0x6a, 0x24, 0x52, 0xa0,
+ 0xac, 0xc2, 0x9e, 0x0f, 0x9c, 0x7f, 0x86, 0xb3, 0x72, 0xfa, 0xc0, 0xef, 0x97, 0x60, 0xa6, 0xab,
+ 0xbb, 0x04, 0x6b, 0xad, 0x9e, 0x6d, 0xb6, 0xb1, 0xc6, 0xfd, 0x4d, 0x2e, 0xcb, 0xb8, 0xa7, 0x59,
+ 0x55, 0x85, 0xd5, 0x70, 0xd7, 0x74, 0xde, 0xf9, 0xe1, 0xea, 0xff, 0xc7, 0xf9, 0x61, 0x13, 0x12,
+ 0x7c, 0xd8, 0x24, 0x77, 0x8d, 0x6d, 0x8f, 0x85, 0xb1, 0xb7, 0x07, 0x9f, 0x95, 0xef, 0xfa, 0x05,
+ 0x0a, 0x8d, 0xca, 0xc4, 0x67, 0x48, 0x80, 0x39, 0x1e, 0x95, 0x89, 0x8a, 0x40, 0x82, 0xf9, 0x9f,
+ 0x45, 0x60, 0x92, 0xed, 0x32, 0x74, 0x1b, 0x62, 0x54, 0xc9, 0xc4, 0xd9, 0x78, 0xdc, 0xa8, 0x95,
+ 0xb5, 0x41, 0x08, 0x62, 0xb6, 0xde, 0xc1, 0x39, 0xc4, 0x54, 0x90, 0x7d, 0xa3, 0x6b, 0x90, 0x20,
+ 0xf8, 0x81, 0xf6, 0x50, 0x6f, 0xe7, 0x66, 0x98, 0x7e, 0xc5, 0x09, 0x7e, 0x70, 0x5f, 0x6f, 0xa3,
+ 0x2b, 0x10, 0xb7, 0x88, 0x66, 0xe3, 0x47, 0xb9, 0x59, 0x6e, 0x73, 0x2c, 0xb2, 0x81, 0x1f, 0x31,
+ 0xef, 0xa3, 0xbb, 0x7b, 0xd8, 0xd3, 0x0c, 0xa7, 0x4d, 0x72, 0x57, 0xe8, 0xf6, 0xa6, 0x41, 0x31,
+ 0x25, 0x55, 0x9d, 0x36, 0x41, 0x5f, 0x83, 0xe4, 0x23, 0x9d, 0x68, 0xb8, 0xd3, 0xf5, 0x0e, 0xd9,
+ 0x52, 0xc9, 0x74, 0xd3, 0x91, 0x3a, 0x2d, 0xaf, 0xc6, 0xe4, 0x88, 0x12, 0x5d, 0x8d, 0xc9, 0x51,
+ 0x25, 0xb6, 0x1a, 0x93, 0x63, 0xca, 0xe4, 0x6a, 0x4c, 0x9e, 0x54, 0xe2, 0xab, 0x31, 0x39, 0xae,
+ 0x24, 0x56, 0x63, 0x72, 0x42, 0x91, 0x57, 0x63, 0xb2, 0xac, 0x24, 0x57, 0x63, 0x72, 0x52, 0x81,
+ 0xd5, 0x98, 0x0c, 0x4a, 0x6a, 0x35, 0x26, 0xa7, 0x94, 0xf4, 0x6a, 0x4c, 0x4e, 0x2b, 0x99, 0xd5,
+ 0x98, 0x9c, 0x51, 0xb2, 0xab, 0x31, 0x39, 0xab, 0x4c, 0xad, 0xc6, 0xe4, 0x29, 0x45, 0x59, 0x8d,
+ 0xc9, 0x8a, 0x32, 0xbd, 0x1a, 0x93, 0xa7, 0x15, 0x94, 0xaf, 0x8b, 0xac, 0x8e, 0x8e, 0xbe, 0x37,
+ 0x24, 0xa7, 0xb1, 0x63, 0x21, 0xd6, 0xa8, 0xf8, 0x73, 0x09, 0x94, 0x26, 0x7e, 0xd0, 0xc3, 0xb6,
+ 0x81, 0xef, 0xeb, 0xed, 0xea, 0x7e, 0xcf, 0x3e, 0x40, 0xaf, 0xc2, 0x94, 0x41, 0x3f, 0x34, 0x9e,
+ 0xa1, 0xa0, 0x12, 0x93, 0x98, 0xc4, 0x32, 0x8c, 0xdc, 0xa4, 0x54, 0x2a, 0xb8, 0x97, 0x00, 0x04,
+ 0x1f, 0x35, 0x4f, 0x3c, 0x0d, 0x9a, 0xe4, 0x2c, 0xd4, 0x26, 0x8d, 0xc0, 0xb8, 0xce, 0x23, 0x66,
+ 0x03, 0x87, 0x60, 0x54, 0xe7, 0x11, 0x5a, 0x80, 0x59, 0x1b, 0x3f, 0xf6, 0xb4, 0x51, 0x66, 0x66,
+ 0xef, 0xd4, 0x69, 0x5a, 0x57, 0x0d, 0x37, 0x28, 0xfe, 0x4b, 0x04, 0xa6, 0xfc, 0x41, 0xfb, 0x36,
+ 0x7d, 0x17, 0x14, 0xba, 0xba, 0x96, 0xa9, 0x79, 0x0e, 0x47, 0xf2, 0xad, 0xfb, 0x7b, 0x67, 0xa8,
+ 0xef, 0x08, 0x0a, 0x2d, 0x37, 0xcc, 0x6d, 0x87, 0x75, 0xc7, 0x1d, 0xa5, 0x9a, 0x21, 0x61, 0x5a,
+ 0x7e, 0x07, 0xb2, 0x7e, 0x23, 0x4e, 0x41, 0x55, 0x88, 0x0f, 0xf5, 0xf7, 0xcd, 0x31, 0xfa, 0xf3,
+ 0x45, 0xad, 0x8a, 0xa6, 0xf9, 0xdf, 0x06, 0x74, 0xbc, 0xef, 0xb0, 0x7b, 0x9c, 0xe4, 0xee, 0x71,
+ 0x73, 0xd8, 0x49, 0xbf, 0x73, 0xb1, 0xb9, 0x85, 0x86, 0x1d, 0x3e, 0x5e, 0xff, 0x53, 0x04, 0xb2,
+ 0x7c, 0x23, 0x07, 0x3e, 0x95, 0xee, 0x59, 0x6a, 0x79, 0x2d, 0x7b, 0x4f, 0xeb, 0x0a, 0x22, 0x9b,
+ 0x5f, 0x44, 0x55, 0xfc, 0x8a, 0x80, 0xf9, 0x1b, 0x90, 0x71, 0xb1, 0x6e, 0x0e, 0x18, 0x23, 0x8c,
+ 0x31, 0x4d, 0x89, 0x01, 0xd3, 0x2b, 0x90, 0x65, 0xb1, 0xc1, 0x80, 0x2b, 0xca, 0xb8, 0x32, 0x8c,
+ 0x1a, 0xb0, 0x55, 0x20, 0x43, 0xba, 0xba, 0x3d, 0xe0, 0x8a, 0x31, 0xa1, 0x9e, 0x93, 0x0c, 0x4c,
+ 0xd3, 0x36, 0xe1, 0x80, 0xc0, 0xc5, 0xa4, 0xd7, 0xc1, 0x5a, 0xd7, 0xe1, 0xa7, 0xde, 0xa8, 0x9a,
+ 0xe4, 0x94, 0x2d, 0x87, 0xa0, 0x1d, 0xa6, 0x2a, 0x4c, 0x16, 0x9a, 0xc9, 0x85, 0x93, 0x8b, 0xb3,
+ 0x5e, 0x6e, 0x8d, 0x2f, 0x4e, 0x75, 0x8a, 0x0c, 0x13, 0x8a, 0x7f, 0x23, 0xc1, 0x35, 0x7a, 0x72,
+ 0xe1, 0x3b, 0xad, 0xca, 0x2e, 0x30, 0x7c, 0xed, 0xd4, 0x21, 0xc1, 0x4e, 0x3f, 0x41, 0x34, 0xbe,
+ 0x72, 0xd4, 0x2f, 0xc4, 0x29, 0xf7, 0xa5, 0xdd, 0x6f, 0x9c, 0x02, 0x37, 0xd8, 0x39, 0xd5, 0x73,
+ 0x75, 0x9b, 0x58, 0xf4, 0xc4, 0x46, 0x97, 0xad, 0x83, 0x3b, 0x2d, 0xec, 0xf2, 0xc5, 0x48, 0xab,
+ 0xb3, 0x43, 0x95, 0xeb, 0xbc, 0xae, 0x98, 0x87, 0xdc, 0xe8, 0x90, 0x83, 0x14, 0xcb, 0x6f, 0xc0,
+ 0xd5, 0x0d, 0xfc, 0xe8, 0xa4, 0xd9, 0x54, 0x20, 0xc1, 0xcd, 0xa0, 0xaf, 0xf2, 0x37, 0x46, 0x8d,
+ 0x4e, 0xf8, 0x0e, 0xa7, 0xc4, 0x46, 0xba, 0xcd, 0x1a, 0xa8, 0x7e, 0xc3, 0xe2, 0x27, 0x70, 0x6d,
+ 0x04, 0x3d, 0x58, 0xbe, 0xf7, 0x21, 0x4e, 0x8f, 0xe0, 0x22, 0x3c, 0xcb, 0x1e, 0x37, 0x69, 0xc7,
+ 0xd1, 0x9b, 0x94, 0x5f, 0x15, 0xcd, 0x8a, 0x2a, 0xcb, 0x0d, 0xf5, 0x3a, 0x98, 0x6a, 0xc8, 0x5d,
+ 0x8b, 0x78, 0xe8, 0x03, 0x48, 0x0b, 0x8d, 0xa0, 0x8a, 0xe2, 0x0f, 0xfb, 0x1c, 0xa5, 0x4a, 0xb9,
+ 0x01, 0x08, 0x29, 0xfe, 0xad, 0x04, 0x33, 0x35, 0xd7, 0xe9, 0x76, 0xb1, 0x29, 0x5c, 0x0e, 0x97,
+ 0x85, 0xef, 0x69, 0xa4, 0x90, 0xa7, 0xd9, 0x80, 0x48, 0xa3, 0x26, 0x4e, 0x46, 0x77, 0x2e, 0x7b,
+ 0xe0, 0x6a, 0xd4, 0xd0, 0x3b, 0x5c, 0x20, 0x3d, 0xc2, 0xec, 0x67, 0xf6, 0xd8, 0xe9, 0x7b, 0x48,
+ 0x4d, 0x19, 0xa3, 0x2a, 0x1a, 0x14, 0x7f, 0x9a, 0x80, 0x2b, 0x61, 0x21, 0x2f, 0x57, 0xfd, 0x81,
+ 0x7f, 0x0a, 0x09, 0xcb, 0x36, 0xf1, 0x63, 0x3c, 0x96, 0x9d, 0x3c, 0x09, 0xa2, 0x24, 0xe4, 0xd1,
+ 0xa0, 0x30, 0xbe, 0xd3, 0x17, 0x98, 0xe8, 0xa3, 0x20, 0xc6, 0xe6, 0xf9, 0xb7, 0xdb, 0xcf, 0x8d,
+ 0x5e, 0x1b, 0x89, 0xb7, 0x87, 0xc2, 0x59, 0xe6, 0x50, 0xbe, 0xa2, 0x70, 0xb6, 0x09, 0xd3, 0x96,
+ 0xed, 0x61, 0xb7, 0x8d, 0xf5, 0x87, 0x34, 0x3a, 0xa3, 0xdd, 0x8b, 0x34, 0xdc, 0xb8, 0xd1, 0x88,
+ 0x12, 0x02, 0xe0, 0x51, 0xcd, 0xa7, 0x30, 0x13, 0x06, 0xf5, 0x97, 0xe0, 0xec, 0xd4, 0x1c, 0x93,
+ 0xf0, 0x00, 0xd6, 0xcf, 0x80, 0x85, 0x80, 0x1a, 0x42, 0xec, 0xf7, 0x21, 0xce, 0x33, 0x2e, 0x22,
+ 0xcf, 0x7d, 0xe7, 0x79, 0xc5, 0xce, 0x33, 0x39, 0xaa, 0x40, 0xcb, 0xff, 0x89, 0x04, 0xe9, 0xf0,
+ 0x72, 0x23, 0x0b, 0x64, 0x36, 0x76, 0xdf, 0xa4, 0x45, 0x2b, 0x1b, 0x34, 0x20, 0x67, 0x95, 0x6c,
+ 0x0d, 0xde, 0x7f, 0xee, 0x35, 0xe0, 0x10, 0x42, 0x95, 0x1a, 0x26, 0x8d, 0xb3, 0x4c, 0xd7, 0xe9,
+ 0x0e, 0xee, 0x39, 0xa2, 0xaa, 0x4c, 0x09, 0x34, 0x70, 0xcd, 0xff, 0x0e, 0x24, 0x03, 0x45, 0x09,
+ 0xe5, 0x3b, 0xa2, 0x5f, 0x62, 0xbe, 0xe3, 0xcc, 0xfe, 0x6b, 0x90, 0x19, 0x92, 0x18, 0xba, 0x1a,
+ 0x8c, 0x21, 0x56, 0x89, 0xf3, 0x31, 0x9c, 0x8b, 0x52, 0xfc, 0x65, 0x1c, 0x66, 0x4e, 0xb2, 0xb4,
+ 0x1f, 0x83, 0x12, 0xb2, 0x5b, 0x5a, 0xdb, 0x22, 0x9e, 0xd8, 0x4f, 0x37, 0xcf, 0x3e, 0x9e, 0x87,
+ 0x8c, 0x9f, 0xd0, 0x96, 0xac, 0x3b, 0x6c, 0x12, 0xbf, 0x0f, 0x59, 0x93, 0x0f, 0x5c, 0x64, 0xab,
+ 0xc4, 0xb5, 0xe7, 0x59, 0x87, 0xe1, 0x13, 0x0c, 0xa0, 0x40, 0xcf, 0x98, 0xa1, 0x2a, 0xc2, 0x2e,
+ 0x80, 0x7c, 0xf4, 0x20, 0xc9, 0x66, 0x99, 0x6c, 0xf7, 0x64, 0x2a, 0xcd, 0xa3, 0x7e, 0x61, 0x5a,
+ 0x60, 0xf9, 0x59, 0xb5, 0x4b, 0xaf, 0xd4, 0xb4, 0x39, 0x02, 0x68, 0x52, 0xaf, 0x4b, 0xeb, 0x69,
+ 0xc7, 0x93, 0x03, 0xaf, 0x4b, 0xf7, 0xd1, 0xe5, 0xbd, 0x2e, 0xfd, 0x6c, 0x98, 0xe8, 0x0f, 0x24,
+ 0x98, 0xe6, 0x29, 0xf6, 0x4e, 0xcf, 0xd3, 0xf9, 0xbd, 0x89, 0x7f, 0xc8, 0xfe, 0xf8, 0xa8, 0x5f,
+ 0x98, 0x62, 0x02, 0x59, 0x17, 0x75, 0xac, 0xdb, 0xca, 0xf3, 0x76, 0x3b, 0x40, 0x11, 0x07, 0xcf,
+ 0x80, 0x60, 0xa2, 0x35, 0xc8, 0xf2, 0x8c, 0x81, 0xff, 0x12, 0x82, 0x9d, 0xbe, 0x33, 0x95, 0x97,
+ 0x9f, 0xf5, 0x0b, 0xf3, 0x27, 0x68, 0x16, 0x4f, 0x36, 0xdc, 0xe7, 0xbc, 0x6a, 0x66, 0x37, 0x5c,
+ 0x44, 0x06, 0x64, 0x02, 0xd5, 0x38, 0xec, 0x8a, 0xc3, 0xfa, 0xe5, 0x5d, 0x59, 0xda, 0xd7, 0x11,
+ 0x8a, 0x89, 0xf6, 0x60, 0xca, 0xef, 0xc4, 0x3f, 0x6e, 0x26, 0xbf, 0x94, 0x6e, 0x7c, 0xb5, 0xe6,
+ 0xb3, 0x26, 0x22, 0x6f, 0x74, 0x15, 0x66, 0x4f, 0x8c, 0x72, 0x3e, 0x8f, 0xc3, 0xd5, 0x61, 0x43,
+ 0x18, 0xc4, 0x21, 0xda, 0xa8, 0x87, 0x7c, 0x7f, 0x6c, 0x63, 0xea, 0x63, 0x70, 0x63, 0xe6, 0x97,
+ 0x46, 0x7d, 0xe4, 0xa7, 0x23, 0x3e, 0xf2, 0x39, 0xf0, 0x99, 0x7a, 0x8d, 0xe0, 0xfb, 0x8e, 0xf2,
+ 0xa3, 0xc0, 0x17, 0xf0, 0xd4, 0xd3, 0x07, 0xcf, 0x01, 0xcf, 0xda, 0xfb, 0xc5, 0xc0, 0x1b, 0xfc,
+ 0xb3, 0x04, 0x99, 0xa1, 0x99, 0xfd, 0x3a, 0xdd, 0xc1, 0x56, 0x10, 0x0d, 0xf1, 0xf7, 0x32, 0x6f,
+ 0x5f, 0x7c, 0x5a, 0xc3, 0x41, 0x52, 0xfe, 0xef, 0x25, 0xc8, 0x0c, 0x09, 0xf2, 0x2b, 0x72, 0x24,
+ 0x5f, 0xfe, 0xc8, 0x5b, 0x90, 0x1d, 0x5e, 0xa2, 0x50, 0x1f, 0xd2, 0x97, 0xd3, 0x47, 0xf1, 0xbb,
+ 0x10, 0xe7, 0x14, 0x84, 0x20, 0xfb, 0x61, 0xb9, 0xb1, 0xdd, 0xd8, 0x58, 0xd6, 0x96, 0x36, 0x55,
+ 0x6d, 0xb9, 0xaa, 0x4c, 0xa0, 0x34, 0xc8, 0xb5, 0xfa, 0xdd, 0x3a, 0x25, 0x2a, 0x12, 0x4a, 0x41,
+ 0x82, 0x95, 0xea, 0x35, 0x25, 0x52, 0xac, 0x80, 0xc2, 0xb1, 0x77, 0x31, 0x75, 0x0c, 0x34, 0xee,
+ 0x47, 0x25, 0x98, 0x61, 0x41, 0x7a, 0x87, 0xc6, 0x6f, 0xd4, 0x15, 0x6a, 0xa1, 0xe8, 0x79, 0x3a,
+ 0xa8, 0xa2, 0x4e, 0x71, 0x43, 0xef, 0xe0, 0xe2, 0xdf, 0xc5, 0x60, 0x7a, 0x00, 0xe2, 0xbb, 0xc5,
+ 0x57, 0x41, 0x26, 0x96, 0x7d, 0xa0, 0x0d, 0x5e, 0x2f, 0xf0, 0x6c, 0xa0, 0x65, 0x1f, 0xec, 0xa8,
+ 0x0d, 0x35, 0x41, 0x2b, 0x77, 0x5c, 0x0b, 0xad, 0x42, 0xcc, 0xe9, 0x7a, 0xfe, 0x19, 0xf2, 0xad,
+ 0x33, 0x44, 0x71, 0xac, 0x8f, 0xd2, 0x66, 0xd7, 0x13, 0x19, 0x00, 0x86, 0x81, 0xfe, 0x5a, 0x1a,
+ 0x9c, 0x7a, 0xf8, 0x69, 0xf1, 0x9d, 0x0b, 0xe1, 0x71, 0x01, 0x88, 0xab, 0xe4, 0x0f, 0xe9, 0x46,
+ 0x7d, 0xd6, 0x2f, 0x4c, 0x8f, 0x0a, 0x88, 0x5c, 0xf2, 0x8e, 0xd9, 0x1f, 0x22, 0x5a, 0xe5, 0xb7,
+ 0x9b, 0x03, 0x41, 0x33, 0x87, 0x30, 0xe6, 0x05, 0x72, 0x66, 0x68, 0x21, 0xf2, 0x7b, 0x90, 0x0e,
+ 0x8f, 0xfe, 0x84, 0xbb, 0x83, 0xf2, 0x70, 0x5a, 0xe2, 0x9b, 0x63, 0x49, 0x46, 0x1c, 0x09, 0x43,
+ 0x77, 0x05, 0xdf, 0x85, 0x64, 0x20, 0xf6, 0x8b, 0xdc, 0x74, 0x70, 0x1b, 0x1f, 0xe4, 0xdc, 0x26,
+ 0x95, 0x78, 0xb1, 0x1f, 0x81, 0xb4, 0x8a, 0x89, 0xd3, 0x7e, 0x88, 0x4d, 0x1a, 0xf3, 0x04, 0x8f,
+ 0x8b, 0xa4, 0xf1, 0x1f, 0x17, 0x95, 0x21, 0x19, 0xa4, 0x64, 0x2f, 0xf2, 0xc0, 0x66, 0xd0, 0x0a,
+ 0xdd, 0x81, 0xaf, 0x85, 0xdf, 0xd1, 0x38, 0x3d, 0xdb, 0xd4, 0xdd, 0x43, 0xcd, 0xc5, 0xba, 0xb1,
+ 0x8f, 0x4d, 0x71, 0xa5, 0xf5, 0x42, 0xe8, 0x21, 0x8d, 0xe0, 0x50, 0x39, 0x03, 0xfa, 0x18, 0x32,
+ 0x41, 0x23, 0xea, 0x8b, 0x59, 0x04, 0x95, 0x5d, 0xfc, 0xf6, 0xd9, 0xd1, 0x5f, 0x30, 0xeb, 0x92,
+ 0x8f, 0x47, 0x7d, 0xae, 0x9a, 0x6e, 0x85, 0x4a, 0xc5, 0xf7, 0x20, 0x1d, 0xae, 0x45, 0x32, 0xc4,
+ 0x36, 0x36, 0x37, 0xea, 0x7c, 0x4f, 0x57, 0xca, 0xd5, 0xb5, 0xa5, 0xc6, 0xdd, 0xbb, 0x8a, 0x44,
+ 0xe9, 0xf5, 0x8f, 0x1a, 0xdb, 0x4a, 0x84, 0xee, 0x6e, 0xb5, 0xde, 0xdc, 0x2e, 0xab, 0xdb, 0x4a,
+ 0xb4, 0x88, 0x21, 0x13, 0xee, 0x89, 0xda, 0x4c, 0x1a, 0x62, 0x32, 0xc2, 0xd0, 0x29, 0xfb, 0xfa,
+ 0x98, 0x63, 0xf5, 0x75, 0xcf, 0x0d, 0xa3, 0x16, 0x7f, 0x1c, 0x01, 0x34, 0x50, 0x99, 0x90, 0x81,
+ 0x1e, 0xed, 0x2c, 0x72, 0xf9, 0xce, 0xd0, 0x4f, 0xce, 0x4e, 0xc7, 0x47, 0x59, 0x3a, 0x9e, 0xed,
+ 0xdd, 0x5f, 0x6b, 0x4a, 0x5e, 0x84, 0x30, 0xff, 0x15, 0x03, 0x54, 0x75, 0xb1, 0xee, 0x61, 0x6a,
+ 0x8f, 0xc9, 0x59, 0x19, 0x88, 0x0a, 0x4c, 0xf2, 0xe3, 0x6a, 0xe4, 0x22, 0xc7, 0x55, 0x21, 0x14,
+ 0xde, 0x14, 0xfd, 0x00, 0xd2, 0x86, 0xd3, 0xee, 0x75, 0x6c, 0x8d, 0x3d, 0x88, 0x10, 0xc7, 0x83,
+ 0xef, 0x9c, 0xb5, 0xb5, 0x8f, 0x0d, 0xae, 0x54, 0x75, 0xda, 0xb4, 0x1c, 0xbc, 0x89, 0x63, 0x80,
+ 0x8c, 0x03, 0xbd, 0x08, 0xc9, 0xc0, 0xcc, 0x30, 0xb5, 0x4e, 0xaa, 0x03, 0x02, 0x5a, 0x84, 0x49,
+ 0x9d, 0x68, 0xce, 0x2e, 0x8b, 0xdc, 0xcf, 0xdb, 0x77, 0x6a, 0x4c, 0x27, 0x9b, 0xbb, 0xe8, 0x4d,
+ 0xc8, 0xec, 0x3e, 0xe0, 0xa7, 0x19, 0xee, 0x56, 0xf8, 0x3b, 0x95, 0xa9, 0xa3, 0x7e, 0x21, 0xb5,
+ 0x74, 0x8f, 0x4d, 0x96, 0x3a, 0x15, 0x35, 0xb5, 0xfb, 0x20, 0x28, 0xa0, 0x5b, 0x30, 0xdd, 0xd1,
+ 0x1f, 0x6b, 0xbb, 0xae, 0x6e, 0x88, 0xf0, 0xbd, 0xcd, 0x6d, 0xa5, 0xa4, 0x4e, 0x75, 0xf4, 0xc7,
+ 0x4b, 0x82, 0xde, 0x30, 0xdb, 0x38, 0xff, 0x9f, 0x12, 0x24, 0xc4, 0x8c, 0x50, 0x17, 0x40, 0x88,
+ 0xc7, 0x32, 0xb9, 0xaa, 0x67, 0x2a, 0xf7, 0x8e, 0xfa, 0x85, 0x64, 0x95, 0x51, 0x1b, 0x35, 0xf2,
+ 0xac, 0x5f, 0xf8, 0xe0, 0x79, 0x4d, 0xb9, 0x0f, 0xa2, 0x26, 0x79, 0x27, 0x0d, 0x93, 0xa5, 0x59,
+ 0xf7, 0x75, 0xa2, 0xed, 0x5b, 0xc4, 0x73, 0xf6, 0x5c, 0xbd, 0x23, 0xae, 0x48, 0xd3, 0xfb, 0x3a,
+ 0x59, 0xf1, 0x69, 0x28, 0x4f, 0x03, 0xb1, 0x87, 0xfc, 0x3d, 0x0b, 0xb7, 0x2e, 0x41, 0x19, 0x2d,
+ 0xc2, 0x95, 0xa0, 0xb1, 0x46, 0x27, 0xdd, 0xea, 0x19, 0x07, 0x98, 0xf9, 0x47, 0x6a, 0xc9, 0x67,
+ 0x82, 0xca, 0x75, 0xfd, 0x71, 0x85, 0x57, 0x15, 0xaf, 0xc0, 0x4c, 0x68, 0x59, 0x83, 0xb0, 0x19,
+ 0x83, 0xb2, 0x6e, 0xed, 0xb9, 0x7a, 0xf8, 0xb9, 0xe8, 0x3d, 0x98, 0x1a, 0x79, 0x8e, 0x2d, 0x8c,
+ 0x6d, 0x38, 0x3d, 0x38, 0xfc, 0x7e, 0xbb, 0x54, 0xe5, 0x45, 0xff, 0x20, 0x92, 0x35, 0x86, 0xca,
+ 0xc5, 0x19, 0x98, 0x0e, 0xba, 0x09, 0xfa, 0xfe, 0x55, 0x1a, 0x12, 0x5b, 0xfa, 0x61, 0xdb, 0xd1,
+ 0x4d, 0x34, 0x0f, 0x29, 0xff, 0x0d, 0x8c, 0xdf, 0x5f, 0x52, 0x0d, 0x93, 0x90, 0x05, 0xd9, 0x1e,
+ 0xc1, 0x2e, 0xd5, 0x07, 0x8d, 0xbd, 0x0e, 0xe7, 0xde, 0xa3, 0x52, 0x79, 0xd6, 0x2f, 0xdc, 0x19,
+ 0x6f, 0x79, 0xb0, 0xd1, 0x73, 0x2d, 0xef, 0xb0, 0xd4, 0xbc, 0x77, 0x77, 0x47, 0x40, 0xd1, 0x4d,
+ 0xec, 0xa8, 0x99, 0x5e, 0xb8, 0x28, 0x5e, 0x14, 0x51, 0x51, 0x6b, 0x1d, 0xcb, 0x70, 0x1d, 0xe2,
+ 0xdf, 0x77, 0x08, 0xea, 0x3a, 0x23, 0xa2, 0xeb, 0x30, 0xb5, 0x6b, 0xd9, 0xec, 0xba, 0xcb, 0xe7,
+ 0xe3, 0x57, 0x1d, 0x59, 0x9f, 0x2c, 0x18, 0x1f, 0x42, 0x36, 0xf4, 0x8a, 0x88, 0xaa, 0x59, 0x9c,
+ 0xa9, 0xd9, 0xe6, 0x51, 0xbf, 0x90, 0x19, 0x6c, 0x5b, 0xae, 0x6a, 0x97, 0x89, 0x1a, 0x32, 0x83,
+ 0x6e, 0xa8, 0xa2, 0xcd, 0xc2, 0x24, 0xfb, 0xed, 0x00, 0x7f, 0x38, 0xa8, 0xf2, 0x02, 0x7a, 0x0b,
+ 0x26, 0xdb, 0x58, 0x27, 0x58, 0xbc, 0x09, 0x9c, 0x3f, 0xc3, 0x10, 0xb0, 0xc7, 0xf7, 0x2a, 0x67,
+ 0x47, 0x15, 0x88, 0xf3, 0x2b, 0x60, 0x76, 0x71, 0x7b, 0x3c, 0x59, 0x7c, 0xea, 0xf3, 0xcf, 0x95,
+ 0x09, 0x55, 0xb4, 0x44, 0x75, 0x48, 0x88, 0xb7, 0x34, 0xec, 0x3a, 0xf7, 0xdc, 0xf4, 0x47, 0xe8,
+ 0x75, 0xc2, 0xca, 0x84, 0xea, 0xb7, 0x45, 0xdb, 0xfe, 0xf3, 0x21, 0xee, 0x51, 0xc4, 0xeb, 0xa8,
+ 0xd2, 0x98, 0x21, 0xf2, 0x00, 0x70, 0x08, 0x85, 0x4e, 0xd0, 0x62, 0xb7, 0x27, 0xec, 0x56, 0xf8,
+ 0xec, 0x09, 0x0e, 0xdd, 0x97, 0xd2, 0x09, 0xf2, 0x96, 0x68, 0x03, 0xc0, 0x08, 0xbc, 0x1c, 0xbb,
+ 0x2f, 0x4e, 0x2d, 0xbe, 0x76, 0x91, 0xf8, 0x72, 0x65, 0x42, 0x0d, 0x21, 0xa0, 0x7b, 0x90, 0x32,
+ 0x06, 0xdb, 0x36, 0x37, 0xc5, 0x00, 0x5f, 0xbf, 0x90, 0xed, 0x5e, 0xa1, 0xf6, 0x7a, 0x40, 0x1d,
+ 0xb6, 0xd7, 0xca, 0xa8, 0xbd, 0xae, 0x43, 0x46, 0x64, 0xaa, 0xf8, 0xcf, 0x4e, 0xc4, 0x93, 0xa7,
+ 0xb0, 0x96, 0xf8, 0x3f, 0x4c, 0x29, 0xd5, 0x6d, 0xc3, 0x31, 0xb1, 0x59, 0xa7, 0x65, 0x55, 0x24,
+ 0xe6, 0x59, 0x81, 0xa0, 0x65, 0xc8, 0x1a, 0x6d, 0xac, 0xdb, 0xbd, 0xae, 0x8f, 0x83, 0xc6, 0xc4,
+ 0xc9, 0x88, 0x76, 0x02, 0x68, 0x03, 0xd0, 0x2e, 0x7b, 0xf7, 0x13, 0x1e, 0x15, 0xbb, 0xf8, 0x1d,
+ 0x07, 0x4c, 0x61, 0x6d, 0xd5, 0xc1, 0xc8, 0xd0, 0xcb, 0x90, 0xb1, 0x1d, 0xdb, 0xd0, 0x6d, 0x03,
+ 0xb7, 0x99, 0x67, 0xe5, 0x77, 0xc5, 0xc3, 0x44, 0xf4, 0x09, 0x64, 0xc9, 0xd0, 0xa1, 0x2a, 0x77,
+ 0x85, 0xf5, 0xf8, 0xc6, 0x45, 0xd3, 0xb0, 0x2b, 0x13, 0xea, 0x08, 0x12, 0xfa, 0x4d, 0x50, 0xbc,
+ 0x91, 0xbb, 0x1a, 0x76, 0xeb, 0x7c, 0xf6, 0xf3, 0xbe, 0x53, 0x6e, 0xa4, 0x56, 0x26, 0xd4, 0x63,
+ 0x68, 0xe8, 0x53, 0x98, 0x22, 0xc3, 0x0f, 0xe1, 0x73, 0xd7, 0x58, 0x07, 0xdf, 0x3a, 0xf3, 0xc2,
+ 0xe1, 0xa4, 0xdf, 0x0e, 0xac, 0x4c, 0xa8, 0xa3, 0x58, 0x14, 0xde, 0x1e, 0xbe, 0xf2, 0x61, 0xaf,
+ 0x00, 0xce, 0x86, 0x3f, 0xf9, 0x0a, 0x8a, 0xc2, 0x8f, 0x60, 0xa1, 0x35, 0x48, 0x76, 0x7c, 0x5f,
+ 0x91, 0x7b, 0xe1, 0xdc, 0x73, 0xc8, 0xa8, 0xfb, 0x5a, 0x99, 0x50, 0x07, 0xed, 0x2b, 0x49, 0x48,
+ 0x88, 0xab, 0xc1, 0xe0, 0xde, 0x3e, 0xa1, 0xc8, 0xc5, 0x5f, 0xc9, 0x20, 0x07, 0x31, 0xe8, 0x02,
+ 0xa0, 0x20, 0x6a, 0x18, 0x3c, 0x37, 0xa5, 0x2e, 0x28, 0xb2, 0x32, 0xa1, 0x4e, 0xfb, 0x75, 0x83,
+ 0x17, 0xa7, 0xd7, 0x61, 0xaa, 0xe3, 0x98, 0xd6, 0xae, 0x35, 0x30, 0xfc, 0x3c, 0x11, 0x9c, 0xf5,
+ 0xc9, 0xc2, 0xf0, 0xdf, 0x19, 0x7a, 0xd1, 0x34, 0xce, 0xaf, 0x1a, 0xe8, 0xe8, 0x83, 0x27, 0x4f,
+ 0xd4, 0x11, 0xb9, 0x3d, 0x9b, 0xdd, 0x02, 0x8a, 0x64, 0x00, 0x8f, 0xaf, 0x32, 0x82, 0x2a, 0xce,
+ 0xf3, 0xd5, 0x11, 0xcb, 0x7c, 0xf3, 0x5c, 0xcb, 0xec, 0xcf, 0x7d, 0x45, 0x0a, 0x4c, 0xf3, 0xd2,
+ 0xa8, 0x69, 0xbe, 0x75, 0xbe, 0x69, 0x0e, 0xc1, 0x04, 0xb6, 0x79, 0xe7, 0x44, 0xdb, 0xbc, 0x30,
+ 0xe6, 0xc6, 0x09, 0x21, 0x0e, 0x1b, 0xe7, 0xea, 0x88, 0x71, 0xbe, 0x79, 0xae, 0x71, 0x0e, 0xcf,
+ 0x51, 0x58, 0xe7, 0xcd, 0x13, 0xac, 0xf3, 0xeb, 0x63, 0x59, 0xe7, 0x10, 0x58, 0xd8, 0x3c, 0xab,
+ 0x27, 0x99, 0xe7, 0xd2, 0x78, 0xe6, 0x39, 0x04, 0x39, 0x64, 0x9f, 0xbf, 0x7f, 0xcc, 0xf6, 0x28,
+ 0xe7, 0x6f, 0xde, 0x13, 0x33, 0x40, 0x2b, 0xd2, 0x31, 0xe3, 0xa3, 0x9f, 0x60, 0x7c, 0xa6, 0x19,
+ 0xfc, 0x9b, 0x17, 0x30, 0x3e, 0xa1, 0x0e, 0x8e, 0x5b, 0x9f, 0x8f, 0x20, 0x1d, 0xb6, 0x18, 0xec,
+ 0xed, 0xce, 0xd9, 0xb6, 0xed, 0x94, 0x5f, 0xed, 0x30, 0x1d, 0x08, 0x55, 0xa1, 0x1f, 0x1c, 0x37,
+ 0x3c, 0x33, 0xe7, 0x82, 0x9f, 0x72, 0x3b, 0xbd, 0x22, 0x1d, 0xb7, 0x3c, 0x77, 0xc3, 0x96, 0x67,
+ 0xf6, 0x5c, 0xdf, 0x7d, 0x2c, 0xa2, 0x5d, 0x91, 0xc2, 0xa6, 0x07, 0x40, 0xf6, 0x1f, 0x3f, 0x84,
+ 0xcc, 0x50, 0xf1, 0x8f, 0x25, 0x88, 0xae, 0x3a, 0x2d, 0x94, 0x1d, 0xa4, 0x26, 0x59, 0x52, 0xf1,
+ 0xfd, 0x01, 0xbb, 0x38, 0xed, 0x7d, 0xe3, 0x8c, 0xbe, 0x83, 0x54, 0x6e, 0xd0, 0x08, 0xbd, 0x0b,
+ 0x89, 0x2e, 0x8f, 0xa6, 0x85, 0xa5, 0x29, 0x9e, 0xd5, 0x9e, 0x73, 0xaa, 0x7e, 0x93, 0x5b, 0x37,
+ 0xc3, 0x3f, 0xe0, 0x5b, 0x77, 0x4c, 0x8c, 0xb2, 0x00, 0x5b, 0x3a, 0x21, 0xdd, 0x7d, 0x57, 0x27,
+ 0x58, 0x99, 0x40, 0x09, 0x88, 0xae, 0xad, 0x37, 0x15, 0xe9, 0xd6, 0x47, 0xe1, 0x44, 0x62, 0x4d,
+ 0x2d, 0x37, 0x36, 0x1a, 0x1b, 0xcb, 0xda, 0x46, 0x79, 0xbd, 0xde, 0x54, 0x26, 0x50, 0x0e, 0x66,
+ 0x3f, 0x2c, 0x37, 0xb6, 0x45, 0x66, 0x51, 0x6b, 0x6c, 0x6c, 0xd7, 0xd5, 0xfb, 0xe5, 0xbb, 0x8a,
+ 0x84, 0xae, 0x02, 0x52, 0x37, 0xab, 0x6b, 0xcd, 0x5a, 0x45, 0xab, 0x6e, 0xae, 0x6f, 0x95, 0xab,
+ 0xdb, 0x8d, 0xcd, 0x0d, 0x25, 0x82, 0x64, 0x88, 0xd5, 0x36, 0x37, 0xea, 0x0a, 0xdc, 0xfa, 0x49,
+ 0x0c, 0x62, 0x2c, 0x87, 0xf1, 0x32, 0xa4, 0x76, 0x36, 0x9a, 0x5b, 0xf5, 0x6a, 0x63, 0xa9, 0x51,
+ 0xaf, 0x29, 0x13, 0xf9, 0x99, 0x27, 0x4f, 0xe7, 0xa7, 0x68, 0xd5, 0x8e, 0x4d, 0xba, 0xd8, 0x60,
+ 0x46, 0x16, 0xe5, 0x21, 0x5e, 0x29, 0x57, 0xd7, 0x76, 0xb6, 0x14, 0x29, 0x9f, 0x7d, 0xf2, 0x74,
+ 0x1e, 0x28, 0x03, 0x37, 0x70, 0xe8, 0x45, 0x9e, 0xe3, 0xd8, 0x54, 0xeb, 0x4a, 0x24, 0x3f, 0xf5,
+ 0xe4, 0xe9, 0x7c, 0x8a, 0xa5, 0x4e, 0x84, 0x91, 0xba, 0x0e, 0x99, 0x66, 0x75, 0xa5, 0xbe, 0x5e,
+ 0xd6, 0xaa, 0x2b, 0xe5, 0x8d, 0xe5, 0xba, 0x12, 0xcd, 0xcf, 0x3e, 0x79, 0x3a, 0xaf, 0x8c, 0x2a,
+ 0x3a, 0xed, 0xa2, 0xb1, 0xbe, 0xb5, 0xa9, 0x6e, 0x2b, 0xb1, 0x41, 0x17, 0xdc, 0xbe, 0xa0, 0x22,
+ 0x00, 0x6f, 0xbd, 0x54, 0xaf, 0xd7, 0x94, 0xc9, 0x3c, 0x7a, 0xf2, 0x74, 0x3e, 0x4b, 0xeb, 0x07,
+ 0x66, 0x03, 0xbd, 0x02, 0xe9, 0xaa, 0x5a, 0x2f, 0x6f, 0xd7, 0xb5, 0xe6, 0x76, 0x79, 0xbb, 0xa9,
+ 0xc4, 0x07, 0x33, 0x09, 0x99, 0x02, 0x54, 0x82, 0xe9, 0xf2, 0xce, 0xf6, 0xa6, 0x36, 0xc4, 0x9b,
+ 0xc8, 0x5f, 0x7b, 0xf2, 0x74, 0x7e, 0x86, 0xf2, 0x96, 0x7b, 0x9e, 0x13, 0xe6, 0x7f, 0x0d, 0x94,
+ 0xa1, 0xf1, 0x6b, 0xcb, 0x55, 0x45, 0xce, 0x5f, 0x7d, 0xf2, 0x74, 0x1e, 0x8d, 0x4e, 0x61, 0xb9,
+ 0x8a, 0xbe, 0x0d, 0x57, 0xb7, 0x3f, 0xde, 0xaa, 0xd7, 0xea, 0xcd, 0xaa, 0x36, 0x3c, 0xed, 0x64,
+ 0x3e, 0xf7, 0xe4, 0xe9, 0xfc, 0x2c, 0x6d, 0x73, 0x6c, 0xea, 0xaf, 0x83, 0xd2, 0xdc, 0x56, 0xeb,
+ 0xe5, 0x75, 0xad, 0xb1, 0xb1, 0x5c, 0x6f, 0xb2, 0xc5, 0x82, 0xc1, 0x90, 0x46, 0x36, 0x2d, 0x9d,
+ 0xc2, 0x46, 0xfd, 0xc3, 0x11, 0xfc, 0xd4, 0x80, 0x7f, 0x64, 0x1f, 0xa2, 0x79, 0x48, 0xae, 0x37,
+ 0x96, 0xd5, 0x32, 0xc3, 0x4d, 0xe7, 0xa7, 0x9f, 0x3c, 0x9d, 0xcf, 0x50, 0xbe, 0x60, 0x57, 0xe5,
+ 0xe5, 0x1f, 0xff, 0xe5, 0xdc, 0xc4, 0x5f, 0xfd, 0x74, 0x6e, 0xa2, 0x72, 0xe3, 0xb3, 0x7f, 0x9f,
+ 0x9b, 0xf8, 0xec, 0x68, 0x4e, 0xfa, 0xc5, 0xd1, 0x9c, 0xf4, 0xf9, 0xd1, 0x9c, 0xf4, 0x6f, 0x47,
+ 0x73, 0xd2, 0x1f, 0x7e, 0x31, 0x37, 0xf1, 0x8b, 0x2f, 0xe6, 0x26, 0x3e, 0xff, 0x62, 0x6e, 0xe2,
+ 0x93, 0x38, 0xd7, 0xeb, 0x56, 0x9c, 0x1d, 0x0d, 0xdf, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xb6, 0x52, 0x16, 0x3d, 0x71, 0x3d, 0x00, 0x00,
}
diff --git a/pkg/jobs/jobspb/jobs.proto b/pkg/jobs/jobspb/jobs.proto
index b5b40eb07293..c4f2604fdcbb 100644
--- a/pkg/jobs/jobspb/jobs.proto
+++ b/pkg/jobs/jobspb/jobs.proto
@@ -214,7 +214,11 @@ message ImportDetails {
repeated string target_cols = 21;
reserved 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17;
}
+ message Schema {
+ sqlbase.SchemaDescriptor desc = 1;
+ }
repeated Table tables = 1 [(gogoproto.nullable) = false];
+ repeated Schema schemas = 23 [(gogoproto.nullable) = false];
repeated string uris = 2 [(gogoproto.customname) = "URIs"];
roachpb.IOFileFormat format = 3 [(gogoproto.nullable) = false];
@@ -243,6 +247,7 @@ message ImportDetails {
bool ingest_directly = 11;
bool prepare_complete = 12;
+ bool schemas_published = 24;
bool tables_published = 13;
bool parse_bundle_schema = 14;
@@ -256,6 +261,8 @@ message ImportDetails {
(gogoproto.customname) = "ProtectedTimestampRecord",
(gogoproto.customtype) = "github.com/cockroachdb/cockroach/pkg/util/uuid.UUID"
];
+
+ // next val: 25
}
// SequenceValChunks represents a single chunk of sequence values allocated
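
The new ImportDetails fields give the import job a place to persist created schema
descriptors (schemas) and an idempotency flag (schemas_published). A minimal sketch of
how the flag might be consumed on the Go side; the package, helper name, and surrounding
job plumbing are assumptions, and the field name follows the usual gogoproto-generated
accessor for schemas_published:

package importer // hypothetical package; not part of this patch

import "github.com/cockroachdb/cockroach/pkg/jobs/jobspb"

// markSchemasPublished flips ImportDetails.SchemasPublished exactly once so a
// resumed job does not publish the same schema descriptors twice. This is a
// sketch of the intended use, not code from this patch.
func markSchemasPublished(details *jobspb.ImportDetails) bool {
	if details.SchemasPublished {
		return false // a previous resumption already published the schemas
	}
	details.SchemasPublished = true
	return true
}
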
diff --git a/pkg/sql/alter_schema.go b/pkg/sql/alter_schema.go
index a5bdb547ffd8..a920929f8f74 100644
--- a/pkg/sql/alter_schema.go
+++ b/pkg/sql/alter_schema.go
@@ -184,7 +184,7 @@ func (p *planner) renameSchema(
ctx context.Context, db *dbdesc.Mutable, desc *schemadesc.Mutable, newName string, jobDesc string,
) error {
// Check that there isn't a name collision with the new name.
- found, err := p.schemaExists(ctx, db.ID, newName)
+ found, err := schemaExists(ctx, p.txn, p.ExecCfg().Codec, db.ID, newName)
if err != nil {
return err
}
diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go
index 8e5968ee913a..5529fbe080ad 100644
--- a/pkg/sql/authorization.go
+++ b/pkg/sql/authorization.go
@@ -570,7 +570,7 @@ func (p *planner) checkCanAlterToNewOwner(
ctx context.Context, desc catalog.MutableDescriptor, newOwner security.SQLUsername,
) error {
// Make sure the newOwner exists.
- roleExists, err := p.RoleExists(ctx, newOwner)
+ roleExists, err := RoleExists(ctx, p.ExecCfg(), p.Txn(), newOwner)
if err != nil {
return err
}
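
This RoleExists change is one instance of a pattern that recurs through the patch: a
planner method becomes a free function over its explicit dependencies (ctx, *ExecutorConfig,
*kv.Txn), so code that has no planner — such as the import job this series builds toward —
can call it directly. A hedged sketch of such a call site; only the RoleExists signature
comes from the patch, while the caller, its imports, and error handling are assumed:

// Hypothetical non-planner caller (e.g. job resumption code). sql.RoleExists
// takes its dependencies explicitly instead of reading them off a planner.
exists, err := sql.RoleExists(ctx, execCfg, txn, owner)
if err != nil {
	return err
}
if !exists {
	return pgerror.Newf(pgcode.UndefinedObject, "role/user %q does not exist", owner)
}
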
diff --git a/pkg/sql/create_schema.go b/pkg/sql/create_schema.go
index 134756ae4650..c55ee48156a8 100644
--- a/pkg/sql/create_schema.go
+++ b/pkg/sql/create_schema.go
@@ -16,8 +16,11 @@ import (
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
+ "github.com/cockroachdb/cockroach/pkg/kv"
+ "github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
+ "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
@@ -37,42 +40,16 @@ func (n *createSchemaNode) startExec(params runParams) error {
return params.p.createUserDefinedSchema(params, n.n)
}
-func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema) error {
- if err := checkSchemaChangeEnabled(
- p.EvalContext().Context,
- p.ExecCfg(),
- "CREATE SCHEMA",
- ); err != nil {
- return err
- }
-
- // Users can't create a schema without being connected to a DB.
- if p.CurrentDatabase() == "" {
- return pgerror.New(pgcode.UndefinedDatabase,
- "cannot create schema without being connected to a database")
- }
-
- sqltelemetry.IncrementUserDefinedSchemaCounter(sqltelemetry.UserDefinedSchemaCreate)
- dbName := p.CurrentDatabase()
- if n.Schema.ExplicitCatalog {
- dbName = n.Schema.Catalog()
- }
-
- _, db, err := p.Descriptors().GetMutableDatabaseByName(params.ctx, p.txn, dbName,
- tree.DatabaseLookupFlags{Required: true})
- if err != nil {
- return err
- }
-
- // Users cannot create schemas within the system database.
- if db.ID == keys.SystemDatabaseID {
- return pgerror.New(pgcode.InvalidObjectDefinition, "cannot create schemas in the system database")
- }
-
- if err := p.CheckPrivilege(params.ctx, db, privilege.CREATE); err != nil {
- return err
- }
-
+// CreateUserDefinedSchemaDescriptor constructs a mutable schema descriptor
+// and its privilege descriptor. It returns a nil descriptor, without error,
+// when IF NOT EXISTS is set and the schema already exists.
+func CreateUserDefinedSchemaDescriptor(
+ ctx context.Context,
+ user security.SQLUsername,
+ n *tree.CreateSchema,
+ txn *kv.Txn,
+ execCfg *ExecutorConfig,
+ db *dbdesc.Immutable,
+ allocateID bool,
+) (*schemadesc.Mutable, *descpb.PrivilegeDescriptor, error) {
var schemaName string
if !n.Schema.ExplicitSchema {
schemaName = n.AuthRole.Normalized()
@@ -81,34 +58,37 @@ func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema
}
// Ensure there aren't any name collisions.
- exists, err := p.schemaExists(params.ctx, db.ID, schemaName)
+ exists, err := schemaExists(ctx, txn, execCfg.Codec, db.ID, schemaName)
if err != nil {
- return err
+ return nil, nil, err
}
if exists {
if n.IfNotExists {
- return nil
+ return nil, nil, nil
}
- return pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", schemaName)
+ return nil, nil, pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", schemaName)
}
// Check validity of the schema name.
if err := schemadesc.IsSchemaNameValid(schemaName); err != nil {
- return err
+ return nil, nil, err
}
// Ensure that the cluster version is high enough to create the schema.
- if !params.p.ExecCfg().Settings.Version.IsActive(params.ctx, clusterversion.UserDefinedSchemas) {
- return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
+ if !execCfg.Settings.Version.IsActive(ctx, clusterversion.UserDefinedSchemas) {
+ return nil, nil, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
`creating schemas requires all nodes to be upgraded to %s`,
clusterversion.ByKey(clusterversion.UserDefinedSchemas))
}
// Create the ID.
- id, err := catalogkv.GenerateUniqueDescID(params.ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
- if err != nil {
- return err
+ var id descpb.ID
+ if allocateID {
+ id, err = catalogkv.GenerateUniqueDescID(ctx, execCfg.DB, execCfg.Codec)
+ if err != nil {
+ return nil, nil, err
+ }
}
// Inherit the parent privileges and filter out those which are not valid for
@@ -119,16 +99,17 @@ func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema
}
if !n.AuthRole.Undefined() {
- exists, err := p.RoleExists(params.ctx, n.AuthRole)
+ exists, err := RoleExists(ctx, execCfg, txn, n.AuthRole)
if err != nil {
- return err
+ return nil, nil, err
}
if !exists {
- return pgerror.Newf(pgcode.UndefinedObject, "role/user %q does not exist", n.AuthRole)
+ return nil, nil, pgerror.Newf(pgcode.UndefinedObject, "role/user %q does not exist",
+ n.AuthRole)
}
privs.SetOwner(n.AuthRole)
} else {
- privs.SetOwner(params.SessionData().User())
+ privs.SetOwner(user)
}
// Create the SchemaDescriptor.
@@ -140,6 +121,57 @@ func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema
Version: 1,
})
+ return desc, privs, nil
+}
+
+func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema) error {
+ if err := checkSchemaChangeEnabled(
+ p.EvalContext().Context,
+ p.ExecCfg(),
+ "CREATE SCHEMA",
+ ); err != nil {
+ return err
+ }
+
+ // Users can't create a schema without being connected to a DB.
+ if p.CurrentDatabase() == "" {
+ return pgerror.New(pgcode.UndefinedDatabase,
+ "cannot create schema without being connected to a database")
+ }
+
+ sqltelemetry.IncrementUserDefinedSchemaCounter(sqltelemetry.UserDefinedSchemaCreate)
+ dbName := p.CurrentDatabase()
+ if n.Schema.ExplicitCatalog {
+ dbName = n.Schema.Catalog()
+ }
+
+ _, db, err := p.Descriptors().GetMutableDatabaseByName(params.ctx, p.txn, dbName,
+ tree.DatabaseLookupFlags{Required: true})
+ if err != nil {
+ return err
+ }
+
+ // Users cannot create schemas within the system database.
+ if db.ID == keys.SystemDatabaseID {
+ return pgerror.New(pgcode.InvalidObjectDefinition, "cannot create schemas in the system database")
+ }
+
+ if err := p.CheckPrivilege(params.ctx, db, privilege.CREATE); err != nil {
+ return err
+ }
+
+ desc, privs, err := CreateUserDefinedSchemaDescriptor(params.ctx, params.SessionData().User(), n,
+ p.Txn(), p.ExecCfg(), &db.Immutable, true /* allocateID */)
+ if err != nil {
+ return err
+ }
+
+	// desc is nil when the schema already exists and we are processing a
+	// CREATE SCHEMA IF NOT EXISTS statement.
+ if desc == nil {
+ return nil
+ }
+
// Update the parent database with this schema information.
if db.Schemas == nil {
db.Schemas = make(map[string]descpb.DatabaseDescriptor_SchemaInfo)
@@ -159,8 +191,8 @@ func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema
// Finally create the schema on disk.
if err := p.createDescriptorWithID(
params.ctx,
- catalogkeys.NewSchemaKey(db.ID, schemaName).Key(p.ExecCfg().Codec),
- id,
+ catalogkeys.NewSchemaKey(db.ID, desc.Name).Key(p.ExecCfg().Codec),
+ desc.ID,
desc,
params.ExecCfg().Settings,
tree.AsStringWithFQNames(n, params.Ann()),
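
Splitting descriptor construction out of (*planner).createUserDefinedSchema is what lets
other callers build a schema descriptor without a planner; with allocateID=false the
returned descriptor carries no ID and the caller assigns one itself. A sketch of such a
non-planner entry point — the CreateUserDefinedSchemaDescriptor signature is taken from
the patch, but the wrapper function, its package, and imports are assumptions:

// prepareSchemaForImport is a hypothetical caller, e.g. IMPORT preparing a
// schema inside its own txn rather than through a planner.
func prepareSchemaForImport(
	ctx context.Context,
	user security.SQLUsername,
	stmt *tree.CreateSchema,
	txn *kv.Txn,
	execCfg *sql.ExecutorConfig,
	db *dbdesc.Immutable,
) (*schemadesc.Mutable, error) {
	desc, privs, err := sql.CreateUserDefinedSchemaDescriptor(
		ctx, user, stmt, txn, execCfg, db, false /* allocateID */)
	if err != nil {
		return nil, err
	}
	if desc == nil {
		// The schema already exists and IF NOT EXISTS was specified.
		return nil, nil
	}
	_ = privs // privilege descriptor, returned separately for callers that need it
	// With allocateID=false, desc.ID is left unset; the caller must allocate
	// and assign an ID before writing the descriptor.
	return desc, nil
}
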
diff --git a/pkg/sql/grant_revoke.go b/pkg/sql/grant_revoke.go
index d35d16207582..63528be6ffc6 100644
--- a/pkg/sql/grant_revoke.go
+++ b/pkg/sql/grant_revoke.go
@@ -221,8 +221,7 @@ func (n *changePrivilegesNode) startExec(params runParams) error {
return err
}
if err := p.createNonDropDatabaseChangeJob(ctx, d.ID,
- fmt.Sprintf("updating privileges for database %d", d.ID),
- ); err != nil {
+ fmt.Sprintf("updating privileges for database %d", d.ID)); err != nil {
return err
}
for _, grantee := range n.grantees {
diff --git a/pkg/sql/reassign_owned_by.go b/pkg/sql/reassign_owned_by.go
index 41e82ac010ff..a9204067efbd 100644
--- a/pkg/sql/reassign_owned_by.go
+++ b/pkg/sql/reassign_owned_by.go
@@ -43,7 +43,7 @@ func (p *planner) ReassignOwnedBy(ctx context.Context, n *tree.ReassignOwnedBy)
// Check all roles in old roles exist. Checks in authorization.go will confirm that current user
// is a member of old roles and new roles and has CREATE privilege.
for _, oldRole := range n.OldRoles {
- roleExists, err := p.RoleExists(ctx, oldRole)
+ roleExists, err := RoleExists(ctx, p.ExecCfg(), p.Txn(), oldRole)
if err != nil {
return nil, err
}
diff --git a/pkg/sql/reparent_database.go b/pkg/sql/reparent_database.go
index 295bdf971b0c..2b9d18c20048 100644
--- a/pkg/sql/reparent_database.go
+++ b/pkg/sql/reparent_database.go
@@ -81,7 +81,7 @@ func (p *planner) ReparentDatabase(
}
// Ensure that this database wouldn't collide with a name under the new database.
- exists, err := p.schemaExists(ctx, parent.ID, db.Name)
+ exists, err := schemaExists(ctx, p.txn, p.ExecCfg().Codec, parent.ID, db.Name)
if err != nil {
return nil, err
}
diff --git a/pkg/sql/schema.go b/pkg/sql/schema.go
index b33993a035cc..5c2bc8ba789a 100644
--- a/pkg/sql/schema.go
+++ b/pkg/sql/schema.go
@@ -16,6 +16,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
+ "github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
@@ -23,8 +24,8 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/log"
)
-func (p *planner) schemaExists(
- ctx context.Context, parentID descpb.ID, schema string,
+func schemaExists(
+ ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID descpb.ID, schema string,
) (bool, error) {
// Check statically known schemas.
if schema == tree.PublicSchema {
@@ -36,7 +37,7 @@ func (p *planner) schemaExists(
}
}
// Now lookup in the namespace for other schemas.
- exists, _, err := catalogkv.LookupObjectID(ctx, p.txn, p.ExecCfg().Codec, parentID, keys.RootNamespaceID, schema)
+ exists, _, err := catalogkv.LookupObjectID(ctx, txn, codec, parentID, keys.RootNamespaceID, schema)
if err != nil {
return false, err
}
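The schemaExists change follows the same pattern: the planner receiver is
replaced by explicit transaction and codec arguments. A brief sketch of a
caller, assuming an ExecutorConfig is in scope; `checkSchemaNameFree` and
`parentID` are hypothetical:
```
// Sketch only: any code path holding a txn and codec can now check for
// a schema name collision without a planner.
func checkSchemaNameFree(
	ctx context.Context, execCfg *ExecutorConfig, parentID descpb.ID, name string,
) error {
	return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		exists, err := schemaExists(ctx, txn, execCfg.Codec, parentID, name)
		if err != nil {
			return err
		}
		if exists {
			return pgerror.Newf(pgcode.DuplicateSchema,
				"schema %q already exists", name)
		}
		return nil
	})
}
```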
diff --git a/pkg/sql/sem/tree/name_resolution.go b/pkg/sql/sem/tree/name_resolution.go
index 3e172c0ab71c..75bb45874285 100644
--- a/pkg/sql/sem/tree/name_resolution.go
+++ b/pkg/sql/sem/tree/name_resolution.go
@@ -343,7 +343,8 @@ func ResolveExisting(
// This is a naked table name. Use the search path.
iter := searchPath.Iter()
for next, ok := iter.Next(); ok; next, ok = iter.Next() {
- if found, objMeta, err := r.LookupObject(ctx, lookupFlags, curDb, next, u.Object()); found || err != nil {
+ if found, objMeta, err := r.LookupObject(ctx, lookupFlags, curDb, next,
+ u.Object()); found || err != nil {
if err == nil {
namePrefix.CatalogName = Name(curDb)
namePrefix.SchemaName = Name(next)
diff --git a/pkg/sql/table.go b/pkg/sql/table.go
index 5296037abc7b..992f6cb2b17c 100644
--- a/pkg/sql/table.go
+++ b/pkg/sql/table.go
@@ -71,7 +71,7 @@ func (p *planner) createDropDatabaseJob(
return nil
}
-// createNonDropDatabaseChangeJob covers all database descriptor updates other
+// CreateNonDropDatabaseChangeJob covers all database descriptor updates other
// than dropping the database.
// TODO (lucy): This should ideally look into the set of queued jobs so that we
// don't queue multiple jobs for the same database.
diff --git a/pkg/sql/user.go b/pkg/sql/user.go
index f87ceffe7bc5..9fc0b00f1a16 100644
--- a/pkg/sql/user.go
+++ b/pkg/sql/user.go
@@ -14,6 +14,7 @@ import (
"context"
"time"
+ "github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
@@ -211,10 +212,12 @@ func (p *planner) GetAllRoles(ctx context.Context) (map[security.SQLUsername]boo
}
// RoleExists returns true if the role exists.
-func (p *planner) RoleExists(ctx context.Context, role security.SQLUsername) (bool, error) {
+func RoleExists(
+ ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, role security.SQLUsername,
+) (bool, error) {
query := `SELECT username FROM system.users WHERE username = $1`
- row, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryRowEx(
- ctx, "read-users", p.txn,
+ row, err := execCfg.InternalExecutor.QueryRowEx(
+ ctx, "read-users", txn,
sessiondata.InternalExecutorOverride{User: security.RootUserName()},
query, role,
)
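Likewise, RoleExists is now a free function over an ExecutorConfig and a
transaction, so it can be called from code paths that have no planner (as
reassign_owned_by.go above now does). A minimal sketch of such a caller;
`checkRoleIsPresent` and its parameters are hypothetical:
```
// Sketch only: validates a role without going through a planner.
func checkRoleIsPresent(
	ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, name string,
) error {
	role := security.MakeSQLUsernameFromPreNormalizedString(name)
	exists, err := RoleExists(ctx, execCfg, txn, role)
	if err != nil {
		return err
	}
	if !exists {
		return pgerror.Newf(pgcode.UndefinedObject,
			"role/user %q does not exist", role)
	}
	return nil
}
```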
From dddec05805f44fe5f7e7eefc1c50a5a1caed876f Mon Sep 17 00:00:00 2001
From: angelapwen
Date: Mon, 22 Feb 2021 15:53:52 +0100
Subject: [PATCH 6/7] sql,cli: add payloads_for_trace builtin
Previously it was quite cumbersome to view all payloads for a given
trace: we needed to filter the `node_inflight_trace_spans` vtable for
the span IDs belonging to a trace ID, then apply the
`payloads_for_span()` builtin to each span ID via a lateral join. This
patch adds syntactic sugar for that query.
Instead of
```
WITH spans AS (
    SELECT span_id
    FROM crdb_internal.node_inflight_trace_spans
    WHERE trace_id = $TRACE_ID
) SELECT *
  FROM spans, LATERAL crdb_internal.payloads_for_span(spans.span_id);
```
we can now simply use:
```
SELECT * FROM crdb_internal.payloads_for_trace($TRACE_ID);
```
and achieve the same result. The patch also includes the payloads for
all long-running spans in the `crdb_internal.node_inflight_trace_spans`
table dump of the debug.zip file.
Release note (sql change): Added the `crdb_internal.payloads_for_trace()`
builtin, which displays all payloads attached to all spans for a given
trace ID, using the `crdb_internal.payloads_for_span()` builtin under
the hood. All payloads for long-running spans are also included in the
`crdb_internal.node_inflight_trace_spans` table dump in debug.zip.
Co-authored-by: Tobias Grieger
Release justification: This patch is safe for release because it
adds syntactic sugar to an internal observability feature.
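To make the sugar concrete, here is a minimal client-side sketch (not part
of the patch) that runs the new builtin over the wire. The connection
string, driver choice, and trace ID are placeholders:
```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // any Postgres-wire driver works with CockroachDB
)

func main() {
	// Placeholder connection string; adjust for your cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	const traceID = 42 // hypothetical trace ID, e.g. taken from the DB console
	rows, err := db.Query(
		"SELECT * FROM crdb_internal.payloads_for_trace($1)", traceID)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// Print the result columns; each row carries one payload for one span.
	cols, err := rows.Columns()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cols)
}
```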
---
docs/generated/sql/functions.md | 2 +
pkg/cli/zip.go | 22 ++--
pkg/cli/zip_cluster_wide.go | 8 +-
pkg/cli/zip_per_node.go | 8 +-
pkg/cli/zip_test.go | 6 +-
.../testdata/logic_test/builtin_function | 16 +++
.../testdata/logic_test/contention_event | 15 ++-
pkg/sql/parser/parse_test.go | 2 +-
pkg/sql/sem/builtins/generator_builtins.go | 101 ++++++++++++++++++
9 files changed, 158 insertions(+), 22 deletions(-)
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index 349716acef1d..3e049b382835 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -2647,6 +2647,8 @@ SELECT * FROM crdb_internal.check_consistency(true, '\x02', '\x04')
|