diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index 2caddc91426c..b39502b44e9a 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -106,6 +106,7 @@ sql.distsql.temp_storage.workmem byte size 64 MiB maximum amount of memory in by
sql.log.slow_query.experimental_full_table_scans.enabled boolean false when set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.internal_queries.enabled boolean false when set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.latency_threshold duration 0s when set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each node
+sql.metrics.index_usage_stats.enabled boolean true collect per index usage statistics
sql.metrics.max_mem_reported_stmt_fingerprints integer 100000 the maximum number of reported statement fingerprints stored in memory
sql.metrics.max_mem_reported_txn_fingerprints integer 100000 the maximum number of reported transaction fingerprints stored in memory
sql.metrics.max_mem_stmt_fingerprints integer 100000 the maximum number of statement fingerprints stored in memory
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index e9325ba76cab..c756f6e252df 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -107,6 +107,7 @@
encode(data: bytes, format: string) → string | Encodes data using format (hex / escape / base64 ).
diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel
index d9062309c559..619b6d40fd48 100644
--- a/pkg/BUILD.bazel
+++ b/pkg/BUILD.bazel
@@ -219,6 +219,7 @@ ALL_TESTS = [
"//pkg/sql/gcjob/gcjobnotifier:gcjobnotifier_test",
"//pkg/sql/gcjob:gcjob_test",
"//pkg/sql/gcjob_test:gcjob_test_test",
+ "//pkg/sql/idxusage:idxusage_test",
"//pkg/sql/inverted:inverted_test",
"//pkg/sql/lex:lex_test",
"//pkg/sql/lexbase:lexbase_test",
diff --git a/pkg/cmd/dev/build.go b/pkg/cmd/dev/build.go
index f693dffe192d..41d7b8074b98 100644
--- a/pkg/cmd/dev/build.go
+++ b/pkg/cmd/dev/build.go
@@ -71,6 +71,7 @@ func (d *dev) build(cmd *cobra.Command, targets []string) (err error) {
// Don't let bazel generate any convenience symlinks, we'll create them
// ourself.
args = append(args, "--experimental_convenience_symlinks=ignore")
+ args = append(args, getConfigFlags()...)
args = append(args, mustGetRemoteCacheArgs(remoteCacheAddr)...)
if numCPUs != 0 {
args = append(args, fmt.Sprintf("--local_cpu_resources=%d", numCPUs))
@@ -131,7 +132,9 @@ func (d *dev) symlinkBinaries(ctx context.Context, targets []string) error {
}
func (d *dev) getPathToBin(ctx context.Context, target string) (string, error) {
- out, err := d.exec.CommandContextSilent(ctx, "bazel", "info", "bazel-bin", "--color=no")
+ args := []string{"info", "bazel-bin", "--color=no"}
+ args = append(args, getConfigFlags()...)
+ out, err := d.exec.CommandContextSilent(ctx, "bazel", args...)
if err != nil {
return "", err
}
diff --git a/pkg/cmd/dev/datadriven_test.go b/pkg/cmd/dev/datadriven_test.go
index 18f34abc94ca..81268983e010 100644
--- a/pkg/cmd/dev/datadriven_test.go
+++ b/pkg/cmd/dev/datadriven_test.go
@@ -43,6 +43,10 @@ var (
)
)
+func init() {
+ predictableDevProfile = true
+}
+
// TestDatadriven makes use of datadriven and recorder to capture all operations
// executed by individual `dev` invocations. The testcases are defined under
// testdata/*, where each test files corresponds to a recording capture found in
diff --git a/pkg/cmd/dev/dev.go b/pkg/cmd/dev/dev.go
index 6d3665cce43c..5b66c456b9ef 100644
--- a/pkg/cmd/dev/dev.go
+++ b/pkg/cmd/dev/dev.go
@@ -31,6 +31,7 @@ var (
// Shared flags.
remoteCacheAddr string
numCPUs int
+ skipDevConfig bool
)
func makeDevCmd() *dev {
@@ -76,7 +77,6 @@ lets engineers do a few things:
makeLintCmd(ret.lint),
makeTestCmd(ret.test),
)
-
// Add all the shared flags.
var debugVar bool
for _, subCmd := range ret.cli.Commands() {
@@ -88,6 +88,7 @@ lets engineers do a few things:
// support for the Remote Asset API, which helps speed things up when
// the cache sits across the network boundary.
subCmd.Flags().StringVar(&remoteCacheAddr, "remote-cache", "", "remote caching grpc endpoint to use")
+ subCmd.Flags().BoolVar(&skipDevConfig, "skip-dev-config", false, "Don't infer an appropriate dev config to build with")
}
for _, subCmd := range ret.cli.Commands() {
subCmd.PreRun = func(cmd *cobra.Command, args []string) {
diff --git a/pkg/cmd/dev/test.go b/pkg/cmd/dev/test.go
index fea91050c058..0a9d1807b10d 100644
--- a/pkg/cmd/dev/test.go
+++ b/pkg/cmd/dev/test.go
@@ -109,7 +109,8 @@ func (d *dev) runUnitTest(cmd *cobra.Command, pkgs []string) error {
var args []string
args = append(args, "test")
args = append(args, "--color=yes")
- args = append(args, "--experimental_convenience_symlinks=ignore") // don't generate any convenience symlinks
+ args = append(args, "--experimental_convenience_symlinks=ignore")
+ args = append(args, getConfigFlags()...)
args = append(args, mustGetRemoteCacheArgs(remoteCacheAddr)...)
if numCPUs != 0 {
args = append(args, fmt.Sprintf("--local_cpu_resources=%d", numCPUs))
diff --git a/pkg/cmd/dev/testdata/build.txt b/pkg/cmd/dev/testdata/build.txt
index 5eefe634274d..47eafd4efb9f 100644
--- a/pkg/cmd/dev/testdata/build.txt
+++ b/pkg/cmd/dev/testdata/build.txt
@@ -1,33 +1,42 @@
dev build cockroach-short
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
-bazel info workspace --color=no
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/cmd/cockroach-short
+bazel info workspace --color=no --config=dev
mkdir go/src/github.com/cockroachdb/cockroach/bin
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
dev build cockroach-short --cpus=12
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore --local_cpu_resources=12 //pkg/cmd/cockroach-short
-bazel info workspace --color=no
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev --local_cpu_resources=12 //pkg/cmd/cockroach-short
+bazel info workspace --color=no --config=dev
mkdir go/src/github.com/cockroachdb/cockroach/bin
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
dev build --debug cockroach-short
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
-bazel info workspace --color=no
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/cmd/cockroach-short
+bazel info workspace --color=no --config=dev
mkdir go/src/github.com/cockroachdb/cockroach/bin
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
dev build cockroach-short --remote-cache 127.0.0.1:9090
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore --remote_local_fallback --remote_cache=grpc://127.0.0.1:9090 --experimental_remote_downloader=grpc://127.0.0.1:9090 //pkg/cmd/cockroach-short
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev --remote_local_fallback --remote_cache=grpc://127.0.0.1:9090 --experimental_remote_downloader=grpc://127.0.0.1:9090 //pkg/cmd/cockroach-short
+bazel info workspace --color=no --config=dev
+mkdir go/src/github.com/cockroachdb/cockroach/bin
+bazel info bazel-bin --color=no --config=dev
+rm go/src/github.com/cockroachdb/cockroach/cockroach-short
+ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
+
+dev build --skip-dev-config cockroach-short
+----
+bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
bazel info workspace --color=no
mkdir go/src/github.com/cockroachdb/cockroach/bin
bazel info bazel-bin --color=no
diff --git a/pkg/cmd/dev/testdata/generate.txt b/pkg/cmd/dev/testdata/generate.txt
index d7b9b5fe06d0..e1d03a7ba8fc 100644
--- a/pkg/cmd/dev/testdata/generate.txt
+++ b/pkg/cmd/dev/testdata/generate.txt
@@ -1,4 +1,4 @@
dev gen bazel
----
-bazel info workspace --color=no
+bazel info workspace --color=no --config=dev
go/src/github.com/cockroachdb/cockroach/build/bazelutil/bazel-generate.sh
diff --git a/pkg/cmd/dev/testdata/recording/build.txt b/pkg/cmd/dev/testdata/recording/build.txt
index 1c9ef91d3cbd..6dbdf50dae8e 100644
--- a/pkg/cmd/dev/testdata/recording/build.txt
+++ b/pkg/cmd/dev/testdata/recording/build.txt
@@ -1,14 +1,14 @@
-bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/cmd/cockroach-short
----
-bazel info workspace --color=no
+bazel info workspace --color=no --config=dev
----
go/src/github.com/cockroachdb/cockroach
mkdir go/src/github.com/cockroachdb/cockroach/bin
----
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
----
/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin
@@ -18,17 +18,17 @@ rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore --local_cpu_resources=12 //pkg/cmd/cockroach-short
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev --local_cpu_resources=12 //pkg/cmd/cockroach-short
----
-bazel info workspace --color=no
+bazel info workspace --color=no --config=dev
----
go/src/github.com/cockroachdb/cockroach
mkdir go/src/github.com/cockroachdb/cockroach/bin
----
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
----
/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin
@@ -38,17 +38,17 @@ rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/cmd/cockroach-short
----
-bazel info workspace --color=no
+bazel info workspace --color=no --config=dev
----
go/src/github.com/cockroachdb/cockroach
mkdir go/src/github.com/cockroachdb/cockroach/bin
----
-bazel info bazel-bin --color=no
+bazel info bazel-bin --color=no --config=dev
----
/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin
@@ -58,7 +58,27 @@ rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
----
-bazel build --color=yes --experimental_convenience_symlinks=ignore --remote_local_fallback --remote_cache=grpc://127.0.0.1:9090 --experimental_remote_downloader=grpc://127.0.0.1:9090 //pkg/cmd/cockroach-short
+bazel build --color=yes --experimental_convenience_symlinks=ignore --config=dev --remote_local_fallback --remote_cache=grpc://127.0.0.1:9090 --experimental_remote_downloader=grpc://127.0.0.1:9090 //pkg/cmd/cockroach-short
+----
+
+bazel info workspace --color=no --config=dev
+----
+go/src/github.com/cockroachdb/cockroach
+
+mkdir go/src/github.com/cockroachdb/cockroach/bin
+----
+
+bazel info bazel-bin --color=no --config=dev
+----
+/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin
+
+rm go/src/github.com/cockroachdb/cockroach/cockroach-short
+----
+
+ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
+----
+
+bazel build --color=yes --experimental_convenience_symlinks=ignore //pkg/cmd/cockroach-short
----
bazel info workspace --color=no
@@ -77,4 +97,3 @@ rm go/src/github.com/cockroachdb/cockroach/cockroach-short
ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short
----
-
diff --git a/pkg/cmd/dev/testdata/recording/generate.txt b/pkg/cmd/dev/testdata/recording/generate.txt
index 23a01c71756e..74f89793ad11 100644
--- a/pkg/cmd/dev/testdata/recording/generate.txt
+++ b/pkg/cmd/dev/testdata/recording/generate.txt
@@ -1,4 +1,4 @@
-bazel info workspace --color=no
+bazel info workspace --color=no --config=dev
----
go/src/github.com/cockroachdb/cockroach
diff --git a/pkg/cmd/dev/testdata/recording/test.txt b/pkg/cmd/dev/testdata/recording/test.txt
index f9cc8431c997..f2d7bc7db8a2 100644
--- a/pkg/cmd/dev/testdata/recording/test.txt
+++ b/pkg/cmd/dev/testdata/recording/test.txt
@@ -1,4 +1,4 @@
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32mPASSED[0m in 0.2s
@@ -11,7 +11,7 @@ bazel query kind(go_test, //pkg/util/tracing/...)
----
//pkg/util/tracing:tracing_test
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32m(cached) PASSED[0m in 0.2s
@@ -20,7 +20,7 @@ Executed 0 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32mPASSED[0m in 0.1s
@@ -29,7 +29,7 @@ Executed 1 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output all --test_arg -test.v
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output all --test_arg -test.v
----
----
==================== Test output for //pkg/util/tracing:tracing_test:
@@ -42,7 +42,7 @@ Executed 1 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore --remote_local_fallback --remote_cache=grpc://127.0.0.1:9092 --experimental_remote_downloader=grpc://127.0.0.1:9092 //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev --remote_local_fallback --remote_cache=grpc://127.0.0.1:9092 --experimental_remote_downloader=grpc://127.0.0.1:9092 //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32m(cached) PASSED[0m in 0.0s
@@ -51,7 +51,7 @@ Executed 0 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --nocache_test_results --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --nocache_test_results --test_filter='TestStartChild*' --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32mPASSED[0m in 0.1s
@@ -60,7 +60,7 @@ Executed 1 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output errors
----
----
//pkg/util/tracing:tracing_test [0m[32mPASSED[0m in 12.3s
@@ -69,7 +69,7 @@ Executed 1 out of 1 test: 1 test passes.
----
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output all --test_arg -test.v
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output all --test_arg -test.v
----
----
==================== Test output for //pkg/util/tracing:tracing_test:
diff --git a/pkg/cmd/dev/testdata/test.txt b/pkg/cmd/dev/testdata/test.txt
index aac090744552..5267650633e7 100644
--- a/pkg/cmd/dev/testdata/test.txt
+++ b/pkg/cmd/dev/testdata/test.txt
@@ -1,32 +1,32 @@
dev test pkg/util/tracing
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_output errors
dev test pkg/util/tracing/...
----
bazel query kind(go_test, //pkg/util/tracing/...)
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_output errors
dev test pkg/util/tracing -f 'TestStartChild*'
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
dev test pkg/util/tracing -f 'TestStartChild*' -v
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output all --test_arg -test.v
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output all --test_arg -test.v
dev test pkg/util/tracing -f 'TestStartChild*' --remote-cache 127.0.0.1:9092
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore --remote_local_fallback --remote_cache=grpc://127.0.0.1:9092 --experimental_remote_downloader=grpc://127.0.0.1:9092 //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev --remote_local_fallback --remote_cache=grpc://127.0.0.1:9092 --experimental_remote_downloader=grpc://127.0.0.1:9092 //pkg/util/tracing:tracing_test --test_filter='TestStartChild*' --test_output errors
dev test pkg/util/tracing -f 'TestStartChild*' --ignore-cache
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --nocache_test_results --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --nocache_test_results --test_filter='TestStartChild*' --test_output errors
dev test --stress pkg/util/tracing --filter 'TestStartChild*' --timeout=10s
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output errors
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output errors
dev test --stress pkg/util/tracing --filter 'TestStartChild*' --timeout=10s -v
----
-bazel test --color=yes --experimental_convenience_symlinks=ignore //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output all --test_arg -test.v
+bazel test --color=yes --experimental_convenience_symlinks=ignore --config=dev //pkg/util/tracing:tracing_test --run_under stress -maxtime=10s --test_filter='TestStartChild*' --test_output all --test_arg -test.v
diff --git a/pkg/cmd/dev/util.go b/pkg/cmd/dev/util.go
index 7c5631b98928..efd41b4aca94 100644
--- a/pkg/cmd/dev/util.go
+++ b/pkg/cmd/dev/util.go
@@ -15,6 +15,7 @@ import (
"fmt"
"log"
"net"
+ "runtime"
"strings"
"time"
@@ -22,6 +23,9 @@ import (
"github.com/spf13/cobra"
)
+// To be turned on for tests.
+var predictableDevProfile bool
+
func mustGetFlagString(cmd *cobra.Command, name string) string {
val, err := cmd.Flags().GetString(name)
if err != nil {
@@ -76,9 +80,21 @@ func parseAddr(addr string) (string, error) {
}
func (d *dev) getWorkspace(ctx context.Context) (string, error) {
- out, err := d.exec.CommandContextSilent(ctx, "bazel", "info", "workspace", "--color=no")
+ args := []string{"info", "workspace", "--color=no"}
+ args = append(args, getConfigFlags()...)
+ out, err := d.exec.CommandContextSilent(ctx, "bazel", args...)
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
}
+
+func getConfigFlags() []string {
+ if skipDevConfig {
+ return []string{}
+ }
+ if !predictableDevProfile && runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" {
+ return []string{"--config=devdarwinx86_64"}
+ }
+ return []string{"--config=dev"}
+}
diff --git a/pkg/roachpb/BUILD.bazel b/pkg/roachpb/BUILD.bazel
index 5eb24bb73e3d..9627fc8d1dc9 100644
--- a/pkg/roachpb/BUILD.bazel
+++ b/pkg/roachpb/BUILD.bazel
@@ -14,6 +14,7 @@ go_library(
"batch.go",
"data.go",
"errors.go",
+ "index_usage_stats.go",
"internal.go",
"merge_spans.go",
"metadata.go",
@@ -70,6 +71,7 @@ go_library(
"batch_generated.go",
"data.go",
"errors.go",
+ "index_usage_stats.go",
"metadata.go",
"metadata_replicas.go",
"method.go",
@@ -128,6 +130,7 @@ go_test(
"data_test.go",
"dep_test.go",
"errors_test.go",
+ "index_usage_stats_test.go",
"main_test.go",
"merge_spans_test.go",
"metadata_replicas_test.go",
@@ -195,6 +198,7 @@ proto_library(
"app_stats.proto",
"data.proto",
"errors.proto",
+ "index_usage_stats.proto",
"internal.proto",
"internal_raft.proto",
"io-formats.proto",
diff --git a/pkg/roachpb/index_usage_stats.go b/pkg/roachpb/index_usage_stats.go
new file mode 100644
index 000000000000..7340bb8b0787
--- /dev/null
+++ b/pkg/roachpb/index_usage_stats.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package roachpb
+
+// TableID is the same as descpb.ID. We redefine it here to avoid importing descpb.
+type TableID uint32
+
+// IndexID is the same as descpb.IndexID. We redefine it here to avoid importing
+// descpb.
+type IndexID uint32
+
+// Add adds the fields from other IndexUsageStatistics.
+func (m *IndexUsageStatistics) Add(other *IndexUsageStatistics) {
+ m.TotalRowsRead += other.TotalRowsRead
+ m.TotalRowsWritten += other.TotalRowsWritten
+
+ m.TotalReadCount += other.TotalReadCount
+ m.TotalWriteCount += other.TotalWriteCount
+
+ if m.LastWrite.Before(other.LastWrite) {
+ m.LastWrite = other.LastWrite
+ }
+
+ if m.LastRead.Before(other.LastRead) {
+ m.LastRead = other.LastRead
+ }
+}
diff --git a/pkg/roachpb/index_usage_stats.pb.go b/pkg/roachpb/index_usage_stats.pb.go
new file mode 100644
index 000000000000..ddde310d55af
--- /dev/null
+++ b/pkg/roachpb/index_usage_stats.pb.go
@@ -0,0 +1,856 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: roachpb/index_usage_stats.proto
+
+package roachpb
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ _ "github.com/gogo/protobuf/types"
+ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// IndexUsageStatistics represents the index usage statistics per index.
+// N.B. When fields are added to this struct, make sure to update
+// (*IndexUsageStatistics).Add in roachpb/index_usage_stats.go.
+type IndexUsageStatistics struct {
+ // TotalReadCount is the number of times this index has been read from.
+ TotalReadCount uint64 `protobuf:"varint,1,opt,name=total_read_count,json=totalReadCount" json:"total_read_count"`
+ // LastRead is the timestamp that this index was last being read from.
+ LastRead time.Time `protobuf:"bytes,2,opt,name=last_read,json=lastRead,stdtime" json:"last_read"`
+	// TotalRowsRead is the number of rows that have been read from this index.
+ // TODO(azhng): Currently this field is unused.
+ TotalRowsRead uint64 `protobuf:"varint,3,opt,name=total_rows_read,json=totalRowsRead" json:"total_rows_read"`
+ // TotalWriteCount is the number of times this index has been written to.
+ // TODO(azhng): Currently this field is unused.
+ TotalWriteCount uint64 `protobuf:"varint,4,opt,name=total_write_count,json=totalWriteCount" json:"total_write_count"`
+ // LastWrite is the timestamp that this index was last being written to.
+ // TODO(azhng): Currently this field is unused.
+ LastWrite time.Time `protobuf:"bytes,5,opt,name=last_write,json=lastWrite,stdtime" json:"last_write"`
+	// TotalRowsWritten is the number of rows that have been written to this index.
+ // TODO(azhng): Currently this field is unused.
+ TotalRowsWritten uint64 `protobuf:"varint,6,opt,name=total_rows_written,json=totalRowsWritten" json:"total_rows_written"`
+}
+
+func (m *IndexUsageStatistics) Reset() { *m = IndexUsageStatistics{} }
+func (m *IndexUsageStatistics) String() string { return proto.CompactTextString(m) }
+func (*IndexUsageStatistics) ProtoMessage() {}
+func (*IndexUsageStatistics) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7523d71560a74d6c, []int{0}
+}
+func (m *IndexUsageStatistics) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IndexUsageStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IndexUsageStatistics) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexUsageStatistics.Merge(m, src)
+}
+func (m *IndexUsageStatistics) XXX_Size() int {
+ return m.Size()
+}
+func (m *IndexUsageStatistics) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexUsageStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexUsageStatistics proto.InternalMessageInfo
+
+// IndexUsageKey uniquely identifies an index. It's a tuple of TableID and an
+// IndexID.
+type IndexUsageKey struct {
+ // TableID is the ID of the table this index is created on. This is same as
+ // descpb.TableID and is unique within the cluster.
+ TableID TableID `protobuf:"varint,1,opt,name=table_id,json=tableId,casttype=TableID" json:"table_id"`
+ // IndexID is the ID of the index within the scope of the given table.
+ IndexID IndexID `protobuf:"varint,2,opt,name=index_id,json=indexId,casttype=IndexID" json:"index_id"`
+}
+
+func (m *IndexUsageKey) Reset() { *m = IndexUsageKey{} }
+func (m *IndexUsageKey) String() string { return proto.CompactTextString(m) }
+func (*IndexUsageKey) ProtoMessage() {}
+func (*IndexUsageKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7523d71560a74d6c, []int{1}
+}
+func (m *IndexUsageKey) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IndexUsageKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IndexUsageKey) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IndexUsageKey.Merge(m, src)
+}
+func (m *IndexUsageKey) XXX_Size() int {
+ return m.Size()
+}
+func (m *IndexUsageKey) XXX_DiscardUnknown() {
+ xxx_messageInfo_IndexUsageKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IndexUsageKey proto.InternalMessageInfo
+
+// CollectedIndexUsageStatistics wraps collected index key and its usage
+// statistics.
+type CollectedIndexUsageStatistics struct {
+ Key IndexUsageKey `protobuf:"bytes,1,opt,name=key" json:"key"`
+ Stats IndexUsageStatistics `protobuf:"bytes,2,opt,name=stats" json:"stats"`
+}
+
+func (m *CollectedIndexUsageStatistics) Reset() { *m = CollectedIndexUsageStatistics{} }
+func (m *CollectedIndexUsageStatistics) String() string { return proto.CompactTextString(m) }
+func (*CollectedIndexUsageStatistics) ProtoMessage() {}
+func (*CollectedIndexUsageStatistics) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7523d71560a74d6c, []int{2}
+}
+func (m *CollectedIndexUsageStatistics) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CollectedIndexUsageStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CollectedIndexUsageStatistics) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CollectedIndexUsageStatistics.Merge(m, src)
+}
+func (m *CollectedIndexUsageStatistics) XXX_Size() int {
+ return m.Size()
+}
+func (m *CollectedIndexUsageStatistics) XXX_DiscardUnknown() {
+ xxx_messageInfo_CollectedIndexUsageStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CollectedIndexUsageStatistics proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*IndexUsageStatistics)(nil), "cockroach.sql.IndexUsageStatistics")
+ proto.RegisterType((*IndexUsageKey)(nil), "cockroach.sql.IndexUsageKey")
+ proto.RegisterType((*CollectedIndexUsageStatistics)(nil), "cockroach.sql.CollectedIndexUsageStatistics")
+}
+
+func init() { proto.RegisterFile("roachpb/index_usage_stats.proto", fileDescriptor_7523d71560a74d6c) }
+
+var fileDescriptor_7523d71560a74d6c = []byte{
+ // 444 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x3f, 0x6f, 0xd3, 0x40,
+ 0x14, 0xf7, 0xb5, 0x29, 0x09, 0x57, 0x05, 0xca, 0xa9, 0x43, 0x14, 0xc1, 0xb9, 0x0a, 0x4b, 0x91,
+ 0xd0, 0x19, 0x45, 0x30, 0x23, 0x1c, 0x16, 0x8b, 0xcd, 0x14, 0x55, 0x62, 0xb1, 0x2e, 0xf6, 0x61,
+ 0xac, 0xba, 0xb9, 0xe0, 0x7b, 0x51, 0xe8, 0xc0, 0x77, 0xe8, 0xc4, 0x67, 0xca, 0xd8, 0xb1, 0x53,
+ 0x01, 0xe7, 0x5b, 0x30, 0xa1, 0x7b, 0xe7, 0x12, 0x47, 0x82, 0xa1, 0xdb, 0xb3, 0xdf, 0xef, 0xdf,
+ 0xbd, 0x7b, 0x47, 0xfd, 0x4a, 0xcb, 0xf4, 0xf3, 0x7c, 0x1a, 0x14, 0xb3, 0x4c, 0x7d, 0x4d, 0x16,
+ 0x46, 0xe6, 0x2a, 0x31, 0x20, 0xc1, 0x88, 0x79, 0xa5, 0x41, 0xb3, 0x7e, 0xaa, 0xd3, 0x33, 0x04,
+ 0x09, 0xf3, 0xa5, 0x1c, 0x1e, 0xe6, 0x3a, 0xd7, 0xd8, 0x09, 0x6c, 0xe5, 0x40, 0x43, 0x3f, 0xd7,
+ 0x3a, 0x2f, 0x55, 0x80, 0x5f, 0xd3, 0xc5, 0xa7, 0x00, 0x8a, 0x73, 0x65, 0x40, 0x9e, 0xcf, 0x1d,
+ 0x60, 0x54, 0xef, 0xd0, 0xc3, 0xc8, 0x3a, 0x7c, 0xb0, 0x06, 0xef, 0x41, 0x42, 0x61, 0xa0, 0x48,
+ 0x0d, 0x13, 0xf4, 0x00, 0x34, 0xc8, 0x32, 0xa9, 0x94, 0xcc, 0x92, 0x54, 0x2f, 0x66, 0x30, 0x20,
+ 0x47, 0xe4, 0xb8, 0x13, 0x76, 0x56, 0x37, 0xbe, 0x17, 0x3f, 0xc0, 0x6e, 0xac, 0x64, 0x36, 0xb1,
+ 0x3d, 0xf6, 0x86, 0xde, 0x2f, 0xa5, 0x01, 0x84, 0x0f, 0x76, 0x8e, 0xc8, 0xf1, 0xfe, 0x78, 0x28,
+ 0x9c, 0xbb, 0xb8, 0x75, 0x17, 0x27, 0xb7, 0xee, 0x61, 0xcf, 0x8a, 0x5c, 0xfe, 0xf0, 0x49, 0xdc,
+ 0xb3, 0x34, 0xab, 0xc3, 0x9e, 0xd3, 0x87, 0x8d, 0xa5, 0x5e, 0x1a, 0x27, 0xb4, 0xdb, 0x72, 0xec,
+ 0x3b, 0x47, 0xbd, 0x34, 0x88, 0x7e, 0x41, 0x1f, 0x39, 0xf4, 0xb2, 0x2a, 0x40, 0x35, 0x09, 0x3b,
+ 0x2d, 0xbc, 0x13, 0x3b, 0xb5, 0x5d, 0x17, 0x71, 0x42, 0x29, 0x46, 0x44, 0xc2, 0x60, 0xef, 0x0e,
+ 0x19, 0xf1, 0x68, 0xa8, 0xc4, 0xc6, 0x94, 0xb5, 0x42, 0x5a, 0x29, 0x50, 0xb3, 0xc1, 0xbd, 0x96,
+ 0xef, 0xc1, 0xdf, 0x9c, 0xa7, 0xae, 0x3b, 0xfa, 0x46, 0xfb, 0x9b, 0x19, 0xbf, 0x53, 0x17, 0xec,
+ 0x15, 0xed, 0x81, 0x9c, 0x96, 0x2a, 0x29, 0x32, 0x1c, 0x6a, 0x3f, 0x1c, 0x5a, 0x6a, 0x7d, 0xe3,
+ 0x77, 0x4f, 0xec, 0xff, 0xe8, 0xed, 0xef, 0x4d, 0x19, 0x77, 0x11, 0x1b, 0x65, 0x96, 0xe6, 0xb6,
+ 0xa1, 0x70, 0x23, 0x6e, 0xd1, 0x50, 0xdf, 0xd1, 0x9a, 0x32, 0xee, 0x22, 0x36, 0xca, 0x46, 0xdf,
+ 0x09, 0x7d, 0x32, 0xd1, 0x65, 0xa9, 0x52, 0x50, 0xd9, 0x3f, 0x2f, 0xfb, 0x25, 0xdd, 0x3d, 0x53,
+ 0x17, 0x18, 0x65, 0x7f, 0xfc, 0x58, 0x6c, 0x6d, 0x96, 0xd8, 0x8a, 0xde, 0x9c, 0xd1, 0xc2, 0xd9,
+ 0x6b, 0xba, 0x87, 0x0b, 0xd9, 0x5c, 0xf7, 0xd3, 0xff, 0xf2, 0x36, 0x4e, 0x0d, 0xdd, 0xf1, 0xc2,
+ 0x67, 0xab, 0x5f, 0xdc, 0x5b, 0xd5, 0x9c, 0x5c, 0xd5, 0x9c, 0x5c, 0xd7, 0x9c, 0xfc, 0xac, 0x39,
+ 0xb9, 0x5c, 0x73, 0xef, 0x6a, 0xcd, 0xbd, 0xeb, 0x35, 0xf7, 0x3e, 0x76, 0x9b, 0x57, 0xf0, 0x27,
+ 0x00, 0x00, 0xff, 0xff, 0x14, 0x7a, 0xa2, 0x07, 0x0f, 0x03, 0x00, 0x00,
+}
+
+func (m *IndexUsageStatistics) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IndexUsageStatistics) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IndexUsageStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.TotalRowsWritten))
+ i--
+ dAtA[i] = 0x30
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastWrite, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastWrite):])
+ if err1 != nil {
+ return 0, err1
+ }
+ i -= n1
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(n1))
+ i--
+ dAtA[i] = 0x2a
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.TotalWriteCount))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.TotalRowsRead))
+ i--
+ dAtA[i] = 0x18
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastRead, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastRead):])
+ if err2 != nil {
+ return 0, err2
+ }
+ i -= n2
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(n2))
+ i--
+ dAtA[i] = 0x12
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.TotalReadCount))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *IndexUsageKey) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IndexUsageKey) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IndexUsageKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.IndexID))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(m.TableID))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *CollectedIndexUsageStatistics) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CollectedIndexUsageStatistics) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CollectedIndexUsageStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Key.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintIndexUsageStats(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintIndexUsageStats(dAtA []byte, offset int, v uint64) int {
+ offset -= sovIndexUsageStats(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *IndexUsageStatistics) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovIndexUsageStats(uint64(m.TotalReadCount))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastRead)
+ n += 1 + l + sovIndexUsageStats(uint64(l))
+ n += 1 + sovIndexUsageStats(uint64(m.TotalRowsRead))
+ n += 1 + sovIndexUsageStats(uint64(m.TotalWriteCount))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastWrite)
+ n += 1 + l + sovIndexUsageStats(uint64(l))
+ n += 1 + sovIndexUsageStats(uint64(m.TotalRowsWritten))
+ return n
+}
+
+func (m *IndexUsageKey) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovIndexUsageStats(uint64(m.TableID))
+ n += 1 + sovIndexUsageStats(uint64(m.IndexID))
+ return n
+}
+
+func (m *CollectedIndexUsageStatistics) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Key.Size()
+ n += 1 + l + sovIndexUsageStats(uint64(l))
+ l = m.Stats.Size()
+ n += 1 + l + sovIndexUsageStats(uint64(l))
+ return n
+}
+
+func sovIndexUsageStats(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozIndexUsageStats(x uint64) (n int) {
+ return sovIndexUsageStats(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *IndexUsageStatistics) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IndexUsageStatistics: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IndexUsageStatistics: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TotalReadCount", wireType)
+ }
+ m.TotalReadCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TotalReadCount |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastRead", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastRead, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TotalRowsRead", wireType)
+ }
+ m.TotalRowsRead = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TotalRowsRead |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteCount", wireType)
+ }
+ m.TotalWriteCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TotalWriteCount |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastWrite", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastWrite, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TotalRowsWritten", wireType)
+ }
+ m.TotalRowsWritten = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TotalRowsWritten |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipIndexUsageStats(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IndexUsageKey) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IndexUsageKey: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IndexUsageKey: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TableID", wireType)
+ }
+ m.TableID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TableID |= TableID(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IndexID", wireType)
+ }
+ m.IndexID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.IndexID |= IndexID(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipIndexUsageStats(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CollectedIndexUsageStatistics) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CollectedIndexUsageStatistics: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CollectedIndexUsageStatistics: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Key.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipIndexUsageStats(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthIndexUsageStats
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipIndexUsageStats(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowIndexUsageStats
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthIndexUsageStats
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupIndexUsageStats
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthIndexUsageStats
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthIndexUsageStats = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowIndexUsageStats = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupIndexUsageStats = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/pkg/roachpb/index_usage_stats.proto b/pkg/roachpb/index_usage_stats.proto
new file mode 100644
index 000000000000..410e6ebaa7b3
--- /dev/null
+++ b/pkg/roachpb/index_usage_stats.proto
@@ -0,0 +1,61 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+syntax = "proto2";
+package cockroach.sql;
+option go_package = "roachpb";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+// IndexUsageStatistics represents the index usage statistics per index.
+// N.B. When fields are added to this struct, make sure to update
+// (*IndexUsageStatistics).Add in roachpb/index_usage_stats.go.
+message IndexUsageStatistics {
+ // TotalReadCount is the number of times this index has been read from.
+ optional uint64 total_read_count = 1 [(gogoproto.nullable) = false];
+
+ // LastRead is the timestamp that this index was last being read from.
+ optional google.protobuf.Timestamp last_read = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+
+  // TotalRowsRead is the number of rows that have been read from this index.
+ // TODO(azhng): Currently this field is unused.
+ optional uint64 total_rows_read = 3 [(gogoproto.nullable) = false];
+
+ // TotalWriteCount is the number of times this index has been written to.
+ // TODO(azhng): Currently this field is unused.
+ optional uint64 total_write_count = 4 [(gogoproto.nullable) = false];
+
+ // LastWrite is the timestamp that this index was last being written to.
+ // TODO(azhng): Currently this field is unused.
+ optional google.protobuf.Timestamp last_write = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+
+  // TotalRowsWritten is the number of rows that have been written to this index.
+ // TODO(azhng): Currently this field is unused.
+ optional uint64 total_rows_written = 6 [(gogoproto.nullable) = false];
+}
+
+// IndexUsageKey uniquely identifies an index. It's a tuple of TableID and an
+// IndexID.
+message IndexUsageKey {
+ // TableID is the ID of the table this index is created on. This is same as
+ // descpb.TableID and is unique within the cluster.
+ optional uint32 table_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TableID", (gogoproto.casttype) = "TableID"];
+
+ // IndexID is the ID of the index within the scope of the given table.
+ optional uint32 index_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "IndexID", (gogoproto.casttype) = "IndexID" ];
+}
+
+// CollectedIndexUsageStatistics wraps collected index key and its usage
+// statistics.
+message CollectedIndexUsageStatistics {
+ optional IndexUsageKey key = 1 [(gogoproto.nullable) = false];
+ optional IndexUsageStatistics stats = 2 [(gogoproto.nullable) = false];
+}
diff --git a/pkg/roachpb/index_usage_stats_test.go b/pkg/roachpb/index_usage_stats_test.go
new file mode 100644
index 000000000000..ebda45426f77
--- /dev/null
+++ b/pkg/roachpb/index_usage_stats_test.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package roachpb
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/util/timeutil"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddIndexUsageStats(t *testing.T) {
+ testCases := []struct {
+ data IndexUsageStatistics
+ expected IndexUsageStatistics
+ }{
+ {
+ data: IndexUsageStatistics{
+ TotalReadCount: 1,
+ TotalWriteCount: 1,
+ TotalRowsWritten: 1,
+ TotalRowsRead: 1,
+ LastRead: timeutil.Unix(10, 1),
+ LastWrite: timeutil.Unix(10, 2),
+ },
+ expected: IndexUsageStatistics{
+ TotalReadCount: 1,
+ TotalWriteCount: 1,
+ TotalRowsWritten: 1,
+ TotalRowsRead: 1,
+ LastRead: timeutil.Unix(10, 1),
+ LastWrite: timeutil.Unix(10, 2),
+ },
+ },
+ {
+ data: IndexUsageStatistics{
+ TotalReadCount: 2,
+ TotalRowsRead: 9,
+ LastRead: timeutil.Unix(20, 1),
+ },
+ expected: IndexUsageStatistics{
+ TotalReadCount: 3,
+ TotalWriteCount: 1,
+ TotalRowsWritten: 1,
+ TotalRowsRead: 10,
+ LastRead: timeutil.Unix(20, 1),
+ LastWrite: timeutil.Unix(10, 2),
+ },
+ },
+ {
+ data: IndexUsageStatistics{
+ TotalWriteCount: 4,
+ TotalRowsWritten: 30,
+ LastWrite: timeutil.Unix(30, 1),
+ },
+ expected: IndexUsageStatistics{
+ TotalReadCount: 3,
+ TotalWriteCount: 5,
+ TotalRowsWritten: 31,
+ TotalRowsRead: 10,
+ LastRead: timeutil.Unix(20, 1),
+ LastWrite: timeutil.Unix(30, 1),
+ },
+ },
+ }
+
+ state := IndexUsageStatistics{}
+
+ for i := range testCases {
+ state.Add(&testCases[i].data)
+ require.Equal(t, testCases[i].expected, state)
+ }
+
+ // Ensure that we have tested all fields.
+ numFields := reflect.ValueOf(state).NumField()
+ for i := 0; i < numFields; i++ {
+ val := reflect.ValueOf(state).Field(i)
+ require.False(t, val.IsZero(), "expected all fields to be tested, but %s is not", val.String())
+ }
+}
diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel
index aa7c334b68fd..eaae213668b9 100644
--- a/pkg/sql/BUILD.bazel
+++ b/pkg/sql/BUILD.bazel
@@ -302,6 +302,7 @@ go_library(
"//pkg/sql/faketreeeval",
"//pkg/sql/flowinfra",
"//pkg/sql/gcjob/gcjobnotifier",
+ "//pkg/sql/idxusage",
"//pkg/sql/inverted",
"//pkg/sql/lex",
"//pkg/sql/memsize",
diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go
index 56e71697d82d..b743b586149f 100644
--- a/pkg/sql/backfill/backfill.go
+++ b/pkg/sql/backfill/backfill.go
@@ -702,7 +702,7 @@ func (ib *IndexBackfiller) initIndexes(desc catalog.TableDescriptor) util.FastIn
ib.added = append(ib.added, idx)
for i := range ib.cols {
id := ib.cols[i].GetID()
- if colIDs.Contains(id) && i < len(desc.PublicColumns()) {
+ if colIDs.Contains(id) && i < len(desc.PublicColumns()) && !ib.cols[i].IsVirtual() {
valNeededForCol.Add(i)
}
}
diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go
index f5cbb84cbda2..cba063b0f3f4 100644
--- a/pkg/sql/conn_executor.go
+++ b/pkg/sql/conn_executor.go
@@ -35,6 +35,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/execstats"
+ "github.com/cockroachdb/cockroach/pkg/sql/idxusage"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
@@ -268,6 +269,9 @@ type Server struct {
// pool is the parent monitor for all session monitors.
pool *mon.BytesMonitor
+ // indexUsageStats tracks the index usage statistics.
+ indexUsageStats *idxusage.LocalIndexUsageStats
+
// Metrics is used to account normal queries.
Metrics Metrics
@@ -319,7 +323,7 @@ func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server {
sqlstats.SQLStatReset,
reportedSQLStats,
)
- return &Server{
+ s := &Server{
cfg: cfg,
Metrics: metrics,
InternalMetrics: makeMetrics(cfg, true /* internal */),
@@ -327,7 +331,13 @@ func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server {
sqlStats: sqlStats,
reportedStats: reportedSQLStats,
reCache: tree.NewRegexpCache(512),
+ indexUsageStats: idxusage.NewLocalIndexUsageStats(&idxusage.Config{
+ ChannelSize: idxusage.DefaultChannelSize,
+ Setting: cfg.Settings,
+ }),
}
+
+ return s
}
func makeMetrics(cfg *ExecutorConfig, internal bool) Metrics {
@@ -380,6 +390,7 @@ func makeMetrics(cfg *ExecutorConfig, internal bool) Metrics {
// Start starts the Server's background processing.
func (s *Server) Start(ctx context.Context, stopper *stop.Stopper) {
+ s.indexUsageStats.Start(ctx, stopper)
// Start a loop to clear SQL stats at the max reset interval. This is
// to ensure that we always have some worker clearing SQL stats to avoid
// continually allocating space for the SQL stats. Additionally, spawn
@@ -734,6 +745,7 @@ func (s *Server) newConnExecutor(
executorType: executorTypeExec,
hasCreatedTemporarySchema: false,
stmtDiagnosticsRecorder: s.cfg.StmtDiagnosticsRecorder,
+ indexUsageStatsWriter: s.indexUsageStats,
}
ex.state.txnAbortCount = ex.metrics.EngineMetrics.TxnAbortCount
@@ -1280,6 +1292,9 @@ type connExecutor struct {
// stmtDiagnosticsRecorder is used to track which queries need to have
// information collected.
stmtDiagnosticsRecorder *stmtdiagnostics.Registry
+
+ // indexUsageStatsWriter is used to track index usage stats.
+ indexUsageStatsWriter idxusage.Writer
}
// ctxHolder contains a connection's context and, while session tracing is
@@ -2287,20 +2302,21 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo
SQLStatsResetter: ex.server,
CompactEngineSpan: ex.server.cfg.CompactEngineSpanFunc,
},
- SessionMutator: ex.dataMutator,
- VirtualSchemas: ex.server.cfg.VirtualSchemas,
- Tracing: &ex.sessionTracing,
- NodesStatusServer: ex.server.cfg.NodesStatusServer,
- RegionsServer: ex.server.cfg.RegionsServer,
- SQLStatusServer: ex.server.cfg.SQLStatusServer,
- MemMetrics: &ex.memMetrics,
- Descs: &ex.extraTxnState.descCollection,
- ExecCfg: ex.server.cfg,
- DistSQLPlanner: ex.server.cfg.DistSQLPlanner,
- TxnModesSetter: ex,
- Jobs: &ex.extraTxnState.jobs,
- SchemaChangeJobCache: ex.extraTxnState.schemaChangeJobsCache,
- statsStorage: ex.server.sqlStats,
+ SessionMutator: ex.dataMutator,
+ VirtualSchemas: ex.server.cfg.VirtualSchemas,
+ Tracing: &ex.sessionTracing,
+ NodesStatusServer: ex.server.cfg.NodesStatusServer,
+ RegionsServer: ex.server.cfg.RegionsServer,
+ SQLStatusServer: ex.server.cfg.SQLStatusServer,
+ MemMetrics: &ex.memMetrics,
+ Descs: &ex.extraTxnState.descCollection,
+ ExecCfg: ex.server.cfg,
+ DistSQLPlanner: ex.server.cfg.DistSQLPlanner,
+ TxnModesSetter: ex,
+ Jobs: &ex.extraTxnState.jobs,
+ SchemaChangeJobCache: ex.extraTxnState.schemaChangeJobsCache,
+ statsStorage: ex.server.sqlStats,
+ indexUsageStatsWriter: ex.indexUsageStatsWriter,
}
}
diff --git a/pkg/sql/idxusage/BUILD.bazel b/pkg/sql/idxusage/BUILD.bazel
new file mode 100644
index 000000000000..8c198f69b4d2
--- /dev/null
+++ b/pkg/sql/idxusage/BUILD.bazel
@@ -0,0 +1,38 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "idxusage",
+ srcs = [
+ "cluster_settings.go",
+ "index_usage_stats.go",
+ "local_idx_usage_stats.go",
+ "test_utils.go",
+ ],
+ importpath = "github.com/cockroachdb/cockroach/pkg/sql/idxusage",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//pkg/roachpb",
+ "//pkg/settings",
+ "//pkg/settings/cluster",
+ "//pkg/util/log",
+ "//pkg/util/stop",
+ "//pkg/util/syncutil",
+ "//pkg/util/timeutil",
+ "@com_github_cockroachdb_errors//:errors",
+ ],
+)
+
+go_test(
+ name = "idxusage_test",
+ srcs = ["local_index_usage_stats_test.go"],
+ embed = [":idxusage"],
+ deps = [
+ "//pkg/roachpb",
+ "//pkg/settings/cluster",
+ "//pkg/util/leaktest",
+ "//pkg/util/log",
+ "//pkg/util/stop",
+ "//pkg/util/timeutil",
+ "@com_github_stretchr_testify//require",
+ ],
+)
diff --git a/pkg/sql/idxusage/cluster_settings.go b/pkg/sql/idxusage/cluster_settings.go
new file mode 100644
index 000000000000..68343cd0e5f9
--- /dev/null
+++ b/pkg/sql/idxusage/cluster_settings.go
@@ -0,0 +1,18 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package idxusage
+
+import "github.com/cockroachdb/cockroach/pkg/settings"
+
+// Enable determines whether to collect per-index usage statistics.
+var Enable = settings.RegisterBoolSetting(
+ "sql.metrics.index_usage_stats.enabled", "collect per index usage statistics", true, /* defaultValue */
+).WithPublic()
diff --git a/pkg/sql/idxusage/index_usage_stats.go b/pkg/sql/idxusage/index_usage_stats.go
new file mode 100644
index 000000000000..425be80a7782
--- /dev/null
+++ b/pkg/sql/idxusage/index_usage_stats.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+//
+// idxusage is a subsystem that is responsible for collecting index usage
+// statistics.
+
+package idxusage
+
+import (
+ "context"
+
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
+)
+
+// Writer provides interface to record index usage statistics.
+type Writer interface {
+ // RecordRead records a read operation on the specified index.
+ RecordRead(ctx context.Context, key roachpb.IndexUsageKey)
+
+ // TODO(azhng): as we introduce more plumbing throughout the codebase,
+ // we should introduce additional interfaces here to record other index usage
+ // type.
+}
+
+// IteratorOptions provides knobs to change the iterating behavior when
+// calling ForEach.
+type IteratorOptions struct {
+ SortedTableID bool
+ SortedIndexID bool
+ Max *uint64
+}
+
+// StatsVisitor is the callback invoked when calling ForEach.
+type StatsVisitor func(key *roachpb.IndexUsageKey, value *roachpb.IndexUsageStatistics) error
+
+// Reader provides interfaces to retrieve index usage statistics from the
+// subsystem.
+type Reader interface {
+ // Get returns the index usage statistics for a given key.
+ Get(key roachpb.IndexUsageKey) roachpb.IndexUsageStatistics
+
+ // ForEach iterates through all stored index usage statistics
+ // based on the options specified in IteratorOptions. If an error is
+ // encountered when calling StatsVisitor, the iteration is aborted.
+ ForEach(options IteratorOptions, visitor StatsVisitor) error
+}
diff --git a/pkg/sql/idxusage/local_idx_usage_stats.go b/pkg/sql/idxusage/local_idx_usage_stats.go
new file mode 100644
index 000000000000..f2d3c77c5679
--- /dev/null
+++ b/pkg/sql/idxusage/local_idx_usage_stats.go
@@ -0,0 +1,368 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package idxusage
+
+import (
+ "context"
+ "math"
+ "sort"
+
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
+ "github.com/cockroachdb/cockroach/pkg/settings/cluster"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/cockroachdb/cockroach/pkg/util/stop"
+ "github.com/cockroachdb/cockroach/pkg/util/syncutil"
+ "github.com/cockroachdb/cockroach/pkg/util/timeutil"
+ "github.com/cockroachdb/errors"
+)
+
+// usageType is the enum specifying the type of usage of an index.
+type usageType int8
+
+const (
+ // readOp indicates that a read operation has occurred for an index.
+ readOp usageType = iota
+
+ // writeOp indicates that a write operation has occurred for an index.
+ writeOp
+)
+
+// indexUse is the payload struct that records a single instance of the index
+// usage.
+type indexUse struct {
+ // key is what specify a particular index. It's a tuple of
+ // (table_id, index_id).
+ key roachpb.IndexUsageKey
+
+ // usageTyp specifies how this index is being used.
+ usageTyp usageType
+}
+
+// LocalIndexUsageStats is a node-local provider of index usage statistics.
+// It implements both the idxusage.Reader and idxusage.Writer interfaces.
+//
+// NOTE: The index usage statistics is collected asynchronously by running a
+// statistics ingestion goroutine in the background. This is to avoid lock
+// contention during the critical path of query execution. This struct has the
+// same lifetime as the sql.Server and the Start() method should be called as
+// soon as possible to start the background ingestion goroutine.
+type LocalIndexUsageStats struct {
+	// statsChan is the channel through which all index usage metadata is
+	// passed.
+ statsChan chan indexUse
+
+ st *cluster.Settings
+
+ mu struct {
+ syncutil.RWMutex
+
+ // usageStats stores index usage statistics per unique roachpb.TableID.
+ usageStats map[roachpb.TableID]*tableIndexStats
+ }
+
+ // testingKnobs provide utilities for tests to hook into the internal states
+ // of the LocalIndexUsageStats.
+ testingKnobs *TestingKnobs
+}
+
+// tableIndexStats tracks index usage statistics per table.
+type tableIndexStats struct {
+ syncutil.RWMutex
+
+ tableID roachpb.TableID
+
+ // stats contains the usage information per unique roachpb.IndexID.
+ stats map[roachpb.IndexID]*indexStats
+}
+
+// indexStats tracks index usage statistics per index.
+type indexStats struct {
+ syncutil.RWMutex
+ roachpb.IndexUsageStatistics
+}
+
+// Config is the configuration struct used to instantiate the LocalIndexUsageStats.
+type Config struct {
+ // ChannelSize is the size of buffered channel for the statsChan in
+ // LocalIndexUsageStats.
+ ChannelSize uint64
+
+ // Setting is used to read cluster settings.
+ Setting *cluster.Settings
+
+ // Knobs is the testing knobs used for tests.
+ Knobs *TestingKnobs
+}
+
+// DefaultChannelSize is the default size of the statsChan.
+const DefaultChannelSize = uint64(128)
+
+var _ Reader = &LocalIndexUsageStats{}
+var _ Writer = &LocalIndexUsageStats{}
+
+var emptyIndexUsageStats roachpb.IndexUsageStatistics
+
+// NewLocalIndexUsageStats returns a new instance of LocalIndexUsageStats.
+func NewLocalIndexUsageStats(cfg *Config) *LocalIndexUsageStats {
+ is := &LocalIndexUsageStats{
+ statsChan: make(chan indexUse, cfg.ChannelSize),
+ st: cfg.Setting,
+ testingKnobs: cfg.Knobs,
+ }
+ is.mu.usageStats = make(map[roachpb.TableID]*tableIndexStats)
+
+ return is
+}
+
+// Start starts the background goroutine that is responsible for collecting
+// index usage statistics.
+func (s *LocalIndexUsageStats) Start(ctx context.Context, stopper *stop.Stopper) {
+ s.startStatsIngestionLoop(ctx, stopper)
+}
+
+// RecordRead implements the idxusage.Writer interface.
+func (s *LocalIndexUsageStats) RecordRead(ctx context.Context, key roachpb.IndexUsageKey) {
+ s.record(ctx, indexUse{
+ key: key,
+ usageTyp: readOp,
+ })
+}
+
+func (s *LocalIndexUsageStats) record(ctx context.Context, payload indexUse) {
+	// If the index usage stats collection is disabled, we abort.
+ if !Enable.Get(&s.st.SV) {
+ return
+ }
+ select {
+ case s.statsChan <- payload:
+ default:
+ if log.V(1 /* level */) {
+ log.Infof(ctx, "index usage stats provider channel full, discarding new stats")
+ }
+ }
+}
+
+// Get implements the idxusage.Reader interface.
+func (s *LocalIndexUsageStats) Get(key roachpb.IndexUsageKey) roachpb.IndexUsageStatistics {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ table, ok := s.mu.usageStats[key.TableID]
+ if !ok {
+ // We return a copy of the empty stats.
+ emptyStats := emptyIndexUsageStats
+ return emptyStats
+ }
+
+ table.RLock()
+ defer table.RUnlock()
+
+ indexStats, ok := table.stats[key.IndexID]
+ if !ok {
+ emptyStats := emptyIndexUsageStats
+ return emptyStats
+ }
+
+ // Take the read lock while returning the internal data.
+ indexStats.RLock()
+ defer indexStats.RUnlock()
+ return indexStats.IndexUsageStatistics
+}
+
+// ForEach implements the idxusage.Reader interface.
+func (s *LocalIndexUsageStats) ForEach(options IteratorOptions, visitor StatsVisitor) error {
+ maxIterationLimit := uint64(math.MaxUint64)
+ if options.Max != nil {
+ maxIterationLimit = *options.Max
+ }
+
+ s.mu.RLock()
+ var tableIDLists []roachpb.TableID
+ for tableID := range s.mu.usageStats {
+ tableIDLists = append(tableIDLists, tableID)
+ }
+
+ if options.SortedTableID {
+ sort.Slice(tableIDLists, func(i, j int) bool {
+ return tableIDLists[i] < tableIDLists[j]
+ })
+ }
+
+ s.mu.RUnlock()
+
+ for _, tableID := range tableIDLists {
+ tableIdxStats := s.getStatsForTableID(tableID, false /* createIfNotExists */)
+
+	// This means the data is being cleared before we can fetch it. It's not an
+ // error, so we simply just skip over it.
+ if tableIdxStats == nil {
+ continue
+ }
+
+ var err error
+ maxIterationLimit, err = tableIdxStats.iterateIndexStats(options.SortedIndexID, maxIterationLimit, visitor)
+ if err != nil {
+ return errors.Wrap(err, "unexpected error encountered when iterating through index usage stats")
+ }
+ // If we have already reached iterating limit, we abort iteration.
+ if maxIterationLimit == 0 {
+ break
+ }
+ }
+
+ return nil
+}
+
+func (s *LocalIndexUsageStats) clear() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for _, tableStats := range s.mu.usageStats {
+ tableStats.clear()
+ }
+}
+
+func (s *LocalIndexUsageStats) insertIndexUsage(idxUse *indexUse) {
+ tableStats := s.getStatsForTableID(idxUse.key.TableID, true /* createIfNotExists */)
+ indexStats := tableStats.getStatsForIndexID(idxUse.key.IndexID, true /* createIfNotExists */)
+ indexStats.Lock()
+ defer indexStats.Unlock()
+ switch idxUse.usageTyp {
+	// TODO(azhng): include TotalRowsRead/TotalRowsWritten field once it is plumbed
+ // into the SQL engine.
+ case readOp:
+ indexStats.TotalReadCount++
+ indexStats.LastRead = timeutil.Now()
+	// TODO(azhng): include TotalRowsRead field once it is plumbed into
+ // the exec engine.
+ case writeOp:
+ indexStats.TotalWriteCount++
+ indexStats.LastWrite = timeutil.Now()
+ }
+}
+
+func (s *LocalIndexUsageStats) getStatsForTableID(
+ id roachpb.TableID, createIfNotExists bool,
+) *tableIndexStats {
+ if createIfNotExists {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ } else {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ }
+
+ if tableIndexStats, ok := s.mu.usageStats[id]; ok {
+ return tableIndexStats
+ }
+
+ if createIfNotExists {
+ newTableIndexStats := &tableIndexStats{
+ tableID: id,
+ stats: make(map[roachpb.IndexID]*indexStats),
+ }
+ s.mu.usageStats[id] = newTableIndexStats
+ return newTableIndexStats
+ }
+
+ return nil
+}
+
+func (t *tableIndexStats) getStatsForIndexID(
+ id roachpb.IndexID, createIfNotExists bool,
+) *indexStats {
+ if createIfNotExists {
+ t.Lock()
+ defer t.Unlock()
+ } else {
+ t.RLock()
+ defer t.RUnlock()
+ }
+
+ if stats, ok := t.stats[id]; ok {
+ return stats
+ }
+ if createIfNotExists {
+ newUsageEntry := &indexStats{}
+ t.stats[id] = newUsageEntry
+ return newUsageEntry
+ }
+ return nil
+}
+
+func (t *tableIndexStats) iterateIndexStats(
+ orderedIndexID bool, iterLimit uint64, visitor StatsVisitor,
+) (newIterLimit uint64, err error) {
+ var indexIDs []roachpb.IndexID
+ t.RLock()
+ for indexID := range t.stats {
+ if iterLimit == 0 {
+ break
+ }
+ indexIDs = append(indexIDs, indexID)
+ iterLimit--
+ }
+ t.RUnlock()
+
+ if orderedIndexID {
+ sort.Slice(indexIDs, func(i, j int) bool {
+ return indexIDs[i] < indexIDs[j]
+ })
+ }
+
+ for _, indexID := range indexIDs {
+ indexStats := t.getStatsForIndexID(indexID, false /* createIfNotExists */)
+
+ // This means the data is being cleared before we can fetch it. It's not an
+ // error, so we simply just skip over it.
+ if indexStats == nil {
+ continue
+ }
+
+ indexStats.RLock()
+ // Copy out the stats while holding read lock.
+ statsCopy := indexStats.IndexUsageStatistics
+ indexStats.RUnlock()
+
+ if err := visitor(&roachpb.IndexUsageKey{
+ TableID: t.tableID,
+ IndexID: indexID,
+ }, &statsCopy); err != nil {
+ return 0 /* newIterLimit */, err
+ }
+ }
+ return iterLimit, nil
+}
+
+func (t *tableIndexStats) clear() {
+ t.Lock()
+ defer t.Unlock()
+
+ t.stats = make(map[roachpb.IndexID]*indexStats, len(t.stats)/2)
+}
+
+func (s *LocalIndexUsageStats) startStatsIngestionLoop(ctx context.Context, stopper *stop.Stopper) {
+ _ = stopper.RunAsyncTask(ctx, "index-usage-stats-ingest", func(ctx context.Context) {
+ for {
+ select {
+ case payload := <-s.statsChan:
+ s.insertIndexUsage(&payload)
+ if s.testingKnobs != nil && s.testingKnobs.OnIndexUsageStatsProcessedCallback != nil {
+ s.testingKnobs.OnIndexUsageStatsProcessedCallback()
+ }
+ case <-stopper.ShouldQuiesce():
+ return
+ case <-ctx.Done():
+ return
+ }
+ }
+ })
+}
diff --git a/pkg/sql/idxusage/local_index_usage_stats_test.go b/pkg/sql/idxusage/local_index_usage_stats_test.go
new file mode 100644
index 000000000000..6ef274420092
--- /dev/null
+++ b/pkg/sql/idxusage/local_index_usage_stats_test.go
@@ -0,0 +1,202 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package idxusage
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/cockroachdb/cockroach/pkg/roachpb"
+ "github.com/cockroachdb/cockroach/pkg/settings/cluster"
+ "github.com/cockroachdb/cockroach/pkg/util/leaktest"
+ "github.com/cockroachdb/cockroach/pkg/util/log"
+ "github.com/cockroachdb/cockroach/pkg/util/stop"
+ "github.com/cockroachdb/cockroach/pkg/util/timeutil"
+ "github.com/stretchr/testify/require"
+)
+
+func checkTimeHelper(t *testing.T, expected, actual time.Time, delta time.Duration) {
+ diff := actual.Sub(expected)
+ require.Less(t, diff, delta)
+}
+
+func checkStatsHelper(t *testing.T, expected, actual roachpb.IndexUsageStatistics) {
+ require.Equal(t, expected.TotalReadCount, actual.TotalReadCount)
+ require.Equal(t, expected.TotalWriteCount, actual.TotalWriteCount)
+
+ require.Equal(t, expected.TotalRowsRead, actual.TotalRowsRead)
+ require.Equal(t, expected.TotalRowsWritten, actual.TotalRowsWritten)
+
+ checkTimeHelper(t, expected.LastRead, actual.LastRead, time.Second)
+ checkTimeHelper(t, expected.LastWrite, actual.LastWrite, time.Second)
+}
+
+func TestIndexUsageStatisticsSubsystem(t *testing.T) {
+ defer leaktest.AfterTest(t)()
+ defer log.Scope(t).Close(t)
+
+ ctx := context.Background()
+ stopper := stop.NewStopper()
+
+ indices := []roachpb.IndexUsageKey{
+ {
+ TableID: 1,
+ IndexID: 1,
+ },
+ {
+ TableID: 2,
+ IndexID: 1,
+ },
+ {
+ TableID: 2,
+ IndexID: 2,
+ },
+ }
+
+ testInputs := []indexUse{
+ {
+ key: indices[0],
+ usageTyp: readOp,
+ },
+ {
+ key: indices[0],
+ usageTyp: readOp,
+ },
+ {
+ key: indices[0],
+ usageTyp: writeOp,
+ },
+ {
+ key: indices[1],
+ usageTyp: readOp,
+ },
+ {
+ key: indices[1],
+ usageTyp: readOp,
+ },
+ {
+ key: indices[2],
+ usageTyp: writeOp,
+ },
+ {
+ key: indices[2],
+ usageTyp: writeOp,
+ },
+ }
+
+ expectedIndexUsage := map[roachpb.IndexUsageKey]roachpb.IndexUsageStatistics{
+ indices[0]: {
+ TotalReadCount: 2,
+ LastRead: timeutil.Now(),
+ TotalWriteCount: 1,
+ LastWrite: timeutil.Now(),
+ },
+ indices[1]: {
+ TotalReadCount: 2,
+ LastRead: timeutil.Now(),
+ },
+ indices[2]: {
+ TotalWriteCount: 2,
+ LastWrite: timeutil.Now(),
+ },
+ }
+
+ statsProcessedSignal := make(chan struct{})
+ onStatsIngested := func() {
+ statsProcessedSignal <- struct{}{}
+ }
+ waitForStatsIngested := func() {
+ statsProcessed := 0
+ var timer timeutil.Timer
+ timer.Reset(time.Second)
+ for statsProcessed < len(testInputs) {
+ select {
+ case <-statsProcessedSignal:
+ statsProcessed++
+ case <-timer.C:
+ timer.Read = true
+ t.Fatalf("expected stats ingestion to complete, but it didn't.")
+ }
+ }
+ }
+
+ localIndexUsage := NewLocalIndexUsageStats(&Config{
+ ChannelSize: 10,
+ Setting: cluster.MakeTestingClusterSettings(),
+ Knobs: &TestingKnobs{
+ OnIndexUsageStatsProcessedCallback: onStatsIngested,
+ },
+ })
+
+ localIndexUsage.Start(ctx, stopper)
+ defer stopper.Stop(ctx)
+
+ for _, input := range testInputs {
+ localIndexUsage.record(ctx, input)
+ }
+
+ waitForStatsIngested()
+
+ t.Run("point lookup", func(t *testing.T) {
+ actualEntryCount := 0
+ for _, index := range indices {
+ stats := localIndexUsage.Get(index)
+ require.NotNil(t, stats)
+
+ actualEntryCount++
+
+ checkStatsHelper(t, expectedIndexUsage[index], stats)
+ }
+ })
+
+ t.Run("iterator", func(t *testing.T) {
+ actualEntryCount := 0
+ err := localIndexUsage.ForEach(IteratorOptions{}, func(key *roachpb.IndexUsageKey, value *roachpb.IndexUsageStatistics) error {
+ actualEntryCount++
+
+ checkStatsHelper(t, expectedIndexUsage[*key], *value)
+ return nil
+ })
+ require.Equal(t, len(expectedIndexUsage), actualEntryCount)
+ require.NoError(t, err)
+ })
+
+ t.Run("iterator with options", func(t *testing.T) {
+ actualEntryCount := uint64(0)
+ maxEntry := uint64(2)
+ err := localIndexUsage.ForEach(IteratorOptions{
+ SortedTableID: true,
+ SortedIndexID: true,
+ Max: &maxEntry,
+ }, func(key *roachpb.IndexUsageKey, value *roachpb.IndexUsageStatistics) error {
+ actualEntryCount++
+
+ checkStatsHelper(t, expectedIndexUsage[*key], *value)
+
+ return nil
+ })
+ require.Equal(t, maxEntry, actualEntryCount)
+ require.NoError(t, err)
+ })
+
+ t.Run("clear", func(t *testing.T) {
+ actualEntryCount := 0
+ expectedEntryCount := 0
+ localIndexUsage.clear()
+ err := localIndexUsage.ForEach(IteratorOptions{}, func(_ *roachpb.IndexUsageKey, _ *roachpb.IndexUsageStatistics) error {
+ actualEntryCount++
+ return nil
+ })
+ require.NoError(t, err)
+ require.Equal(t, expectedEntryCount, actualEntryCount)
+ })
+}
diff --git a/pkg/sql/idxusage/test_utils.go b/pkg/sql/idxusage/test_utils.go
new file mode 100644
index 000000000000..f03f825ffa30
--- /dev/null
+++ b/pkg/sql/idxusage/test_utils.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package idxusage
+
+// TestingKnobs is the testing knobs that provides callbacks that unit tests
+// can hook into.
+type TestingKnobs struct {
+ // OnIndexUsageStatsProcessedCallback is invoked whenever a index usage event
+ // is processed.
+ OnIndexUsageStatsProcessedCallback func()
+}
diff --git a/pkg/sql/logictest/testdata/logic_test/virtual_columns b/pkg/sql/logictest/testdata/logic_test/virtual_columns
index 6f9b3e70b0a0..7bffb344fd24 100644
--- a/pkg/sql/logictest/testdata/logic_test/virtual_columns
+++ b/pkg/sql/logictest/testdata/logic_test/virtual_columns
@@ -1193,3 +1193,18 @@ SELECT * FROM t_65915;
statement ok
DROP TABLE t_65915
+
+# Test that backfills on indexes with non-null virtual columns work.
+subtest 67528
+
+statement ok
+CREATE TABLE t67528 (
+ s STRING,
+ v STRING AS (lower(s)) VIRTUAL NOT NULL
+)
+
+statement ok
+INSERT INTO t67528 (s) VALUES ('')
+
+statement ok
+CREATE INDEX ON t67528 (v DESC)
diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go
index 44361bf7fca3..2e387f3ef7be 100644
--- a/pkg/sql/opt_exec_factory.go
+++ b/pkg/sql/opt_exec_factory.go
@@ -13,7 +13,6 @@ package sql
import (
"bytes"
"compress/zlib"
- "context"
"encoding/base64"
"fmt"
"net/url"
@@ -96,7 +95,8 @@ func (ef *execFactory) ConstructScan(
// users might be able to access a view that uses a higher privilege table.
ef.planner.skipSelectPrivilegeChecks = true
defer func() { ef.planner.skipSelectPrivilegeChecks = false }()
- if err := scan.initTable(context.TODO(), ef.planner, tabDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := scan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil {
return nil, err
}
@@ -129,6 +129,14 @@ func (ef *execFactory) ConstructScan(
scan.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(params.Locking.WaitPolicy)
}
scan.localityOptimized = params.LocalityOptimized
+ if !ef.isExplain {
+ idxUsageKey := roachpb.IndexUsageKey{
+ TableID: roachpb.TableID(tabDesc.GetID()),
+ IndexID: roachpb.IndexID(idx.GetID()),
+ }
+ ef.planner.extendedEvalCtx.indexUsageStatsWriter.RecordRead(ctx, idxUsageKey)
+ }
+
return scan, nil
}
@@ -607,7 +615,8 @@ func (ef *execFactory) ConstructIndexJoin(
tableScan := ef.planner.Scan()
- if err := tableScan.initTable(context.TODO(), ef.planner, tabDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil {
return nil, err
}
@@ -654,7 +663,8 @@ func (ef *execFactory) ConstructLookupJoin(
colCfg := makeScanColumnsConfig(table, lookupCols)
tableScan := ef.planner.Scan()
- if err := tableScan.initTable(context.TODO(), ef.planner, tabDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil {
return nil, err
}
@@ -664,6 +674,14 @@ func (ef *execFactory) ConstructLookupJoin(
tableScan.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(locking.WaitPolicy)
}
+ if !ef.isExplain {
+ idxUsageKey := roachpb.IndexUsageKey{
+ TableID: roachpb.TableID(tabDesc.GetID()),
+ IndexID: roachpb.IndexID(idx.GetID()),
+ }
+ ef.planner.extendedEvalCtx.indexUsageStatsWriter.RecordRead(ctx, idxUsageKey)
+ }
+
n := &lookupJoinNode{
input: input.(planNode),
table: tableScan,
@@ -728,7 +746,8 @@ func (ef *execFactory) constructVirtualTableLookupJoin(
// Set up a scanNode that we won't actually use, just to get the needed
// column analysis.
colCfg := makeScanColumnsConfig(table, lookupCols)
- if err := tableScan.initTable(context.TODO(), ef.planner, tableDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := tableScan.initTable(ctx, ef.planner, tableDesc, nil, colCfg); err != nil {
return nil, err
}
tableScan.index = idx
@@ -779,11 +798,20 @@ func (ef *execFactory) ConstructInvertedJoin(
colCfg := makeScanColumnsConfig(table, lookupCols)
tableScan := ef.planner.Scan()
- if err := tableScan.initTable(context.TODO(), ef.planner, tabDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil {
return nil, err
}
tableScan.index = idx
+ if !ef.isExplain {
+ idxUsageKey := roachpb.IndexUsageKey{
+ TableID: roachpb.TableID(tabDesc.GetID()),
+ IndexID: roachpb.IndexID(idx.GetID()),
+ }
+ ef.planner.extendedEvalCtx.indexUsageStatsWriter.RecordRead(ctx, idxUsageKey)
+ }
+
n := &invertedJoinNode{
input: input.(planNode),
table: tableScan,
@@ -835,10 +863,19 @@ func (ef *execFactory) constructScanForZigzag(
}
scan := ef.planner.Scan()
- if err := scan.initTable(context.TODO(), ef.planner, tableDesc, nil, colCfg); err != nil {
+ ctx := ef.planner.extendedEvalCtx.Ctx()
+ if err := scan.initTable(ctx, ef.planner, tableDesc, nil, colCfg); err != nil {
return nil, err
}
+ if !ef.isExplain {
+ idxUsageKey := roachpb.IndexUsageKey{
+ TableID: roachpb.TableID(tableDesc.GetID()),
+ IndexID: roachpb.IndexID(index.GetID()),
+ }
+ ef.planner.extendedEvalCtx.indexUsageStatsWriter.RecordRead(ctx, idxUsageKey)
+ }
+
scan.index = index
return scan, nil
diff --git a/pkg/sql/parser/BUILD.bazel b/pkg/sql/parser/BUILD.bazel
index a59b26b50500..12fad32e7e9c 100644
--- a/pkg/sql/parser/BUILD.bazel
+++ b/pkg/sql/parser/BUILD.bazel
@@ -10,7 +10,7 @@ go_library(
"help.go",
"lexer.go",
"parse.go",
- "scan.go",
+ "scanner.go",
"show_syntax.go",
":gen-help-messages", # keep
":sql-goyacc", # keep
@@ -28,6 +28,7 @@ go_library(
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/sql/privilege", # keep
+ "//pkg/sql/scanner",
"//pkg/sql/sem/tree",
"//pkg/sql/sessiondata",
"//pkg/sql/types",
@@ -45,7 +46,7 @@ go_test(
"lexer_test.go",
"parse_internal_test.go",
"parse_test.go",
- "scan_test.go",
+ "scanner_test.go",
":gen-helpmap-test", # keep
],
data = glob(["testdata/**"]),
diff --git a/pkg/sql/parser/lexer_test.go b/pkg/sql/parser/lexer_test.go
index 8e3699b4951e..32f7f46e5986 100644
--- a/pkg/sql/parser/lexer_test.go
+++ b/pkg/sql/parser/lexer_test.go
@@ -33,7 +33,7 @@ func TestLexer(t *testing.T) {
var scanTokens []sqlSymType
for {
var lval sqlSymType
- s.scan(&lval)
+ s.Scan(&lval)
if lval.id == 0 {
break
}
diff --git a/pkg/sql/parser/parse.go b/pkg/sql/parser/parse.go
index cb9a17d26207..75eeb3992505 100644
--- a/pkg/sql/parser/parse.go
+++ b/pkg/sql/parser/parse.go
@@ -26,14 +26,15 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
+ "github.com/cockroachdb/cockroach/pkg/sql/scanner"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
func init() {
- NewNumValFn = func(a constant.Value, s string, b bool) interface{} { return tree.NewNumVal(a, s, b) }
- NewPlaceholderFn = func(s string) (interface{}, error) { return tree.NewPlaceholder(s) }
+ scanner.NewNumValFn = func(a constant.Value, s string, b bool) interface{} { return tree.NewNumVal(a, s, b) }
+ scanner.NewPlaceholderFn = func(s string) (interface{}, error) { return tree.NewPlaceholder(s) }
}
// Statement is the result of parsing a single statement. It contains the AST
@@ -85,7 +86,7 @@ func (stmts Statements) StringWithFlags(flags tree.FmtFlags) string {
// Parser wraps a scanner, parser and other utilities present in the parser
// package.
type Parser struct {
- scanner scanner
+ scanner scanner.Scanner
lexer lexer
parserImpl sqlParserImpl
tokBuf [8]sqlSymType
@@ -138,7 +139,7 @@ func (p *Parser) scanOneStmt() (sql string, tokens []sqlSymType, done bool) {
// Scan the first token.
for {
- p.scanner.scan(&lval)
+ p.scanner.Scan(&lval)
if lval.id == 0 {
return "", nil, true
}
@@ -153,12 +154,12 @@ func (p *Parser) scanOneStmt() (sql string, tokens []sqlSymType, done bool) {
tokens = append(tokens, lval)
for {
if lval.id == ERROR {
- return p.scanner.in[startPos:], tokens, true
+ return p.scanner.In()[startPos:], tokens, true
}
- posBeforeScan := p.scanner.pos
- p.scanner.scan(&lval)
+ posBeforeScan := p.scanner.Pos()
+ p.scanner.Scan(&lval)
if lval.id == 0 || lval.id == ';' {
- return p.scanner.in[startPos:posBeforeScan], tokens, (lval.id == 0)
+ return p.scanner.In()[startPos:posBeforeScan], tokens, (lval.id == 0)
}
lval.pos -= startPos
tokens = append(tokens, lval)
@@ -167,8 +168,8 @@ func (p *Parser) scanOneStmt() (sql string, tokens []sqlSymType, done bool) {
func (p *Parser) parseWithDepth(depth int, sql string, nakedIntType *types.T) (Statements, error) {
stmts := Statements(p.stmtBuf[:0])
- p.scanner.init(sql)
- defer p.scanner.cleanup()
+ p.scanner.Init(sql)
+ defer p.scanner.Cleanup()
for {
sql, tokens, done := p.scanOneStmt()
stmt, err := p.parse(depth+1, sql, tokens, nakedIntType)
@@ -267,8 +268,8 @@ func ParseOneWithInt(sql string, nakedIntType *types.T) (Statement, error) {
// statements.
func HasMultipleStatements(sql string) bool {
var p Parser
- p.scanner.init(sql)
- defer p.scanner.cleanup()
+ p.scanner.Init(sql)
+ defer p.scanner.Cleanup()
count := 0
for {
_, _, done := p.scanOneStmt()
diff --git a/pkg/sql/parser/parse_internal_test.go b/pkg/sql/parser/parse_internal_test.go
index 729f9e0fca3f..e878bdf0673e 100644
--- a/pkg/sql/parser/parse_internal_test.go
+++ b/pkg/sql/parser/parse_internal_test.go
@@ -118,7 +118,7 @@ func TestScanOneStmt(t *testing.T) {
for _, tc := range testData {
var p Parser
- p.scanner.init(tc.sql)
+ p.scanner.Init(tc.sql)
var result []stmt
for {
diff --git a/pkg/sql/parser/scanner.go b/pkg/sql/parser/scanner.go
new file mode 100644
index 000000000000..a85480939f9d
--- /dev/null
+++ b/pkg/sql/parser/scanner.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package parser
+
+import (
+ "github.com/cockroachdb/cockroach/pkg/sql/lexbase"
+ "github.com/cockroachdb/cockroach/pkg/sql/scanner"
+)
+
+func makeScanner(str string) scanner.Scanner {
+ var s scanner.Scanner
+ s.Init(str)
+ return s
+}
+
+// SplitFirstStatement returns the length of the prefix of the string up
+// to and including the first semicolon that separates statements. If
+// there is no
+// semicolon, returns ok=false.
+func SplitFirstStatement(sql string) (pos int, ok bool) {
+ s := makeScanner(sql)
+ var lval = &sqlSymType{}
+ for {
+ s.Scan(lval)
+ switch lval.ID() {
+ case 0, lexbase.ERROR:
+ return 0, false
+ case ';':
+ return s.Pos(), true
+ }
+ }
+}
+
+// Tokens decomposes the input into lexical tokens.
+func Tokens(sql string) (tokens []TokenString, ok bool) {
+ s := makeScanner(sql)
+ for {
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if lval.ID() == lexbase.ERROR {
+ return nil, false
+ }
+ if lval.ID() == 0 {
+ break
+ }
+ tokens = append(tokens, TokenString{TokenID: lval.ID(), Str: lval.Str()})
+ }
+ return tokens, true
+}
+
+// TokenString is the unit value returned by Tokens.
+type TokenString struct {
+ TokenID int32
+ Str string
+}
+
+// LastLexicalToken returns the last lexical token. If the string has no lexical
+// tokens, returns 0 and ok=false.
+func LastLexicalToken(sql string) (lastTok int, ok bool) {
+ s := makeScanner(sql)
+ var lval = &sqlSymType{}
+ for {
+ last := lval.ID()
+ s.Scan(lval)
+ if lval.ID() == 0 {
+ return int(last), last != 0
+ }
+ }
+}
diff --git a/pkg/sql/parser/scan_test.go b/pkg/sql/parser/scanner_test.go
similarity index 89%
rename from pkg/sql/parser/scan_test.go
rename to pkg/sql/parser/scanner_test.go
index 427ffa8b8490..29632dd9d17e 100644
--- a/pkg/sql/parser/scan_test.go
+++ b/pkg/sql/parser/scanner_test.go
@@ -119,12 +119,12 @@ world$$`, []int{SCONST}},
s := makeScanner(d.sql)
var tokens []int
for {
- var lval sqlSymType
- s.scan(&lval)
- if lval.id == 0 {
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if lval.ID() == 0 {
break
}
- tokens = append(tokens, int(lval.id))
+ tokens = append(tokens, int(lval.ID()))
}
if !reflect.DeepEqual(d.expected, tokens) {
@@ -154,14 +154,14 @@ foo`, "", "foo"},
}
for i, d := range testData {
s := makeScanner(d.sql)
- var lval sqlSymType
- present, ok := s.scanComment(&lval)
+ var lval = &sqlSymType{}
+ present, ok := s.ScanComment(lval)
if d.err == "" && (!present || !ok) {
- t.Fatalf("%d: expected success, but found %s", i, lval.str)
- } else if d.err != "" && (present || ok || d.err != lval.str) {
- t.Fatalf("%d: expected %s, but found %s", i, d.err, lval.str)
+ t.Fatalf("%d: expected success, but found %s", i, lval.Str())
+ } else if d.err != "" && (present || ok || d.err != lval.Str()) {
+ t.Fatalf("%d: expected %s, but found %s", i, d.err, lval.Str())
}
- if r := s.in[s.pos:]; d.remainder != r {
+ if r := s.In()[s.Pos():]; d.remainder != r {
t.Fatalf("%d: expected '%s', but found '%s'", i, d.remainder, r)
}
}
@@ -170,10 +170,10 @@ foo`, "", "foo"},
func TestScanKeyword(t *testing.T) {
for _, kwName := range lexbase.KeywordNames {
s := makeScanner(kwName)
- var lval sqlSymType
- s.scan(&lval)
- if id := lexbase.GetKeywordID(kwName); id != lval.id {
- t.Errorf("%s: expected %d, but found %d", kwName, id, lval.id)
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if id := lexbase.GetKeywordID(kwName); id != lval.ID() {
+ t.Errorf("%s: expected %d, but found %d", kwName, id, lval.ID())
}
}
}
@@ -210,13 +210,13 @@ func TestScanNumber(t *testing.T) {
}
for _, d := range testData {
s := makeScanner(d.sql)
- var lval sqlSymType
- s.scan(&lval)
- if d.id != int(lval.id) {
- t.Errorf("%s: expected %d, but found %d", d.sql, d.id, lval.id)
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if d.id != int(lval.ID()) {
+ t.Errorf("%s: expected %d, but found %d", d.sql, d.id, lval.ID())
}
- if d.expected != lval.str {
- t.Errorf("%s: expected %s, but found %s", d.sql, d.expected, lval.str)
+ if d.expected != lval.Str() {
+ t.Errorf("%s: expected %s, but found %s", d.sql, d.expected, lval.Str())
}
}
}
@@ -232,13 +232,13 @@ func TestScanPlaceholder(t *testing.T) {
}
for _, d := range testData {
s := makeScanner(d.sql)
- var lval sqlSymType
- s.scan(&lval)
- if lval.id != PLACEHOLDER {
- t.Errorf("%s: expected %d, but found %d", d.sql, PLACEHOLDER, lval.id)
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if lval.ID() != PLACEHOLDER {
+ t.Errorf("%s: expected %d, but found %d", d.sql, PLACEHOLDER, lval.ID())
}
- if d.expected != lval.str {
- t.Errorf("%s: expected %s, but found %s", d.sql, d.expected, lval.str)
+ if d.expected != lval.Str() {
+ t.Errorf("%s: expected %s, but found %s", d.sql, d.expected, lval.Str())
}
}
}
@@ -325,10 +325,10 @@ world`},
}
for _, d := range testData {
s := makeScanner(d.sql)
- var lval sqlSymType
- s.scan(&lval)
- if d.expected != lval.str {
- t.Errorf("%s: expected %q, but found %q", d.sql, d.expected, lval.str)
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if d.expected != lval.Str() {
+ t.Errorf("%s: expected %q, but found %q", d.sql, d.expected, lval.Str())
}
}
}
@@ -358,13 +358,13 @@ func TestScanError(t *testing.T) {
}
for _, d := range testData {
s := makeScanner(d.sql)
- var lval sqlSymType
- s.scan(&lval)
- if lval.id != ERROR {
- t.Errorf("%s: expected ERROR, but found %d", d.sql, lval.id)
+ var lval = &sqlSymType{}
+ s.Scan(lval)
+ if lval.ID() != ERROR {
+ t.Errorf("%s: expected ERROR, but found %d", d.sql, lval.ID())
}
- if !testutils.IsError(errors.Newf("%s", lval.str), d.err) {
- t.Errorf("%s: expected %s, but found %v", d.sql, d.err, lval.str)
+ if !testutils.IsError(errors.Newf("%s", lval.Str()), d.err) {
+ t.Errorf("%s: expected %s, but found %v", d.sql, d.err, lval.Str())
}
}
}
diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y
index 85c5a721d231..c6ac69f04f67 100644
--- a/pkg/sql/parser/sql.y
+++ b/pkg/sql/parser/sql.y
@@ -35,6 +35,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/lexbase"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/roleoption"
+ "github.com/cockroachdb/cockroach/pkg/sql/scanner"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
@@ -152,6 +153,41 @@ func processUnaryQualOpInternal(
%}
%{
+// sqlSymType is generated by goyacc, and implements the ScanSymType interface.
+var _ scanner.ScanSymType = &sqlSymType{}
+
+func (s *sqlSymType) ID() int32 {
+ return s.id
+}
+
+func (s *sqlSymType) SetID(id int32) {
+ s.id = id
+}
+
+func (s *sqlSymType) Pos() int32 {
+ return s.pos
+}
+
+func (s *sqlSymType) SetPos(pos int32) {
+ s.pos = pos
+}
+
+func (s *sqlSymType) Str() string {
+ return s.str
+}
+
+func (s *sqlSymType) SetStr(str string) {
+ s.str = str
+}
+
+func (s *sqlSymType) UnionVal() interface{} {
+ return s.union.val
+}
+
+func (s *sqlSymType) SetUnionVal(val interface{}) {
+ s.union.val = val
+}
+
// sqlSymUnion represents a union of types, providing accessor methods
// to retrieve the underlying type stored in the union's empty interface.
// The purpose of the sqlSymUnion struct is to reduce the memory footprint of
diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go
index ff0365acda6c..05b58370ca14 100644
--- a/pkg/sql/planner.go
+++ b/pkg/sql/planner.go
@@ -27,6 +27,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
+ "github.com/cockroachdb/cockroach/pkg/sql/idxusage"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
@@ -96,6 +97,8 @@ type extendedEvalContext struct {
statsStorage sqlstats.Storage
+ indexUsageStatsWriter idxusage.Writer
+
SchemaChangerState *SchemaChangerState
}
@@ -413,9 +416,20 @@ func internalExtendedEvalCtx(
) extendedEvalContext {
evalContextTestingKnobs := execCfg.EvalContextTestingKnobs
+ var indexUsageStats idxusage.Writer
var sqlStatsResetter tree.SQLStatsResetter
if execCfg.InternalExecutor != nil {
sqlStatsResetter = execCfg.InternalExecutor.s
+ if execCfg.InternalExecutor.s != nil {
+ indexUsageStats = execCfg.InternalExecutor.s.indexUsageStats
+ } else {
+ // If the sql.Server is nil, we create a dummy index usage stats
+ // collector. The sql.Server in the ExecutorConfig is only nil
+ // during tests.
+ indexUsageStats = idxusage.NewLocalIndexUsageStats(&idxusage.Config{
+ Setting: execCfg.Settings,
+ })
+ }
}
return extendedEvalContext{
@@ -434,14 +448,15 @@ func internalExtendedEvalCtx(
InternalExecutor: execCfg.InternalExecutor,
SQLStatsResetter: sqlStatsResetter,
},
- SessionMutator: dataMutator,
- VirtualSchemas: execCfg.VirtualSchemas,
- Tracing: &SessionTracing{},
- NodesStatusServer: execCfg.NodesStatusServer,
- RegionsServer: execCfg.RegionsServer,
- Descs: tables,
- ExecCfg: execCfg,
- DistSQLPlanner: execCfg.DistSQLPlanner,
+ SessionMutator: dataMutator,
+ VirtualSchemas: execCfg.VirtualSchemas,
+ Tracing: &SessionTracing{},
+ NodesStatusServer: execCfg.NodesStatusServer,
+ RegionsServer: execCfg.RegionsServer,
+ Descs: tables,
+ ExecCfg: execCfg,
+ DistSQLPlanner: execCfg.DistSQLPlanner,
+ indexUsageStatsWriter: indexUsageStats,
}
}
diff --git a/pkg/sql/scanner/BUILD.bazel b/pkg/sql/scanner/BUILD.bazel
new file mode 100644
index 000000000000..85c6fbc81a9c
--- /dev/null
+++ b/pkg/sql/scanner/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "scanner",
+ srcs = ["scan.go"],
+ importpath = "github.com/cockroachdb/cockroach/pkg/sql/scanner",
+ visibility = ["//visibility:public"],
+ deps = ["//pkg/sql/lexbase"],
+)
diff --git a/pkg/sql/parser/scan.go b/pkg/sql/scanner/scan.go
similarity index 73%
rename from pkg/sql/parser/scan.go
rename to pkg/sql/scanner/scan.go
index e23c814d354a..c9505339d5ae 100644
--- a/pkg/sql/parser/scan.go
+++ b/pkg/sql/scanner/scan.go
@@ -8,7 +8,7 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
-package parser
+package scanner
import (
"fmt"
@@ -40,33 +40,50 @@ var NewPlaceholderFn = func(string) (interface{}, error) {
return struct{}{}, nil
}
-// scanner lexes SQL statements.
-type scanner struct {
+// ScanSymType is the interface for accessing the fields of a yacc symType.
+type ScanSymType interface {
+ ID() int32
+ SetID(int32)
+ Pos() int32
+ SetPos(int32)
+ Str() string
+ SetStr(string)
+ UnionVal() interface{}
+ SetUnionVal(interface{})
+}
+
+// Scanner lexes SQL statements.
+type Scanner struct {
in string
pos int
bytesPrealloc []byte
}
-func makeScanner(str string) scanner {
- var s scanner
- s.init(str)
- return s
+// In returns the input string.
+func (s *Scanner) In() string {
+ return s.in
+}
+
+// Pos returns the current position being lexed.
+func (s *Scanner) Pos() int {
+ return s.pos
}
-func (s *scanner) init(str string) {
+// Init initializes a new Scanner that will process str.
+func (s *Scanner) Init(str string) {
s.in = str
s.pos = 0
// Preallocate some buffer space for identifiers etc.
s.bytesPrealloc = make([]byte, len(str))
}
-// cleanup is used to avoid holding on to memory unnecessarily (for the cases
-// where we reuse a scanner).
-func (s *scanner) cleanup() {
+// Cleanup is used to avoid holding on to memory unnecessarily (for the cases
+// where we reuse a Scanner).
+func (s *Scanner) Cleanup() {
s.bytesPrealloc = nil
}
-func (s *scanner) allocBytes(length int) []byte {
+func (s *Scanner) allocBytes(length int) []byte {
if len(s.bytesPrealloc) >= length {
res := s.bytesPrealloc[:length:length]
s.bytesPrealloc = s.bytesPrealloc[length:]
@@ -77,15 +94,15 @@ func (s *scanner) allocBytes(length int) []byte {
// buffer returns an empty []byte buffer that can be appended to. Any unused
// portion can be returned later using returnBuffer.
-func (s *scanner) buffer() []byte {
+func (s *Scanner) buffer() []byte {
buf := s.bytesPrealloc[:0]
s.bytesPrealloc = nil
return buf
}
-// returnBuffer returns the unused portion of buf to the scanner, to be used for
+// returnBuffer returns the unused portion of buf to the Scanner, to be used for
// future allocBytes() or buffer() calls. The caller must not use buf again.
-func (s *scanner) returnBuffer(buf []byte) {
+func (s *Scanner) returnBuffer(buf []byte) {
if len(buf) < cap(buf) {
s.bytesPrealloc = buf[len(buf):]
}
@@ -93,16 +110,17 @@ func (s *scanner) returnBuffer(buf []byte) {
// finishString casts the given buffer to a string and returns the unused
// portion of the buffer. The caller must not use buf again.
-func (s *scanner) finishString(buf []byte) string {
+func (s *Scanner) finishString(buf []byte) string {
str := *(*string)(unsafe.Pointer(&buf))
s.returnBuffer(buf)
return str
}
-func (s *scanner) scan(lval *sqlSymType) {
- lval.id = 0
- lval.pos = int32(s.pos)
- lval.str = "EOF"
+// Scan scans the next token and populates its information into lval.
+func (s *Scanner) Scan(lval ScanSymType) {
+ lval.SetID(0)
+ lval.SetPos(int32(s.pos))
+ lval.SetStr("EOF")
if _, ok := s.skipWhitespace(lval, true); !ok {
return
@@ -110,13 +128,13 @@ func (s *scanner) scan(lval *sqlSymType) {
ch := s.next()
if ch == eof {
- lval.pos = int32(s.pos)
+ lval.SetPos(int32(s.pos))
return
}
- lval.id = int32(ch)
- lval.pos = int32(s.pos - 1)
- lval.str = s.in[lval.pos:s.pos]
+ lval.SetID(int32(ch))
+ lval.SetPos(int32(s.pos - 1))
+ lval.SetStr(s.in[lval.Pos():s.pos])
switch ch {
case '$':
@@ -125,7 +143,7 @@ func (s *scanner) scan(lval *sqlSymType) {
s.scanPlaceholder(lval)
return
} else if s.scanDollarQuotedString(lval) {
- lval.id = SCONST
+ lval.SetID(lexbase.SCONST)
return
}
return
@@ -133,14 +151,14 @@ func (s *scanner) scan(lval *sqlSymType) {
case identQuote:
// "[^"]"
if s.scanString(lval, identQuote, false /* allowEscapes */, true /* requireUTF8 */) {
- lval.id = IDENT
+ lval.SetID(lexbase.IDENT)
}
return
case singleQuote:
// '[^']'
if s.scanString(lval, ch, false /* allowEscapes */, true /* requireUTF8 */) {
- lval.id = SCONST
+ lval.SetID(lexbase.SCONST)
}
return
@@ -150,7 +168,7 @@ func (s *scanner) scan(lval *sqlSymType) {
// b'[^']'
s.pos++
if s.scanString(lval, singleQuote, true /* allowEscapes */, false /* requireUTF8 */) {
- lval.id = BCONST
+ lval.SetID(lexbase.BCONST)
}
return
}
@@ -167,7 +185,7 @@ func (s *scanner) scan(lval *sqlSymType) {
// [eE]'[^']'
s.pos++
if s.scanString(lval, singleQuote, true /* allowEscapes */, true /* requireUTF8 */) {
- lval.id = SCONST
+ lval.SetID(lexbase.SCONST)
}
return
}
@@ -200,7 +218,7 @@ func (s *scanner) scan(lval *sqlSymType) {
switch t := s.peek(); {
case t == '.': // ..
s.pos++
- lval.id = DOT_DOT
+ lval.SetID(lexbase.DOT_DOT)
return
case lexbase.IsDigit(t):
s.scanNumber(lval, ch)
@@ -212,17 +230,17 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '=': // !=
s.pos++
- lval.id = NOT_EQUALS
+ lval.SetID(lexbase.NOT_EQUALS)
return
case '~': // !~
s.pos++
switch s.peek() {
case '*': // !~*
s.pos++
- lval.id = NOT_REGIMATCH
+ lval.SetID(lexbase.NOT_REGIMATCH)
return
}
- lval.id = NOT_REGMATCH
+ lval.SetID(lexbase.NOT_REGMATCH)
return
}
return
@@ -231,15 +249,15 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '?': // ??
s.pos++
- lval.id = HELPTOKEN
+ lval.SetID(lexbase.HELPTOKEN)
return
case '|': // ?|
s.pos++
- lval.id = JSON_SOME_EXISTS
+ lval.SetID(lexbase.JSON_SOME_EXISTS)
return
case '&': // ?&
s.pos++
- lval.id = JSON_ALL_EXISTS
+ lval.SetID(lexbase.JSON_ALL_EXISTS)
return
}
return
@@ -251,22 +269,22 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '=': // <<=
s.pos++
- lval.id = INET_CONTAINED_BY_OR_EQUALS
+ lval.SetID(lexbase.INET_CONTAINED_BY_OR_EQUALS)
return
}
- lval.id = LSHIFT
+ lval.SetID(lexbase.LSHIFT)
return
case '>': // <>
s.pos++
- lval.id = NOT_EQUALS
+ lval.SetID(lexbase.NOT_EQUALS)
return
case '=': // <=
s.pos++
- lval.id = LESS_EQUALS
+ lval.SetID(lexbase.LESS_EQUALS)
return
case '@': // <@
s.pos++
- lval.id = CONTAINED_BY
+ lval.SetID(lexbase.CONTAINED_BY)
return
}
return
@@ -278,14 +296,14 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '=': // >>=
s.pos++
- lval.id = INET_CONTAINS_OR_EQUALS
+ lval.SetID(lexbase.INET_CONTAINS_OR_EQUALS)
return
}
- lval.id = RSHIFT
+ lval.SetID(lexbase.RSHIFT)
return
case '=': // >=
s.pos++
- lval.id = GREATER_EQUALS
+ lval.SetID(lexbase.GREATER_EQUALS)
return
}
return
@@ -296,11 +314,11 @@ func (s *scanner) scan(lval *sqlSymType) {
if s.peekN(1) == ':' {
// :::
s.pos += 2
- lval.id = TYPEANNOTATE
+ lval.SetID(lexbase.TYPEANNOTATE)
return
}
s.pos++
- lval.id = TYPECAST
+ lval.SetID(lexbase.TYPECAST)
return
}
return
@@ -312,14 +330,14 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '/': // ||/
s.pos++
- lval.id = CBRT
+ lval.SetID(lexbase.CBRT)
return
}
- lval.id = CONCAT
+ lval.SetID(lexbase.CONCAT)
return
case '/': // |/
s.pos++
- lval.id = SQRT
+ lval.SetID(lexbase.SQRT)
return
}
return
@@ -328,7 +346,7 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '/': // //
s.pos++
- lval.id = FLOORDIV
+ lval.SetID(lexbase.FLOORDIV)
return
}
return
@@ -337,7 +355,7 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '*': // ~*
s.pos++
- lval.id = REGIMATCH
+ lval.SetID(lexbase.REGIMATCH)
return
}
return
@@ -346,7 +364,7 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '>': // @>
s.pos++
- lval.id = CONTAINS
+ lval.SetID(lexbase.CONTAINS)
return
}
return
@@ -355,7 +373,7 @@ func (s *scanner) scan(lval *sqlSymType) {
switch s.peek() {
case '&': // &&
s.pos++
- lval.id = AND_AND
+ lval.SetID(lexbase.AND_AND)
return
}
return
@@ -366,11 +384,11 @@ func (s *scanner) scan(lval *sqlSymType) {
if s.peekN(1) == '>' {
// ->>
s.pos += 2
- lval.id = FETCHTEXT
+ lval.SetID(lexbase.FETCHTEXT)
return
}
s.pos++
- lval.id = FETCHVAL
+ lval.SetID(lexbase.FETCHVAL)
return
}
return
@@ -381,15 +399,15 @@ func (s *scanner) scan(lval *sqlSymType) {
if s.peekN(1) == '>' {
// #>>
s.pos += 2
- lval.id = FETCHTEXT_PATH
+ lval.SetID(lexbase.FETCHTEXT_PATH)
return
}
s.pos++
- lval.id = FETCHVAL_PATH
+ lval.SetID(lexbase.FETCHVAL_PATH)
return
case '-': // #-
s.pos++
- lval.id = REMOVE_PATH
+ lval.SetID(lexbase.REMOVE_PATH)
return
}
return
@@ -409,14 +427,14 @@ func (s *scanner) scan(lval *sqlSymType) {
// lval for above.
}
-func (s *scanner) peek() int {
+func (s *Scanner) peek() int {
if s.pos >= len(s.in) {
return eof
}
return int(s.in[s.pos])
}
-func (s *scanner) peekN(n int) int {
+func (s *Scanner) peekN(n int) int {
pos := s.pos + n
if pos >= len(s.in) {
return eof
@@ -424,7 +442,7 @@ func (s *scanner) peekN(n int) int {
return int(s.in[pos])
}
-func (s *scanner) next() int {
+func (s *Scanner) next() int {
ch := s.peek()
if ch != eof {
s.pos++
@@ -432,7 +450,7 @@ func (s *scanner) next() int {
return ch
}
-func (s *scanner) skipWhitespace(lval *sqlSymType, allowComments bool) (newline, ok bool) {
+func (s *Scanner) skipWhitespace(lval ScanSymType, allowComments bool) (newline, ok bool) {
newline = false
for {
ch := s.peek()
@@ -446,7 +464,7 @@ func (s *scanner) skipWhitespace(lval *sqlSymType, allowComments bool) (newline,
continue
}
if allowComments {
- if present, cok := s.scanComment(lval); !cok {
+ if present, cok := s.ScanComment(lval); !cok {
return false, false
} else if present {
continue
@@ -457,7 +475,8 @@ func (s *scanner) skipWhitespace(lval *sqlSymType, allowComments bool) (newline,
return newline, true
}
-func (s *scanner) scanComment(lval *sqlSymType) (present, ok bool) {
+// ScanComment scans the input as a comment.
+func (s *Scanner) ScanComment(lval ScanSymType) (present, ok bool) {
start := s.pos
ch := s.peek()
@@ -489,9 +508,9 @@ func (s *scanner) scanComment(lval *sqlSymType) (present, ok bool) {
}
case eof:
- lval.id = ERROR
- lval.pos = int32(start)
- lval.str = "unterminated comment"
+ lval.SetID(lexbase.ERROR)
+ lval.SetPos(int32(start))
+ lval.SetStr("unterminated comment")
return false, false
}
}
@@ -514,13 +533,13 @@ func (s *scanner) scanComment(lval *sqlSymType) (present, ok bool) {
return false, true
}
-func (s *scanner) scanIdent(lval *sqlSymType) {
+func (s *Scanner) scanIdent(lval ScanSymType) {
s.pos--
start := s.pos
isASCII := true
isLower := true
- // Consume the scanner character by character, stopping after the last legal
+ // Consume the input character by character, stopping after the last legal
// identifier character. By the end of this function, we need to
// lowercase and unicode normalize this identifier, which is expensive if
// there are actual unicode characters in it. If not, it's quite cheap - and
@@ -546,7 +565,7 @@ func (s *scanner) scanIdent(lval *sqlSymType) {
if isLower {
// Already lowercased - nothing to do.
- lval.str = s.in[start:s.pos]
+ lval.SetStr(s.in[start:s.pos])
} else if isASCII {
// We know that the identifier we've seen so far is ASCII, so we don't need
// to unicode normalize. Instead, just lowercase as normal.
@@ -558,45 +577,45 @@ func (s *scanner) scanIdent(lval *sqlSymType) {
}
b[i] = byte(c)
}
- lval.str = *(*string)(unsafe.Pointer(&b))
+ lval.SetStr(*(*string)(unsafe.Pointer(&b)))
} else {
// The string has unicode in it. No choice but to run Normalize.
- lval.str = lexbase.NormalizeName(s.in[start:s.pos])
+ lval.SetStr(lexbase.NormalizeName(s.in[start:s.pos]))
}
isExperimental := false
- kw := lval.str
+ kw := lval.Str()
switch {
- case strings.HasPrefix(lval.str, "experimental_"):
- kw = lval.str[13:]
+ case strings.HasPrefix(lval.Str(), "experimental_"):
+ kw = lval.Str()[13:]
isExperimental = true
- case strings.HasPrefix(lval.str, "testing_"):
- kw = lval.str[8:]
+ case strings.HasPrefix(lval.Str(), "testing_"):
+ kw = lval.Str()[8:]
isExperimental = true
}
- lval.id = lexbase.GetKeywordID(kw)
- if lval.id != lexbase.IDENT {
+ lval.SetID(lexbase.GetKeywordID(kw))
+ if lval.ID() != lexbase.IDENT {
if isExperimental {
if _, ok := lexbase.AllowedExperimental[kw]; !ok {
// If the parsed token is not on the allowlisted set of keywords,
// then it might have been intended to be parsed as something else.
// In that case, re-tokenize the original string.
- lval.id = lexbase.GetKeywordID(lval.str)
+ lval.SetID(lexbase.GetKeywordID(lval.Str()))
} else {
// It is a allowlisted keyword, so remember the shortened
// keyword for further processing.
- lval.str = kw
+ lval.SetStr(kw)
}
}
} else {
// If the word after experimental_ or testing_ is an identifier,
// then we might have classified it incorrectly after removing the
// experimental_/testing_ prefix.
- lval.id = lexbase.GetKeywordID(lval.str)
+ lval.SetID(lexbase.GetKeywordID(lval.Str()))
}
}
-func (s *scanner) scanNumber(lval *sqlSymType, ch int) {
+func (s *Scanner) scanNumber(lval ScanSymType, ch int) {
start := s.pos - 1
isHex := false
hasDecimal := ch == '.'
@@ -610,8 +629,8 @@ func (s *scanner) scanNumber(lval *sqlSymType, ch int) {
}
if ch == 'x' || ch == 'X' {
if isHex || s.in[start] != '0' || s.pos != start+1 {
- lval.id = ERROR
- lval.str = errInvalidHexNumeric
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidHexNumeric)
return
}
s.pos++
@@ -647,8 +666,8 @@ func (s *scanner) scanNumber(lval *sqlSymType, ch int) {
}
ch = s.peek()
if !lexbase.IsDigit(ch) {
- lval.id = ERROR
- lval.str = "invalid floating point literal"
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr("invalid floating point literal")
return
}
continue
@@ -656,20 +675,20 @@ func (s *scanner) scanNumber(lval *sqlSymType, ch int) {
break
}
- lval.str = s.in[start:s.pos]
+ lval.SetStr(s.in[start:s.pos])
if hasDecimal || hasExponent {
- lval.id = FCONST
- floatConst := constant.MakeFromLiteral(lval.str, token.FLOAT, 0)
+ lval.SetID(lexbase.FCONST)
+ floatConst := constant.MakeFromLiteral(lval.Str(), token.FLOAT, 0)
if floatConst.Kind() == constant.Unknown {
- lval.id = ERROR
- lval.str = fmt.Sprintf("could not make constant float from literal %q", lval.str)
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(fmt.Sprintf("could not make constant float from literal %q", lval.Str()))
return
}
- lval.union.val = NewNumValFn(floatConst, lval.str, false /* negative */)
+ lval.SetUnionVal(NewNumValFn(floatConst, lval.Str(), false /* negative */))
} else {
if isHex && s.pos == start+2 {
- lval.id = ERROR
- lval.str = errInvalidHexNumeric
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidHexNumeric)
return
}
@@ -678,41 +697,41 @@ func (s *scanner) scanNumber(lval *sqlSymType, ch int) {
// string as an octal literal. Note: we can't use strings.TrimLeft
// here, because it will truncate '0' to ''.
if !isHex {
- for len(lval.str) > 1 && lval.str[0] == '0' {
- lval.str = lval.str[1:]
+ for len(lval.Str()) > 1 && lval.Str()[0] == '0' {
+ lval.SetStr(lval.Str()[1:])
}
}
- lval.id = ICONST
- intConst := constant.MakeFromLiteral(lval.str, token.INT, 0)
+ lval.SetID(lexbase.ICONST)
+ intConst := constant.MakeFromLiteral(lval.Str(), token.INT, 0)
if intConst.Kind() == constant.Unknown {
- lval.id = ERROR
- lval.str = fmt.Sprintf("could not make constant int from literal %q", lval.str)
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(fmt.Sprintf("could not make constant int from literal %q", lval.Str()))
return
}
- lval.union.val = NewNumValFn(intConst, lval.str, false /* negative */)
+ lval.SetUnionVal(NewNumValFn(intConst, lval.Str(), false /* negative */))
}
}
-func (s *scanner) scanPlaceholder(lval *sqlSymType) {
+func (s *Scanner) scanPlaceholder(lval ScanSymType) {
start := s.pos
for lexbase.IsDigit(s.peek()) {
s.pos++
}
- lval.str = s.in[start:s.pos]
+ lval.SetStr(s.in[start:s.pos])
- placeholder, err := NewPlaceholderFn(lval.str)
+ placeholder, err := NewPlaceholderFn(lval.Str())
if err != nil {
- lval.id = ERROR
- lval.str = err.Error()
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(err.Error())
return
}
- lval.id = PLACEHOLDER
- lval.union.val = placeholder
+ lval.SetID(lexbase.PLACEHOLDER)
+ lval.SetUnionVal(placeholder)
}
// scanHexString scans the content inside x'....'.
-func (s *scanner) scanHexString(lval *sqlSymType, ch int) bool {
+func (s *Scanner) scanHexString(lval ScanSymType, ch int) bool {
buf := s.buffer()
var curbyte byte
@@ -744,8 +763,8 @@ outer:
case 'A', 'B', 'C', 'D', 'E', 'F':
curbyte = (curbyte << 4) | byte(b-'A'+10)
default:
- lval.id = ERROR
- lval.str = errInvalidBytesLiteral
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidBytesLiteral)
return false
}
bytep++
@@ -758,18 +777,18 @@ outer:
}
if bytep != 0 {
- lval.id = ERROR
- lval.str = errInvalidBytesLiteral
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidBytesLiteral)
return false
}
- lval.id = BCONST
- lval.str = s.finishString(buf)
+ lval.SetID(lexbase.BCONST)
+ lval.SetStr(s.finishString(buf))
return true
}
// scanBitString scans the content inside B'....'.
-func (s *scanner) scanBitString(lval *sqlSymType, ch int) bool {
+func (s *Scanner) scanBitString(lval ScanSymType, ch int) bool {
buf := s.buffer()
outer:
for {
@@ -793,21 +812,21 @@ outer:
case '0', '1':
buf = append(buf, byte(b))
default:
- lval.id = ERROR
- lval.str = fmt.Sprintf(`"%c" is not a valid binary digit`, rune(b))
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(fmt.Sprintf(`"%c" is not a valid binary digit`, rune(b)))
return false
}
}
- lval.id = BITCONST
- lval.str = s.finishString(buf)
+ lval.SetID(lexbase.BITCONST)
+ lval.SetStr(s.finishString(buf))
return true
}
// scanString scans the content inside '...'. This is used for simple
// string literals '...' but also e'....' and b'...'. For x'...', see
// scanHexString().
-func (s *scanner) scanString(lval *sqlSymType, ch int, allowEscapes, requireUTF8 bool) bool {
+func (s *Scanner) scanString(lval ScanSymType, ch int, allowEscapes, requireUTF8 bool) bool {
buf := s.buffer()
var runeTmp [utf8.UTFMax]byte
start := s.pos
@@ -864,8 +883,8 @@ outer:
}
v, multibyte, tail, err := strconv.UnquoteChar(tmp, byte(ch))
if err != nil {
- lval.id = ERROR
- lval.str = err.Error()
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(err.Error())
return false
}
if v < utf8.RuneSelf || !multibyte {
@@ -887,25 +906,25 @@ outer:
}
case eof:
- lval.id = ERROR
- lval.str = errUnterminated
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errUnterminated)
return false
}
}
if requireUTF8 && !utf8.Valid(buf) {
- lval.id = ERROR
- lval.str = errInvalidUTF8
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidUTF8)
return false
}
- lval.str = s.finishString(buf)
+ lval.SetStr(s.finishString(buf))
return true
}
// scanDollarQuotedString scans for so called dollar-quoted strings, which start/end with either $$ or $tag$, where
// tag is some arbitrary string. e.g. $$a string$$ or $escaped$a string$escaped$.
-func (s *scanner) scanDollarQuotedString(lval *sqlSymType) bool {
+func (s *Scanner) scanDollarQuotedString(lval ScanSymType) bool {
buf := s.buffer()
start := s.pos
@@ -943,8 +962,8 @@ outer:
case eof:
if foundStartTag {
// A start tag was found, therefore we expect an end tag before the eof, otherwise it is an error.
- lval.id = ERROR
- lval.str = errUnterminated
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errUnterminated)
} else {
// This is not a dollar-quoted string, reset the pos back to the start.
s.pos = start
@@ -971,65 +990,11 @@ outer:
}
if !utf8.Valid(buf) {
- lval.id = ERROR
- lval.str = errInvalidUTF8
+ lval.SetID(lexbase.ERROR)
+ lval.SetStr(errInvalidUTF8)
return false
}
- lval.str = s.finishString(buf)
+ lval.SetStr(s.finishString(buf))
return true
}
-
-// SplitFirstStatement returns the length of the prefix of the string up to and
-// including the first semicolon that separates statements. If there is no
-// semicolon, returns ok=false.
-func SplitFirstStatement(sql string) (pos int, ok bool) {
- s := makeScanner(sql)
- var lval sqlSymType
- for {
- s.scan(&lval)
- switch lval.id {
- case 0, ERROR:
- return 0, false
- case ';':
- return s.pos, true
- }
- }
-}
-
-// Tokens decomposes the input into lexical tokens.
-func Tokens(sql string) (tokens []TokenString, ok bool) {
- s := makeScanner(sql)
- for {
- var lval sqlSymType
- s.scan(&lval)
- if lval.id == ERROR {
- return nil, false
- }
- if lval.id == 0 {
- break
- }
- tokens = append(tokens, TokenString{TokenID: lval.id, Str: lval.str})
- }
- return tokens, true
-}
-
-// TokenString is the unit value returned by Tokens.
-type TokenString struct {
- TokenID int32
- Str string
-}
-
-// LastLexicalToken returns the last lexical token. If the string has no lexical
-// tokens, returns 0 and ok=false.
-func LastLexicalToken(sql string) (lastTok int, ok bool) {
- s := makeScanner(sql)
- var lval sqlSymType
- for {
- last := lval.id
- s.scan(&lval)
- if lval.id == 0 {
- return int(last), last != 0
- }
- }
-}
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index 8af3073f9a5f..5c1cedced61a 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -12,6 +12,7 @@ package builtins
import (
"bytes"
+ "compress/gzip"
"crypto/md5"
cryptorand "crypto/rand"
"crypto/sha1"
@@ -22,6 +23,7 @@ import (
"hash"
"hash/crc32"
"hash/fnv"
+ "io/ioutil"
"math"
"math/rand"
"net"
@@ -1073,6 +1075,63 @@ var builtins = map[string]builtinDefinition{
},
),
+ "compress": makeBuiltin(defProps(),
+ tree.Overload{
+ Types: tree.ArgTypes{{"data", types.Bytes}, {"codec", types.String}},
+ ReturnType: tree.FixedReturnType(types.Bytes),
+ Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (_ tree.Datum, err error) {
+ uncompressedData := []byte(tree.MustBeDBytes(args[0]))
+ codec := string(tree.MustBeDString(args[1]))
+ switch strings.ToUpper(codec) {
+ case "GZIP":
+ gzipBuf := bytes.NewBuffer([]byte{})
+ gz := gzip.NewWriter(gzipBuf)
+ if _, err := gz.Write(uncompressedData); err != nil {
+ return nil, err
+ }
+ if err := gz.Close(); err != nil {
+ return nil, err
+ }
+ return tree.NewDBytes(tree.DBytes(gzipBuf.Bytes())), nil
+ default:
+ return nil, pgerror.New(pgcode.InvalidParameterValue,
+ "only 'gzip' codec is supported for compress()")
+ }
+ },
+ Info: "Compress `data` with the specified `codec` (`gzip`).",
+ Volatility: tree.VolatilityImmutable,
+ },
+ ),
+
+ "decompress": makeBuiltin(defProps(),
+ tree.Overload{
+ Types: tree.ArgTypes{{"data", types.Bytes}, {"codec", types.String}},
+ ReturnType: tree.FixedReturnType(types.Bytes),
+ Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (_ tree.Datum, err error) {
+ compressedData := []byte(tree.MustBeDBytes(args[0]))
+ codec := string(tree.MustBeDString(args[1]))
+ switch strings.ToUpper(codec) {
+ case "GZIP":
+ r, err := gzip.NewReader(bytes.NewBuffer(compressedData))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to decompress")
+ }
+ defer r.Close()
+ decompressedBytes, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return tree.NewDBytes(tree.DBytes(decompressedBytes)), nil
+ default:
+ return nil, pgerror.New(pgcode.InvalidParameterValue,
+ "only 'gzip' codec is supported for decompress()")
+ }
+ },
+ Info: "Decompress `data` with the specified `codec` (`gzip`).",
+ Volatility: tree.VolatilityImmutable,
+ },
+ ),
+
"ascii": makeBuiltin(tree.FunctionProperties{Category: categoryString},
stringOverload1(
func(_ *tree.EvalContext, s string) (tree.Datum, error) {
|