diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index 44a995cbc69f..4c84c9d3d037 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -1386,11 +1386,7 @@ the locality flag on node startup. Returns an error if no region is set.
Immutable |
workload_index_recs() → string | Returns set of index recommendations
| Immutable |
-workload_index_recs(budget: string) → string | Returns set of index recommendations
- | Immutable |
workload_index_recs(timestamptz: timestamptz) → string | Returns set of index recommendations
- | Immutable |
-workload_index_recs(timestamptz: timestamptz, budget: string) → string | Returns set of index recommendations
| Immutable |
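
With the budget overloads removed, only the niladic and timestamp forms of the builtin survive. As context, here is a minimal client-side sketch of calling them over the wire; the DSN, database name, and the lib/pq driver are illustrative assumptions, not part of this patch:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/lib/pq" // any Postgres-wire driver works; this one is an assumption
)

func main() {
	// Placeholder DSN; point it at a running CockroachDB node.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// workload_index_recs(timestamptz) only considers statements executed
	// after the given time; here, the last two weeks.
	rows, err := db.Query("SELECT workload_index_recs($1::TIMESTAMPTZ)", time.Now().AddDate(0, 0, -14))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var rec string
		if err := rows.Scan(&rec); err != nil {
			log.Fatal(err)
		}
		fmt.Println(rec) // e.g. "CREATE INDEX ON t1 (k);"
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```
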
diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel
index b1d4caa355f1..90fb610fa22b 100644
--- a/pkg/BUILD.bazel
+++ b/pkg/BUILD.bazel
@@ -486,6 +486,7 @@ ALL_TESTS = [
"//pkg/sql/opt/testutils/opttester:opttester_test",
"//pkg/sql/opt/testutils/testcat:testcat_test",
"//pkg/sql/opt/testutils:testutils_test",
+ "//pkg/sql/opt/workloadindexrec:workloadindexrec_test",
"//pkg/sql/opt/xform:xform_test",
"//pkg/sql/opt:opt_test",
"//pkg/sql/parser:parser_disallowed_imports_test",
@@ -1892,6 +1893,8 @@ GO_TARGETS = [
"//pkg/sql/opt/testutils/testexpr:testexpr",
"//pkg/sql/opt/testutils:testutils",
"//pkg/sql/opt/testutils:testutils_test",
+ "//pkg/sql/opt/workloadindexrec:workloadindexrec",
+ "//pkg/sql/opt/workloadindexrec:workloadindexrec_test",
"//pkg/sql/opt/xform:xform",
"//pkg/sql/opt/xform:xform_test",
"//pkg/sql/opt:opt",
diff --git a/pkg/sql/logictest/testdata/logic_test/workload_indexrecs b/pkg/sql/logictest/testdata/logic_test/workload_indexrecs
index ed82054769e6..fdbc80e9dee8 100644
--- a/pkg/sql/logictest/testdata/logic_test/workload_indexrecs
+++ b/pkg/sql/logictest/testdata/logic_test/workload_indexrecs
@@ -1,32 +1,403 @@
-# The returned data now is just dummy data
+# Give root role permission to insert into system tables.
+# DO NOT DO THIS IN PRODUCTION.
+statement ok
+INSERT INTO system.users VALUES ('node', NULL, true, 3);
+GRANT NODE TO root;
+
+statement ok
+CREATE TABLE t1 (k INT, i INT, f FLOAT, s STRING)
+
+
+# Basic tests for creation
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['creation : CREATE INDEX t1_k ON t1(k)'],
+ '2023-07-05 15:10:11+00:00',
+ 'fp_1',
+ 'tfp_1',
+ 'ph_1',
+ 'app_1',
+ 1,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k);
+
+
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (k);
+
+
+# Basic tests for replacement
+statement ok
+CREATE INDEX t1_i ON t1(i);
+
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['replacement : CREATE INDEX t1_i2 ON t1(i) storing (k); DROP INDEX t1_i;'],
+ '2023-07-05 15:10:12+00:00',
+ 'fp_2',
+ 'tfp_2',
+ 'ph_2',
+ 'app_2',
+ 2,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-06-15 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k);
+CREATE INDEX ON t1 (i) STORING (k);
+DROP INDEX t1_i;
+
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (k);
+
+
+# Basic test for alteration, showing that it is skipped
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['alteration : ALTER INDEX t1_i NOT VISIBLE'],
+ '2023-07-05 15:10:13+00:00',
+ 'fp_3',
+ 'tfp_3',
+ 'ph_3',
+ 'app_3',
+ 3,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-06-29 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k);
+CREATE INDEX ON t1 (i) STORING (k);
+DROP INDEX t1_i;
+
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (k);
+
+
+# Test for the new index "t1(k, i)" covering the previous one "t1(k)"
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['creation : CREATE INDEX t1_k_i ON t1(k, i)'],
+ '2023-07-05 15:10:14+00:00',
+ 'fp_4',
+ 'tfp_4',
+ 'ph_4',
+ 'app_4',
+ 4,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i) STORING (k);
+DROP INDEX t1_i;
+
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (k, i);
+
+
+# Test that the storing part of "t1(i) STORING (k)" is covered by the index "t1(i, k)"
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['creation : CREATE INDEX t1_k_i ON t1(i, k)'],
+ '2023-07-05 15:10:15+00:00',
+ 'fp_5',
+ 'tfp_5',
+ 'ph_5',
+ 'app_5',
+ 5,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+DROP INDEX t1_i;
+
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (i, k);
+CREATE INDEX ON t1 (k, i);
+
+
+# Test for duplicate DROP INDEX t1_i
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['replacement : CREATE INDEX t1_i2 ON t1(i) storing (k); DROP INDEX t1_i;'],
+ '2023-07-05 15:10:16+00:00',
+ 'fp_6',
+ 'tfp_6',
+ 'ph_6',
+ 'app_6',
+ 6,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
+----
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+DROP INDEX t1_i;
+
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
+----
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+DROP INDEX t1_i;
+
+
+statement ok
+CREATE TABLE t2 (k INT, i INT, f FLOAT, s STRING)
+
+
+# Test for multi-table (t1, t2)
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['creation : CREATE INDEX t2_k ON t2(k) storing (i, f)', 'creation : CREATE INDEX t2_k_f ON t2(k, f)', 'creation : CREATE INDEX t2_k_i_s ON t2(k, i, s)'],
+ '2023-07-05 15:10:17+00:00',
+ 'fp_7',
+ 'tfp_7',
+ 'ph_7',
+ 'app_7',
+ 7,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
+
# get workload index-recs
-query T nosort
+query T rowsort
SELECT workload_index_recs();
----
-1
-2
-3
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+CREATE INDEX ON t2 (k, f) STORING (i);
+CREATE INDEX ON t2 (k, i, s);
+DROP INDEX t1_i;
+
+# index recommendations created before the given time (as shown below) are omitted
# get workload index-recs with time filter
-query T nosort
-SELECT workload_index_recs(now() - '2 weeks'::interval);
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
----
-1
-2
-3
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+CREATE INDEX ON t2 (k, f) STORING (i);
+CREATE INDEX ON t2 (k, i, s);
+DROP INDEX t1_i;
+
+
+statement ok
+CREATE TABLE t3 (k INT, i INT, f FLOAT, s STRING)
+
+
+# Test for multi-table (t1, t2, t3)
+statement ok
+INSERT INTO system.statement_statistics (
+ index_recommendations,
+ aggregated_ts,
+ fingerprint_id,
+ transaction_fingerprint_id,
+ plan_hash,
+ app_name,
+ node_id,
+ agg_interval,
+ metadata,
+ statistics,
+ plan
+)
+VALUES (
+ ARRAY['creation : CREATE INDEX t3_k_i_f ON t3(k, i, f)', 'creation : CREATE INDEX t3_k_i_s ON t3(k, i, s)', 'creation : CREATE INDEX t3_k1 ON t3(k) storing (i, f)'],
+ '2023-07-05 15:10:18+00:00',
+ 'fp_8',
+ 'tfp_8',
+ 'ph_8',
+ 'app_8',
+ 8,
+ '1 hr',
+ 'null',
+ '{"statistics": {"lastExecAt" : "2023-07-05 15:10:10+00:00"}}'::JSONB,
+ 'null'
+);
-# get workload index-recs with budget limit
-query T nosort
-SELECT workload_index_recs('42MB');
+# get workload index-recs
+query T rowsort
+SELECT workload_index_recs();
----
-1
-2
-3
+CREATE INDEX ON t3 (k, i, f);
+CREATE INDEX ON t3 (k, i, s);
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+CREATE INDEX ON t2 (k, i, s);
+CREATE INDEX ON t2 (k, f) STORING (i);
+DROP INDEX t1_i;
-# get workload index-recs with time filter and budget limit
-query T nosort
-SELECT workload_index_recs('2023-06-13 10:10:10-05:00', '58GiB');
+
+# index recommendations created before the given time (as shown below) are omitted
+# get workload index-recs with time filter
+query T rowsort
+SELECT workload_index_recs('2023-07-05 15:10:10+00:00'::TIMESTAMPTZ - '2 weeks'::interval);
----
-1
-2
-3
+CREATE INDEX ON t1 (k, i);
+CREATE INDEX ON t1 (i, k);
+CREATE INDEX ON t2 (k, f) STORING (i);
+CREATE INDEX ON t2 (k, i, s);
+CREATE INDEX ON t3 (k, i, f);
+CREATE INDEX ON t3 (k, i, s);
+DROP INDEX t1_i;
diff --git a/pkg/sql/opt/workloadindexrec/BUILD.bazel b/pkg/sql/opt/workloadindexrec/BUILD.bazel
new file mode 100644
index 000000000000..317af69959ea
--- /dev/null
+++ b/pkg/sql/opt/workloadindexrec/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "workloadindexrec",
+ srcs = [
+ "index_trie.go",
+ "workload_indexrecs.go",
+ ],
+ importpath = "github.com/cockroachdb/cockroach/pkg/sql/opt/workloadindexrec",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//pkg/sql/parser",
+ "//pkg/sql/sem/eval",
+ "//pkg/sql/sem/tree",
+ "//pkg/sql/sessiondata",
+ "@com_github_cockroachdb_errors//:errors",
+ ],
+)
+
+go_test(
+ name = "workloadindexrec_test",
+ srcs = ["index_trie_test.go"],
+ args = ["-test.timeout=295s"],
+ embed = [":workloadindexrec"],
+ deps = ["//pkg/sql/sem/tree"],
+)
diff --git a/pkg/sql/opt/workloadindexrec/index_trie.go b/pkg/sql/opt/workloadindexrec/index_trie.go
new file mode 100644
index 000000000000..95d1e1ab3f56
--- /dev/null
+++ b/pkg/sql/opt/workloadindexrec/index_trie.go
@@ -0,0 +1,255 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package workloadindexrec
+
+import (
+ "math"
+
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+)
+
+// TODO: Add nullsOrder once it can be specified in index recommendations.
+type indexedColumn struct {
+ column tree.Name
+ direction tree.Direction
+}
+
+// indexTrieNode is a node of an indexTrie.
+//
+// Each node stores its children keyed by indexed column, its storing columns,
+// its parent node, and the indexed column it represents (used to assign storings).
+type indexTrieNode struct {
+ children map[indexedColumn]*indexTrieNode
+ storing map[string]struct{}
+ parent *indexTrieNode
+ col indexedColumn
+}
+
+// indexTrie is a trie that stores all the recommended indexes for one
+// table.
+//
+// Each node represents one indexed column and may carry storing columns.
+// Indexes are deduplicated and merged via Insert, RemoveStorings and
+// AssignStoring; check the corresponding methods for more details.
+type indexTrie struct {
+ root *indexTrieNode
+}
+
+// NewTrie returns a new, empty indexTrie.
+func NewTrie() *indexTrie {
+ return &indexTrie{
+ root: &indexTrieNode{},
+ }
+}
+
+// Insert adds an index, given by its indexed columns and storing columns, to the trie.
+func (trie *indexTrie) Insert(indexedCols tree.IndexElemList, storingCols tree.NameList) {
+ node := trie.root
+ for _, indexedCol := range indexedCols {
+ indexCol := indexedColumn{
+ column: indexedCol.Column,
+ direction: indexedCol.Direction,
+ }
+
+		// To avoid duplicate branches, a column maps to at most 2 child nodes:
+		// (col1, ASC) and (col1, DESC). (col1, Default) is normalized to
+		// (col1, ASC).
+ if indexCol.direction == tree.DefaultDirection {
+ indexCol.direction = tree.Ascending
+ }
+
+ if node.children == nil {
+ node.children = make(map[indexedColumn]*indexTrieNode)
+ }
+
+ if _, ok := node.children[indexCol]; !ok {
+ node.children[indexCol] = &indexTrieNode{
+ parent: node,
+ col: indexCol,
+ }
+ }
+ node = node.children[indexCol]
+ }
+
+ if len(storingCols) > 0 {
+ if node.storing == nil {
+ node.storing = make(map[string]struct{})
+ }
+ for _, storingCol := range storingCols {
+ node.storing[string(storingCol)] = struct{}{}
+ }
+ }
+}
+
+// RemoveStorings removes the storings that are covered by leaf nodes. It
+// traverses the trie by breadth-first search (BFS); whenever it finds a node
+// with storing columns, it invokes removeStoringCoveredByLeaf to check
+// whether some leaf node covers that storing.
+func (trie *indexTrie) RemoveStorings() {
+ var queue []*indexTrieNode
+ queue = append(queue, trie.root)
+ for len(queue) > 0 {
+ node := queue[0]
+ queue = queue[1:]
+ if len(node.storing) > 0 {
+ if node.removeStoringCoveredByLeaf(node.storing) {
+ node.storing = make(map[string]struct{})
+ }
+ }
+ for _, child := range node.children {
+ queue = append(queue, child)
+ }
+ }
+}
+
+// removeStoringCoveredByLeaf checks, by depth-first search (DFS), whether the
+// given storing columns are covered by the leaf nodes.
+// TODO: Consider an algorithm that can remove a subset of the storing columns.
+func (node *indexTrieNode) removeStoringCoveredByLeaf(restStoring map[string]struct{}) bool {
+	// All the storing columns are already covered, even if we have not reached
+	// a leaf node yet.
+ if len(restStoring) == 0 {
+ return true
+ }
+
+	// We reached a leaf without covering all the storing columns.
+ if len(node.children) == 0 {
+ return false
+ }
+
+ for indexCol, child := range node.children {
+		// Temporarily delete the column covered by this child.
+ var found = false
+ if _, ok := restStoring[string(indexCol.column)]; ok {
+ found = true
+ delete(restStoring, string(indexCol.column))
+ }
+
+ if child.removeStoringCoveredByLeaf(restStoring) {
+ return true
+ }
+
+		// Restore the deleted column so that restStoring can be reused for the
+		// remaining children.
+ if found {
+ restStoring[string(indexCol.column)] = struct{}{}
+ }
+ }
+
+ return false
+}
+
+// AssignStoring pushes each node's storing columns down to the shallowest
+// leaf in its subtree; see assignStoringToShallowestLeaf for details.
+func (trie *indexTrie) AssignStoring() {
+ trie.root.assignStoringToShallowestLeaf(0)
+}
+
+// assignStoringToShallowestLeaf assigns the storing of each node to the
+// shallowest leaf node inside its subtree, returning that leaf and its depth.
+func (node *indexTrieNode) assignStoringToShallowestLeaf(curDep int) (*indexTrieNode, int) {
+ if len(node.children) == 0 {
+ return node, curDep
+ }
+
+ var shallowLeaf *indexTrieNode
+	// dep tracks the smallest leaf depth found so far; start at the sentinel.
+ dep := math.MaxInt64
+ for _, child := range node.children {
+ tempLeaf, tempDep := child.assignStoringToShallowestLeaf(curDep + 1)
+ if tempDep < dep {
+ dep = tempDep
+ shallowLeaf = tempLeaf
+ }
+ }
+
+	// Assign the storing of this node to shallowLeaf. Some columns may already
+	// be covered along the path from the node to shallowLeaf, as shown below:
+ //
+ // a storing (b, c, d)
+ // / \
+ // c b
+ // | |
+ // e c
+ // |
+ // f
+ //
+	// When we finish traversing the tree and return to node "a", the shallowest
+	// leaf is the right "c". We want to assign the storing part of node "a" to
+	// that leaf, but we must first remove every column that is already covered
+	// by the path from "c" back to "a" ("b" and "c" in this example). The
+	// following code walks from the shallowest leaf up to the current node and
+	// removes the columns covered by the path. Afterwards, the trie looks
+	// like:
+ //
+ // a
+ // / \
+ // c b
+ // | |
+ // e c storing (d)
+ // |
+ // f
+ //
+ // The right leaf node "c" will represent the index (a, b, c) storing (d).
+ if len(node.storing) > 0 {
+ tempNode := shallowLeaf
+ for tempNode != node {
+ delete(node.storing, string(tempNode.col.column))
+ tempNode = tempNode.parent
+ }
+
+ if len(node.storing) > 0 {
+ if shallowLeaf.storing == nil {
+ shallowLeaf.storing = make(map[string]struct{})
+ }
+ for col := range node.storing {
+ shallowLeaf.storing[col] = struct{}{}
+ }
+ node.storing = nil
+ }
+ }
+
+ return shallowLeaf, dep
+}
+
+// collectAllLeavesForTable collects all the indexes represented by the leaf
+// nodes of trie.
+func collectAllLeavesForTable(trie *indexTrie) ([][]indexedColumn, [][]tree.Name) {
+ var indexedColsArray [][]indexedColumn
+ var storingColsArray [][]tree.Name
+ collectAllLeaves(trie.root, &indexedColsArray, &storingColsArray, []indexedColumn{})
+ return indexedColsArray, storingColsArray
+}
+
+// collectAllLeaves collects all the indexes represented by the leaf nodes
+// recursively.
+func collectAllLeaves(
+ node *indexTrieNode,
+ indexedCols *[][]indexedColumn,
+ storingCols *[][]tree.Name,
+ curIndexedCols []indexedColumn,
+) {
+ if len(node.children) == 0 {
+ curStoringCols := make([]tree.Name, len(node.storing))
+ var idx = 0
+ for storingCol := range node.storing {
+ curStoringCols[idx] = tree.Name(storingCol)
+ idx++
+ }
+ *indexedCols = append(*indexedCols, curIndexedCols)
+ *storingCols = append(*storingCols, curStoringCols)
+ return
+ }
+
+	for indexCol, child := range node.children {
+		// Copy curIndexedCols before appending: sibling subtrees must not share
+		// (and overwrite) the same backing array.
+		path := make([]indexedColumn, len(curIndexedCols), len(curIndexedCols)+1)
+		copy(path, curIndexedCols)
+		collectAllLeaves(child, indexedCols, storingCols, append(path, indexCol))
+	}
+}
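
Taken together, the intended lifecycle of the trie is Insert, then RemoveStorings, then AssignStoring, then collectAllLeavesForTable. The in-package sketch below is illustrative only, not part of the patch; the table and column names are made up:

```go
package workloadindexrec

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// ExampleTrieLifecycle sketches one round trip through the trie: insert two
// recommendations, drop the storing that a leaf already covers, push any
// remaining storings down, and read back the surviving indexes.
func ExampleTrieLifecycle() {
	trie := NewTrie()

	// creation : CREATE INDEX ON t (k) STORING (i)
	trie.Insert(tree.IndexElemList{{Column: "k"}}, tree.NameList{"i"})
	// creation : CREATE INDEX ON t (k, i)
	trie.Insert(tree.IndexElemList{{Column: "k"}, {Column: "i"}}, nil)

	// (k) STORING (i) is covered by the leaf (k, i), so its storing is
	// dropped; nothing is left for AssignStoring to push down.
	trie.RemoveStorings()
	trie.AssignStoring()

	indexedCols, storingCols := collectAllLeavesForTable(trie)
	for i := range indexedCols {
		fmt.Println(indexedCols[i], storingCols[i]) // one leaf: the index (k, i)
	}
}
```
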
diff --git a/pkg/sql/opt/workloadindexrec/index_trie_test.go b/pkg/sql/opt/workloadindexrec/index_trie_test.go
new file mode 100644
index 000000000000..320ed2aeb85f
--- /dev/null
+++ b/pkg/sql/opt/workloadindexrec/index_trie_test.go
@@ -0,0 +1,382 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package workloadindexrec
+
+import (
+ "testing"
+
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+)
+
+func TestIndexTrie(t *testing.T) {
+ type testType int8
+
+ const (
+ insertOnly testType = iota
+ insertRemoveStoring
+ insertAssignStoring
+ insertRemoveAndAssignStoring
+ )
+
+ testCases := []struct {
+ testType testType
+		// The following two variables are the indexed columns and storing columns for each CREATE INDEX.
+ indexedColLists []tree.IndexElemList
+ storingColLists []tree.NameList
+ // The following two variables represent the indexed columns and storing columns the system should return.
+ expectedIndexedColLists [][]indexedColumn
+ expectedStoringColLists [][]tree.Name
+ }{
+ {
+ // 0:
+ // Test for insert-only;
+ //
+ // Input indexes:
+ // (i)
+ // (i) STORING (j)
+ //
+ // Output indexes:
+ // (i) STORING (j)
+ //
+ // The trie is like:
+ // i store (j) -> i store (j)
+ testType: insertOnly,
+ indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {},
+ {"j"},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Ascending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {"j"},
+ },
+ },
+ {
+ // 1:
+ // Test for insert-only (different kinds of orders);
+ //
+ // Input indexes:
+ // (i DESC)
+ // (i DESC, j DESC)
+ //
+ // Output indexes:
+ // (i DESC, j DESC)
+ //
+ // The trie is like:
+ // i DESC i DESC
+ // | -> |
+		//       j DESC        j DESC
+ testType: insertOnly,
+ indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.Descending},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.Descending},
+ tree.IndexElem{Column: "j", Direction: tree.Descending},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {},
+ {},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Descending},
+ {column: "j", direction: tree.Descending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {},
+ },
+ },
+ {
+ // 2:
+ // Test for insert + removeStoring;
+ //
+ // Input indexes:
+ // (i) STORING (j)
+ // (i, j)
+ //
+ // Output indexes:
+ // (i, j)
+ //
+ // The trie is like:
+ // i store (j) i
+ // | -> |
+ // j j
+ testType: insertRemoveStoring,
+ indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "j", Direction: tree.DefaultDirection},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {"j"},
+ {},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Ascending},
+ {column: "j", direction: tree.Ascending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {},
+ },
+ },
+ {
+ // 3:
+ // Test for insert + assignStoring;
+ //
+ // Input indexes:
+ // (i) STORING (k)
+ // (i, j)
+ //
+ // Output indexes:
+ // (i, j) STORING (k)
+ //
+ // The trie is like:
+ // i store (k) i
+ // | -> |
+ // j j store (k)
+ testType: insertAssignStoring,
+ indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "j", Direction: tree.DefaultDirection},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {"k"},
+ {},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Ascending},
+ {column: "j", direction: tree.Ascending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {"k"},
+ },
+ },
+ {
+ // 4:
+ // Test for insert + assignStoring;
+ //
+ // Input indexes:
+ // (i DESC, j, k)
+ // (i DESC, k)
+ // (i DESC) STORING (j)
+ //
+ // Output indexes:
+ // (i DESC, j, k)
+ // (i DESC, k) STORING (j)
+ //
+ // The trie is like:
+ // i DESC store (j) i DESC
+ // / \ / \
+ // j k -> j k store (j)
+ // | |
+ // k k
+ testType: insertAssignStoring,
+ indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.Descending},
+ tree.IndexElem{Column: "j", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "k", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.Descending},
+ tree.IndexElem{Column: "k", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.Descending},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {},
+ {},
+ {"j"},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Descending},
+ {column: "j", direction: tree.Ascending},
+ {column: "k", direction: tree.Ascending},
+ },
+ {
+ {column: "i", direction: tree.Descending},
+ {column: "k", direction: tree.Ascending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {},
+ {"j"},
+ },
+ },
+ {
+ // 5:
+ // Test for insert + removeStoring + assignStoring;
+ //
+ // Input indexes:
+ // (i, j, k)
+ // (i, k)
+ // (i) STORING (j)
+ //
+ // Output indexes:
+ // (i, j, k)
+ // (i, k)
+ //
+ // The trie is like:
+ // i store (j) i
+ // / \ / \
+ // j k -> j k
+ // | |
+ // k k
+			testType: insertRemoveAndAssignStoring,
+			indexedColLists: []tree.IndexElemList{
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "j", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "k", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ tree.IndexElem{Column: "k", Direction: tree.DefaultDirection},
+ },
+ {
+ tree.IndexElem{Column: "i", Direction: tree.DefaultDirection},
+ },
+ },
+ storingColLists: []tree.NameList{
+ {},
+ {},
+ {"j"},
+ },
+ expectedIndexedColLists: [][]indexedColumn{
+ {
+ {column: "i", direction: tree.Ascending},
+ {column: "j", direction: tree.Ascending},
+ {column: "k", direction: tree.Ascending},
+ },
+ {
+ {column: "i", direction: tree.Ascending},
+ {column: "k", direction: tree.Ascending},
+ },
+ },
+ expectedStoringColLists: [][]tree.Name{
+ {},
+ {},
+ },
+ },
+ }
+
+ indexOutputFunc := func(testIdx int, indexIdx int) string {
+ res := "("
+ for i, indexedCol := range testCases[testIdx].expectedIndexedColLists[indexIdx] {
+ if i > 0 {
+ res += ", "
+ }
+ res += string(indexedCol.column)
+ if indexedCol.direction == tree.Descending {
+ res += " DESC"
+ }
+ }
+ res += ")"
+ for i, storingCol := range testCases[testIdx].expectedStoringColLists[indexIdx] {
+ if i == 0 {
+ res += " STORING ("
+ } else {
+ res += ", "
+ }
+ res += string(storingCol)
+ }
+ if len(testCases[testIdx].expectedStoringColLists[indexIdx]) > 0 {
+ res += ")"
+ }
+ return res
+ }
+
+ for idx, testCase := range testCases {
+ trie := NewTrie()
+ for i, indexedCols := range testCase.indexedColLists {
+ trie.Insert(indexedCols, testCase.storingColLists[i])
+ }
+
+ if testCase.testType == insertRemoveStoring || testCase.testType == insertRemoveAndAssignStoring {
+ trie.RemoveStorings()
+ }
+
+ if testCase.testType == insertAssignStoring || testCase.testType == insertRemoveAndAssignStoring {
+ trie.AssignStoring()
+ }
+
+ retIndexedColLists, retStoringColLists := collectAllLeavesForTable(trie)
+ if len(retIndexedColLists) == len(testCase.expectedIndexedColLists) && len(retStoringColLists) == len(testCase.expectedStoringColLists) {
+ expectedIndexHit := make([]bool, len(testCase.expectedIndexedColLists))
+ for i, retIndexedColList := range retIndexedColLists {
+			// Find one matching index.
+ for j, expectedIndexedColList := range testCase.expectedIndexedColLists {
+ if !expectedIndexHit[j] && len(retIndexedColList) == len(expectedIndexedColList) && len(retStoringColLists[i]) == len(testCase.expectedStoringColLists[j]) {
+ var same = true
+ // Compare the indexedCol.
+ for k, retIndexedCol := range retIndexedColList {
+ if retIndexedCol != expectedIndexedColList[k] {
+ same = false
+ break
+ }
+ }
+ // Compare the storingCol.
+ if same {
+ for k, retStoringCol := range retStoringColLists[i] {
+ if retStoringCol != testCase.expectedStoringColLists[j][k] {
+ same = false
+ break
+ }
+ }
+ }
+ if same {
+ expectedIndexHit[j] = true
+ }
+ }
+ }
+ }
+
+ // Check whether all the indexes are covered.
+ for i, hit := range expectedIndexHit {
+ if !hit {
+ t.Errorf("failed test case %d since the expected index %s is missing!", idx, indexOutputFunc(idx, i))
+ }
+ }
+ } else {
+ t.Errorf("failed test case %d since the length of returned indexes is not the same as the expected length", idx)
+ }
+ }
+}
diff --git a/pkg/sql/opt/workloadindexrec/workload_indexrecs.go b/pkg/sql/opt/workloadindexrec/workload_indexrecs.go
new file mode 100644
index 000000000000..82d3e6640624
--- /dev/null
+++ b/pkg/sql/opt/workloadindexrec/workload_indexrecs.go
@@ -0,0 +1,197 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package workloadindexrec
+
+import (
+ "context"
+ "regexp"
+
+ "github.com/cockroachdb/cockroach/pkg/sql/parser"
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
+ "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
+ "github.com/cockroachdb/errors"
+)
+
+// FindWorkloadRecs finds index recommendations for the whole workload,
+// considering only statements executed after the timestamp ts.
+func FindWorkloadRecs(
+ ctx context.Context, evalCtx *eval.Context, ts *tree.DTimestampTZ,
+) ([]string, error) {
+ cis, dis, err := collectIndexRecs(ctx, evalCtx, ts)
+ if err != nil {
+ return nil, err
+ }
+
+ trieMap := buildTrieForIndexRecs(cis)
+ newCis, err := extractIndexCovering(trieMap)
+ if err != nil {
+ return nil, err
+ }
+
+ var res = make([]string, len(newCis))
+
+ for i, ci := range newCis {
+ res[i] = ci.String() + ";"
+ }
+
+	// Since we collect all the indexes represented by the leaf nodes, the
+	// indexes referenced by "DROP INDEX" statements have already been covered,
+	// so we can drop all of them directly, deduplicated via disMap.
+ var disMap = make(map[tree.TableIndexName]bool)
+ for _, di := range dis {
+ for _, index := range di.IndexList {
+ disMap[*index] = true
+ }
+ }
+
+ for index := range disMap {
+ dropCmd := tree.DropIndex{
+ IndexList: []*tree.TableIndexName{&index},
+ }
+ res = append(res, dropCmd.String()+";")
+ }
+
+ return res, nil
+}
+
+// collectIndexRecs collects all the index recommendations stored in
+// system.statement_statistics whose last execution time is later than ts.
+func collectIndexRecs(
+ ctx context.Context, evalCtx *eval.Context, ts *tree.DTimestampTZ,
+) ([]tree.CreateIndex, []tree.DropIndex, error) {
+ query := `SELECT index_recommendations FROM system.statement_statistics
+ WHERE (statistics -> 'statistics' ->> 'lastExecAt')::TIMESTAMPTZ > $1
+ AND array_length(index_recommendations, 1) > 0;`
+ indexRecs, err := evalCtx.Planner.QueryIteratorEx(ctx, "get-candidates-for-workload-indexrecs",
+ sessiondata.NoSessionDataOverride, query, ts.Time)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var p parser.Parser
+ var cis []tree.CreateIndex
+ var dis []tree.DropIndex
+ var ok bool
+
+ // The index recommendation starts with "creation", "replacement" or
+ // "alteration".
+ var r = regexp.MustCompile(`\s*(creation|replacement|alteration)\s*:\s*(.*)`)
+
+ for ok, err = indexRecs.Next(ctx); ; ok, err = indexRecs.Next(ctx) {
+ if err != nil {
+ err = errors.CombineErrors(err, indexRecs.Close())
+ indexRecs = nil
+ return cis, dis, err
+ }
+
+ if !ok {
+ break
+ }
+
+ indexes := tree.MustBeDArray(indexRecs.Cur()[0])
+ for _, index := range indexes.Array {
+ indexStr, ok := index.(*tree.DString)
+ if !ok {
+ err = errors.CombineErrors(errors.Newf("%s is not a string!", index.String()), indexRecs.Close())
+ indexRecs = nil
+ return cis, dis, err
+ }
+
+ indexStrArr := r.FindStringSubmatch(string(*indexStr))
+ if indexStrArr == nil {
+ err = errors.CombineErrors(errors.Newf("%s is not a valid index recommendation!", string(*indexStr)), indexRecs.Close())
+ indexRecs = nil
+ return cis, dis, err
+ }
+
+			// Alteration recommendations only make invisible indexes visible,
+			// so we skip them for now.
+ if indexStrArr[1] == "alteration" {
+ continue
+ }
+
+ stmts, err := p.Parse(indexStrArr[2])
+ if err != nil {
+ err = errors.CombineErrors(errors.Newf("%s is not a valid index operation!", indexStrArr[2]), indexRecs.Close())
+ indexRecs = nil
+ return cis, dis, err
+ }
+
+ for _, stmt := range stmts {
+ switch stmt := stmt.AST.(type) {
+ case *tree.CreateIndex:
+ // Ignore all the inverted, partial and sharded indexes right now.
+ if !stmt.Inverted && stmt.Predicate == nil && stmt.Sharded == nil {
+ cis = append(cis, *stmt)
+ }
+ case *tree.DropIndex:
+ dis = append(dis, *stmt)
+ }
+ }
+ }
+ }
+
+ return cis, dis, nil
+}
+
+// buildTrieForIndexRecs organizes all the CREATE INDEX recommendations into
+// one trie per table.
+func buildTrieForIndexRecs(cis []tree.CreateIndex) map[tree.TableName]*indexTrie {
+ trieMap := make(map[tree.TableName]*indexTrie)
+ for _, ci := range cis {
+ if _, ok := trieMap[ci.Table]; !ok {
+ trieMap[ci.Table] = NewTrie()
+ }
+
+ trieMap[ci.Table].Insert(ci.Columns, ci.Storing)
+ }
+ return trieMap
+}
+
+// extractIndexCovering pushes the storing part of each internal node down the
+// trie: if it is covered by some leaf node, it is discarded; otherwise it is
+// assigned to the shallowest leaf node. extractIndexCovering then collects
+// all the indexes represented by the leaf nodes.
+func extractIndexCovering(tm map[tree.TableName]*indexTrie) ([]tree.CreateIndex, error) {
+ for _, t := range tm {
+ t.RemoveStorings()
+ }
+ for _, t := range tm {
+ t.AssignStoring()
+ }
+ var cis []tree.CreateIndex
+ for table, trie := range tm {
+ indexedColsArray, storingColsArray := collectAllLeavesForTable(trie)
+		// The lengths of indexedColsArray and storingColsArray must be equal.
+ if len(indexedColsArray) != len(storingColsArray) {
+			return nil, errors.Newf("lengths of indexedColsArray and storingColsArray for table %s do not match", table)
+ }
+ for i, indexedCols := range indexedColsArray {
+ cisIndexedCols := make([]tree.IndexElem, len(indexedCols))
+ for j, col := range indexedCols {
+ cisIndexedCols[j] = tree.IndexElem{
+ Column: col.column,
+ Direction: col.direction,
+ }
+				// Normalize ASC back to the default direction.
+ if col.direction == tree.Ascending {
+ cisIndexedCols[j].Direction = tree.DefaultDirection
+ }
+ }
+ cis = append(cis, tree.CreateIndex{
+ Table: table,
+ Columns: cisIndexedCols,
+ Storing: storingColsArray[i],
+ })
+ }
+ }
+ return cis, nil
+}
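
The regexp in collectIndexRecs is the entire parsing contract for index_recommendations entries. Below is a standalone sketch of just that step; the sample string mirrors the logic test earlier in this diff:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as collectIndexRecs: "<kind> : <SQL>", where kind is
	// creation, replacement, or alteration.
	r := regexp.MustCompile(`\s*(creation|replacement|alteration)\s*:\s*(.*)`)

	rec := "replacement : CREATE INDEX t1_i2 ON t1(i) STORING (k); DROP INDEX t1_i;"
	m := r.FindStringSubmatch(rec)
	if m == nil {
		fmt.Println("not a valid index recommendation")
		return
	}
	fmt.Println(m[1]) // replacement
	fmt.Println(m[2]) // CREATE INDEX t1_i2 ON t1(i) STORING (k); DROP INDEX t1_i;
}
```
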
diff --git a/pkg/sql/sem/builtins/BUILD.bazel b/pkg/sql/sem/builtins/BUILD.bazel
index 5292399d19b0..bcf3fdeed0b4 100644
--- a/pkg/sql/sem/builtins/BUILD.bazel
+++ b/pkg/sql/sem/builtins/BUILD.bazel
@@ -67,6 +67,7 @@ go_library(
"//pkg/sql/lexbase",
"//pkg/sql/memsize",
"//pkg/sql/oidext",
+ "//pkg/sql/opt/workloadindexrec",
"//pkg/sql/parser",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
diff --git a/pkg/sql/sem/builtins/fixed_oids.go b/pkg/sql/sem/builtins/fixed_oids.go
index 47d98a8c2a7d..b474a3950e48 100644
--- a/pkg/sql/sem/builtins/fixed_oids.go
+++ b/pkg/sql/sem/builtins/fixed_oids.go
@@ -2434,8 +2434,6 @@ var builtinOidsArray = []string{
2461: `crdb_internal.plpgsql_raise(severity: string, message: string, detail: string, hint: string, code: string) -> int`,
2462: `workload_index_recs() -> string`,
2463: `workload_index_recs(timestamptz: timestamptz) -> string`,
- 2464: `workload_index_recs(budget: string) -> string`,
- 2465: `workload_index_recs(timestamptz: timestamptz, budget: string) -> string`,
2466: `crdb_internal.setup_span_configs_stream(tenant_name: string) -> bytes`,
2467: `crdb_internal.request_statement_bundle(stmtFingerprint: string, planGist: string, samplingProbability: float, minExecutionLatency: interval, expiresAfter: interval) -> bool`,
2468: `crdb_internal.request_statement_bundle(stmtFingerprint: string, planGist: string, antiPlanGist: bool, samplingProbability: float, minExecutionLatency: interval, expiresAfter: interval) -> bool`,
diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go
index 733f641579fa..f6cd0b833474 100644
--- a/pkg/sql/sem/builtins/generator_builtins.go
+++ b/pkg/sql/sem/builtins/generator_builtins.go
@@ -24,6 +24,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/lexbase"
+ "github.com/cockroachdb/cockroach/pkg/sql/opt/workloadindexrec"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/protoreflect"
@@ -284,31 +285,14 @@ var generators = map[string]builtinDefinition{
makeGeneratorOverload(
tree.ParamTypes{},
types.String,
- makeWorkloadIndexRecsGeneratorFactory(false /* hasTimstamp */, false /* hasBudget */),
+ makeWorkloadIndexRecsGeneratorFactory(false /* hasTimestamp */),
"Returns set of index recommendations",
volatility.Immutable,
),
makeGeneratorOverload(
tree.ParamTypes{{Name: "timestamptz", Typ: types.TimestampTZ}},
types.String,
- makeWorkloadIndexRecsGeneratorFactory(true /* hasTimstamp */, false /* hasBudget */),
- "Returns set of index recommendations",
- volatility.Immutable,
- ),
- makeGeneratorOverload(
- tree.ParamTypes{{Name: "budget", Typ: types.String}},
- types.String,
- makeWorkloadIndexRecsGeneratorFactory(false /* hasTimstamp */, true /* hasBudget */),
- "Returns set of index recommendations",
- volatility.Immutable,
- ),
- makeGeneratorOverload(
- tree.ParamTypes{
- {Name: "timestamptz", Typ: types.TimestampTZ},
- {Name: "budget", Typ: types.String},
- },
- types.String,
- makeWorkloadIndexRecsGeneratorFactory(true /* hasTimstamp */, true /* hasBudget */),
+ makeWorkloadIndexRecsGeneratorFactory(true /* hasTimestamp */),
"Returns set of index recommendations",
volatility.Immutable,
),
@@ -1161,19 +1145,28 @@ func (s *multipleArrayValueGenerator) Values() (tree.Datums, error) {
}
// makeWorkloadIndexRecsGeneratorFactory uses the arrayValueGenerator to return
-// all the index recommendations as an array of strings. When the hasTimestamp
-// is true, it means that we only care about the index after some timestamp. The
-// hasBudget represents that there is a space limit if it is true.
-func makeWorkloadIndexRecsGeneratorFactory(
- hasTimestamp bool, hasBudget bool,
-) eval.GeneratorOverload {
- return func(_ context.Context, _ *eval.Context, _ tree.Datums) (eval.ValueGenerator, error) {
- // Invoke the workloadindexrec.FindWorkloadRecs() to get indexRecs, err once
- // it is implemented. The string array {"1", "2", "3"} is just dummy data
- indexRecs := []string{"1", "2", "3"}
+// all the index recommendations as an array of strings. hasTimestamp
+// indicates whether a timestamp filter argument was supplied.
+func makeWorkloadIndexRecsGeneratorFactory(hasTimestamp bool) eval.GeneratorOverload {
+ return func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (eval.ValueGenerator, error) {
+ var ts tree.DTimestampTZ
+ var err error
+
+ if hasTimestamp {
+ ts = tree.MustBeDTimestampTZ(args[0])
+ } else {
+ ts = tree.DTimestampTZ{Time: tree.MinSupportedTime}
+ }
+
+ var indexRecs []string
+ indexRecs, err = workloadindexrec.FindWorkloadRecs(ctx, evalCtx, &ts)
+ if err != nil {
+ return &arrayValueGenerator{}, err
+ }
+
arr := tree.NewDArray(types.String)
for _, indexRec := range indexRecs {
- if err := arr.Append(tree.NewDString(indexRec)); err != nil {
+ if err = arr.Append(tree.NewDString(indexRec)); err != nil {
return nil, err
}
}