sql/physicalplan: make more tests compatible with secondary tenants
Release note: None
knz committed Aug 15, 2023
1 parent 40e7758 commit 371c1c5
Showing 4 changed files with 74 additions and 57 deletions.
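The common thread across the four files: stop assuming the test runs in the system tenant. Tests now fetch the SQL key codec and KV handles from the server under test (its application layer) instead of hard-coding keys.SystemSQLCodec, so keys get the correct tenant prefix when the harness places the test in a secondary tenant. Below is a minimal Go sketch of that pattern, built only from the helpers that appear in the hunks that follow; it is not a standalone program and assumes the usual CockroachDB test imports.

// Sketch of the tenant-aware setup this commit applies.
srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer srv.Stopper().Stop(context.Background())
ts := srv.ApplicationLayer()

// The codec comes from the server under test rather than keys.SystemSQLCodec,
// so encoded keys carry the prefix of whichever tenant the test runs in.
desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, ts.Codec(), "test", "t")
key, err := randgen.TestingMakePrimaryIndexKeyForTenant(desc, ts.Codec(), 0 /* pk */)
if err != nil {
	t.Fatal(err)
}
_, _ = db, key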
1 change: 0 additions & 1 deletion pkg/sql/physicalplan/BUILD.bazel
@@ -54,7 +54,6 @@ go_test(
shard_count = 16,
deps = [
"//pkg/base",
"//pkg/gossip",
"//pkg/keys",
"//pkg/kv",
"//pkg/kv/kvclient/kvcoord",
35 changes: 19 additions & 16 deletions pkg/sql/physicalplan/aggregator_funcs_test.go
@@ -18,7 +18,6 @@ import (

"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -147,17 +146,17 @@ func checkDistAggregationInfo(
Spans: make([]roachpb.Span, 1),
}
if err := rowenc.InitIndexFetchSpec(
&tr.FetchSpec, keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndex(), columnIDs,
&tr.FetchSpec, srv.Codec(), tableDesc, tableDesc.GetPrimaryIndex(), columnIDs,
); err != nil {
t.Fatal(err)
}

var err error
tr.Spans[0].Key, err = randgen.TestingMakePrimaryIndexKey(tableDesc, startPK)
tr.Spans[0].Key, err = randgen.TestingMakePrimaryIndexKeyForTenant(tableDesc, srv.Codec(), startPK)
if err != nil {
t.Fatal(err)
}
tr.Spans[0].EndKey, err = randgen.TestingMakePrimaryIndexKey(tableDesc, endPK)
tr.Spans[0].EndKey, err = randgen.TestingMakePrimaryIndexKeyForTenant(tableDesc, srv.Codec(), endPK)
if err != nil {
t.Fatal(err)
}
@@ -449,8 +448,9 @@ func TestSingleArgumentDistAggregateFunctions(t *testing.T) {
defer log.Scope(t).Close(t)
const numRows = 100

tc := serverutils.StartCluster(t, 1, base.TestClusterArgs{})
defer tc.Stopper().Stop(context.Background())
srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
defer srv.Stopper().Stop(context.Background())
ts := srv.ApplicationLayer()

// Create a table with a few columns:
// - k - primary key with values from 0 to number of rows
@@ -466,7 +466,7 @@ func TestSingleArgumentDistAggregateFunctions(t *testing.T) {
// - random ten bytes
rng, _ := randutil.NewTestRand()
sqlutils.CreateTable(
t, tc.ServerConn(0), "t",
t, db, "t",
"k INT PRIMARY KEY, int1 INT, int2 INT, int3 INT, bool1 BOOL, bool2 BOOL, dec1 DECIMAL, dec2 DECIMAL, float1 FLOAT, float2 FLOAT, b BYTES",
numRows,
func(row int) []tree.Datum {
@@ -489,8 +489,7 @@ func TestSingleArgumentDistAggregateFunctions(t *testing.T) {
},
)

kvDB := tc.Server(0).DB()
desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")
desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, ts.Codec(), "test", "t")

for fn, info := range physicalplan.DistAggregationTable {
if fn == execinfrapb.AnyNotNull {
@@ -528,7 +527,8 @@ func TestSingleArgumentDistAggregateFunctions(t *testing.T) {
name := fmt.Sprintf("%s/%s/%d", fn, col.GetName(), numRows)
t.Run(name, func(t *testing.T) {
checkDistAggregationInfo(
context.Background(), t, tc.Server(0), desc, []int{col.Ordinal()},
// TODO(#76378): pass ts, not srv, here.
context.Background(), t, srv, desc, []int{col.Ordinal()},
numRows, fn, info,
)
})
@@ -550,8 +550,11 @@ func TestTwoArgumentRegressionAggregateFunctions(t *testing.T) {
defer log.Scope(t).Close(t)
const numRows = 100

tc := serverutils.StartCluster(t, 1, base.TestClusterArgs{})
defer tc.Stopper().Stop(context.Background())
srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(108763),
})
defer srv.Stopper().Stop(context.Background())
ts := srv.ApplicationLayer()

// Create a table with a few columns:
// - k - primary key with values from 0 to number of rows
@@ -563,7 +566,7 @@ func TestTwoArgumentRegressionAggregateFunctions(t *testing.T) {
// - random decimals (with some NULLs)
rng, _ := randutil.NewTestRand()
sqlutils.CreateTable(
t, tc.ServerConn(0), "t",
t, db, "t",
"k INT PRIMARY KEY, int1 INT, dec1 DECIMAL, float1 FLOAT, int2 INT, dec2 DECIMAL, float2 FLOAT",
numRows,
func(row int) []tree.Datum {
@@ -579,8 +582,7 @@ func TestTwoArgumentRegressionAggregateFunctions(t *testing.T) {
},
)

kvDB := tc.Server(0).DB()
desc := desctestutils.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "public", "t")
desc := desctestutils.TestingGetTableDescriptor(kvDB, ts.Codec(), "test", "public", "t")

for fn, info := range physicalplan.DistAggregationTable {
if !isTwoArgumentFunction(fn) {
@@ -598,7 +600,8 @@ func TestTwoArgumentRegressionAggregateFunctions(t *testing.T) {
name := fmt.Sprintf("%s/%s-%s/%d", fn, cols[i].GetName(), cols[j].GetName(), numRows)
t.Run(name, func(t *testing.T) {
checkDistAggregationInfo(
context.Background(), t, tc.Server(0), desc, []int{i, j}, numRows,
// TODO(#76378): pass ts, not srv, here.
context.Background(), t, srv, desc, []int{i, j}, numRows,
fn, info,
)
})
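TestTwoArgumentRegressionAggregateFunctions above cannot run under a secondary tenant yet, so it pins itself to the system tenant via the DefaultTestTenant option while pointing at the tracking issue; checkDistAggregationInfo likewise still takes the whole server handle (see the TODO referencing #76378). A hedged sketch of that opt-out, based only on the option name visible in the diff; my reading is that it disables the harness's randomized tenant selection for this test until the linked issue is fixed.

// Sketch: keep this test on the system tenant until issue #108763 is resolved.
srv, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{
	// Assumption: this option tells the test harness not to probabilistically
	// start the test under a secondary tenant, and records the blocking issue.
	DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(108763),
})
defer srv.Stopper().Stop(context.Background())
_, _, _ = srv, db, kvDB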
6 changes: 5 additions & 1 deletion pkg/sql/physicalplan/fake_span_resolver_test.go
@@ -34,7 +34,11 @@ func TestFakeSpanResolver(t *testing.T) {
defer log.Scope(t).Close(t)
ctx := context.Background()

tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{})
tc := serverutils.StartCluster(t, 3, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(108763),
},
})
defer tc.Stopper().Stop(ctx)

sqlutils.CreateTable(
89 changes: 50 additions & 39 deletions pkg/sql/physicalplan/span_resolver_test.go
@@ -17,9 +17,7 @@ import (
"testing"

"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -45,13 +43,17 @@ func TestSpanResolverUsesCaches(t *testing.T) {
base.TestClusterArgs{
ReplicationMode: base.ReplicationManual,
ServerArgs: base.TestServerArgs{
UseDatabase: "t",
DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(108763),
UseDatabase: "t",
},
})
defer tc.Stopper().Stop(context.Background())

rowRanges, _ := setupRanges(
tc.Conns[0], tc.Servers[0], tc.Servers[0].DB(), t)
tc.ServerConn(0),
tc.Server(0).ApplicationLayer(),
tc.Server(0).StorageLayer(),
t)

// Replicate the row ranges on all of the first 3 nodes. Save the 4th node in
// a pristine state, with empty caches.
@@ -83,13 +85,13 @@
}

// Create a SpanResolver using the 4th node, with empty caches.
s3 := tc.Servers[3]
s3 := tc.Server(3).ApplicationLayer()

lr := physicalplan.NewSpanResolver(
s3.ClusterSettings(),
s3.DistSenderI().(*kvcoord.DistSender),
s3.GossipI().(*gossip.Gossip),
s3.NodeID(),
s3.NodeDescStoreI().(kvcoord.NodeDescStore),
s3.DistSQLPlanningNodeID(),
s3.Locality(),
s3.Clock(),
nil, // rpcCtx
@@ -169,18 +171,21 @@ func populateCache(db *gosql.DB, expectedNumRows int) error {
// `CREATE TABLE test (k INT PRIMARY KEY)` at row with value pk (the row will be
// the first on the right of the split).
func splitRangeAtVal(
ts serverutils.TestServerInterface, tableDesc catalog.TableDescriptor, pk int,
s serverutils.ApplicationLayerInterface,
stg serverutils.StorageLayerInterface,
tableDesc catalog.TableDescriptor,
pk int,
) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) {
if len(tableDesc.PublicNonPrimaryIndexes()) != 0 {
return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
errors.AssertionFailedf("expected table with just a PK, got: %+v", tableDesc)
}
pik, err := randgen.TestingMakePrimaryIndexKey(tableDesc, pk)
pik, err := randgen.TestingMakePrimaryIndexKeyForTenant(tableDesc, s.Codec(), pk)
if err != nil {
return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
}

leftRange, rightRange, err := ts.SplitRange(pik)
leftRange, rightRange, err := stg.SplitRange(pik)
if err != nil {
return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
errors.Wrapf(err, "failed to split at row: %d", pk)
@@ -191,17 +196,18 @@
func TestSpanResolver(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{
ts, db, _ := serverutils.StartServer(t, base.TestServerArgs{
UseDatabase: "t",
})
defer s.Stopper().Stop(context.Background())
defer ts.Stopper().Stop(context.Background())
s := ts.ApplicationLayer()

rowRanges, tableDesc := setupRanges(db, s, cdb, t)
rowRanges, tableDesc := setupRanges(db, s, ts.StorageLayer(), t)
lr := physicalplan.NewSpanResolver(
s.ClusterSettings(),
s.DistSenderI().(*kvcoord.DistSender),
s.GossipI().(*gossip.Gossip),
s.NodeID(),
s.NodeDescStoreI().(kvcoord.NodeDescStore),
s.DistSQLPlanningNodeID(),
s.Locality(),
s.Clock(),
nil, // rpcCtx
@@ -215,17 +221,17 @@
expected [][]rngInfo
}{
{
[]roachpb.Span{makeSpan(tableDesc, 0, 10000)},
[]roachpb.Span{makeSpan(tableDesc, s.Codec(), 0, 10000)},
[][]rngInfo{{
onlyReplica(rowRanges[0]),
onlyReplica(rowRanges[1]),
onlyReplica(rowRanges[2])}},
},
{
[]roachpb.Span{
makeSpan(tableDesc, 0, 9),
makeSpan(tableDesc, 11, 19),
makeSpan(tableDesc, 21, 29),
makeSpan(tableDesc, s.Codec(), 0, 9),
makeSpan(tableDesc, s.Codec(), 11, 19),
makeSpan(tableDesc, s.Codec(), 21, 29),
},
[][]rngInfo{
{onlyReplica(rowRanges[0])},
@@ -235,8 +241,8 @@ func TestSpanResolver(t *testing.T) {
},
{
[]roachpb.Span{
makeSpan(tableDesc, 0, 20),
makeSpan(tableDesc, 20, 29),
makeSpan(tableDesc, s.Codec(), 0, 20),
makeSpan(tableDesc, s.Codec(), 20, 29),
},
[][]rngInfo{
{onlyReplica(rowRanges[0]), onlyReplica(rowRanges[1])},
Expand All @@ -245,12 +251,12 @@ func TestSpanResolver(t *testing.T) {
},
{
[]roachpb.Span{
makeSpan(tableDesc, 0, 1),
makeSpan(tableDesc, 1, 2),
makeSpan(tableDesc, 2, 3),
makeSpan(tableDesc, 3, 4),
makeSpan(tableDesc, 5, 11),
makeSpan(tableDesc, 20, 29),
makeSpan(tableDesc, s.Codec(), 0, 1),
makeSpan(tableDesc, s.Codec(), 1, 2),
makeSpan(tableDesc, s.Codec(), 2, 3),
makeSpan(tableDesc, s.Codec(), 3, 4),
makeSpan(tableDesc, s.Codec(), 5, 11),
makeSpan(tableDesc, s.Codec(), 20, 29),
},
[][]rngInfo{
{onlyReplica(rowRanges[0])},
@@ -290,17 +296,19 @@ func TestMixedDirections(t *testing.T) {
func TestMixedDirections(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{
ts, db, _ := serverutils.StartServer(t, base.TestServerArgs{
UseDatabase: "t",
})
defer s.Stopper().Stop(context.Background())
defer ts.Stopper().Stop(context.Background())

rowRanges, tableDesc := setupRanges(db, s, cdb, t)
s := ts.ApplicationLayer()

rowRanges, tableDesc := setupRanges(db, s, ts.StorageLayer(), t)
lr := physicalplan.NewSpanResolver(
s.ClusterSettings(),
s.DistSenderI().(*kvcoord.DistSender),
s.GossipI().(*gossip.Gossip),
s.NodeID(),
s.NodeDescStoreI().(kvcoord.NodeDescStore),
s.DistSQLPlanningNodeID(),
s.Locality(),
s.Clock(),
nil, // rpcCtx
@@ -310,8 +318,8 @@ func TestMixedDirections(t *testing.T) {
it := lr.NewSpanResolverIterator(nil, nil)

spans := []spanWithDir{
orient(kvcoord.Ascending, makeSpan(tableDesc, 11, 15))[0],
orient(kvcoord.Descending, makeSpan(tableDesc, 1, 14))[0],
orient(kvcoord.Ascending, makeSpan(tableDesc, s.Codec(), 11, 15))[0],
orient(kvcoord.Descending, makeSpan(tableDesc, s.Codec(), 1, 14))[0],
}
replicas, err := resolveSpans(ctx, it, spans...)
if err != nil {
@@ -327,7 +335,10 @@
}

func setupRanges(
db *gosql.DB, s serverutils.TestServerInterface, cdb *kv.DB, t *testing.T,
db *gosql.DB,
s serverutils.ApplicationLayerInterface,
stg serverutils.StorageLayerInterface,
t *testing.T,
) ([]roachpb.RangeDescriptor, catalog.TableDescriptor) {
if _, err := db.Exec(`CREATE DATABASE t`); err != nil {
t.Fatal(err)
@@ -344,13 +355,13 @@ }
}
}

tableDesc := desctestutils.TestingGetPublicTableDescriptor(cdb, keys.SystemSQLCodec, "t", "test")
tableDesc := desctestutils.TestingGetPublicTableDescriptor(s.DB(), s.Codec(), "t", "test")
// Split every SQL row to its own range.
rowRanges := make([]roachpb.RangeDescriptor, len(values))
for i, val := range values {
var err error
var l roachpb.RangeDescriptor
l, rowRanges[i], err = splitRangeAtVal(s, tableDesc, val)
l, rowRanges[i], err = splitRangeAtVal(s, stg, tableDesc, val)
if err != nil {
t.Fatal(err)
}
@@ -458,9 +469,9 @@ func expectResolved(actual [][]rngInfo, expected ...[]rngInfo) error {
return nil
}

func makeSpan(tableDesc catalog.TableDescriptor, i, j int) roachpb.Span {
func makeSpan(tableDesc catalog.TableDescriptor, codec keys.SQLCodec, i, j int) roachpb.Span {
makeKey := func(val int) roachpb.Key {
key, err := randgen.TestingMakePrimaryIndexKey(tableDesc, val)
key, err := randgen.TestingMakePrimaryIndexKeyForTenant(tableDesc, codec, val)
if err != nil {
panic(err)
}
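The span-resolver tests also change where NewSpanResolver gets node descriptors and the gateway node ID: the *gossip.Gossip handle and NodeID(), which a secondary tenant's application layer does not expose, are replaced with the kvcoord.NodeDescStore interface and DistSQLPlanningNodeID(). A hedged sketch of the new call shape, using only the accessors visible in the hunks above; the trailing constructor arguments are elided here just as they are in the diff.

s := srv.ApplicationLayer()
lr := physicalplan.NewSpanResolver(
	s.ClusterSettings(),
	s.DistSenderI().(*kvcoord.DistSender),
	s.NodeDescStoreI().(kvcoord.NodeDescStore), // was: s.GossipI().(*gossip.Gossip)
	s.DistSQLPlanningNodeID(),                  // was: s.NodeID()
	s.Locality(),
	s.Clock(),
	nil, // rpcCtx
	// ... remaining arguments unchanged (not shown in this diff).
)
it := lr.NewSpanResolverIterator(nil, nil)
_ = it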
