statusccl: stop serving /_status/nodes to tenants
Fixes #98057.

This reverts the work of #93268, which is no longer necessary now that
we are eagerly capturing region information at execution time in #95449.

Release note: None
matthewtodd committed Mar 14, 2023
1 parent ca5ae38 commit ebf9ac7
Showing 4 changed files with 1 addition and 105 deletions.
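For context, the endpoint removed in this commit was a stop-gap that let the DB Console map SQL instance IDs to localities on multiregion tenant clusters. The approach that supersedes it (#95449) captures the region while a statement executes and stores it alongside the statistics, so no live node lookup is needed at display time. The snippet below is a minimal, hypothetical sketch of that idea only, not the actual #95449 implementation: the stmtStats type and recordRegion helper are invented for illustration, and only roachpb.Locality and roachpb.Tier (which appear in the diff below) are real types.

package example

import "github.com/cockroachdb/cockroach/pkg/roachpb"

// stmtStats is a stand-in for a sqlstats record; Regions holds the regions
// observed when the statement actually executed.
type stmtStats struct {
	Fingerprint string
	Regions     []string
}

// recordRegion captures the executing instance's region into the stats
// record at execution time, so the UI can read it later without asking a
// status endpoint which SQL instances happen to be alive right now.
func recordRegion(stats *stmtStats, locality roachpb.Locality) {
	for _, tier := range locality.Tiers {
		if tier.Key == "region" {
			stats.Regions = append(stats.Regions, tier.Value)
			return
		}
	}
}

With region data persisted this way, the console can group statement statistics by region straight from the stats payload, which is why the tenant-facing Nodes endpoint and its supporting test plumbing can be dropped below.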
51 changes: 1 addition & 50 deletions pkg/ccl/serverccl/statusccl/tenant_status_test.go
@@ -66,21 +66,7 @@ func TestTenantStatusAPI(t *testing.T) {
StoreDisableCoalesceAdjacent: true,
}

tenantLocalities := []roachpb.Locality{
{Tiers: []roachpb.Tier{{Key: "region", Value: "gcp-us-west1"}}},
{Tiers: []roachpb.Tier{{Key: "region", Value: "gcp-us-central1"}}},
{Tiers: []roachpb.Tier{{Key: "region", Value: "gcp-us-east1"}}},
}

testHelper := serverccl.NewTestTenantHelperWithTenantArgs(
t,
3, /* tenantClusterSize */
knobs,
func(tenantIdx int, tenantArgs *base.TestTenantArgs) {
tenantArgs.Locality = tenantLocalities[tenantIdx]
},
)

testHelper := serverccl.NewTestTenantHelper(t, 3 /* tenantClusterSize */, knobs)
defer testHelper.Cleanup(ctx, t)

// Speed up propagation of tenant capability changes.
@@ -141,10 +127,6 @@ func TestTenantStatusAPI(t *testing.T) {
testTenantLogs(ctx, t, testHelper)
})

t.Run("tenant_nodes", func(t *testing.T) {
testTenantNodes(ctx, t, testHelper, tenantLocalities)
})

t.Run("tenant_hot_ranges", func(t *testing.T) {
testTenantHotRanges(ctx, t, testHelper)
})
@@ -1356,37 +1338,6 @@ func testTenantAuthOnStatements(
require.NoError(t, err)
}

func testTenantNodes(
ctx context.Context,
t *testing.T,
helper serverccl.TenantTestHelper,
tenantLocalities []roachpb.Locality,
) {
// TODO(todd): Get this test working over HTTP, #93267.
//
// When I first tried, I ran into errors about the SQL user
// `authentic_user_noadmin` already existing, but at first glance it seemed
// that the sync.Once mechanism used in its setup had already accounted
// for that scenario; that we're properly passing by reference all the way
// through; and that these tests aren't being run in parallel.
tenantA := helper.TestCluster().TenantStatusSrv(0)
resp, err := tenantA.Nodes(ctx, &serverpb.NodesRequest{})
require.NoError(t, err)

expected := map[roachpb.NodeID]roachpb.Locality{
1: tenantLocalities[0],
2: tenantLocalities[1],
3: tenantLocalities[2],
}

actual := make(map[roachpb.NodeID]roachpb.Locality)
for _, node := range resp.Nodes {
actual[node.Desc.NodeID] = node.Desc.Locality
}

require.Equal(t, expected, actual)
}

// assertStartKeyInRange compares the pretty printed startKey with the provided
// tenantPrefix key, ensuring that the startKey starts with the tenantPrefix.
func assertStartKeyInRange(t *testing.T, startKey string, tenantPrefix roachpb.Key) {
16 changes: 0 additions & 16 deletions pkg/ccl/serverccl/tenant_test_utils.go
@@ -145,18 +145,6 @@ func NewTestTenantHelper(
) TenantTestHelper {
t.Helper()

return NewTestTenantHelperWithTenantArgs(t, tenantClusterSize, knobs, func(int, *base.TestTenantArgs) {})
}

// NewTestTenantHelperWithTenantArgs constructs a TenantTestHelper instance,
// offering the caller the opportunity to modify the arguments passed when
// starting each tenant.
func NewTestTenantHelperWithTenantArgs(
t *testing.T,
tenantClusterSize int,
knobs base.TestingKnobs,
customizeTenantArgs func(tenantIdx int, tenantArgs *base.TestTenantArgs),
) TenantTestHelper {
t.Helper()

params, _ := tests.CreateTestServerParams()
@@ -176,7 +164,6 @@ func NewTestTenantHelperWithTenantArgs(
tenantClusterSize,
security.EmbeddedTenantIDs()[0],
knobs,
customizeTenantArgs,
),
// Spin up a small tenant cluster under a different tenant ID to test
// tenant isolation.
@@ -186,7 +173,6 @@
1, /* tenantClusterSize */
security.EmbeddedTenantIDs()[1],
knobs,
func(int, *base.TestTenantArgs) {},
),
}
}
@@ -234,15 +220,13 @@ func newTenantClusterHelper(
tenantClusterSize int,
tenantID uint64,
knobs base.TestingKnobs,
customizeTenantArgs func(tenantIdx int, tenantArgs *base.TestTenantArgs),
) TenantClusterHelper {
t.Helper()

var cluster tenantCluster = make([]TestTenant, tenantClusterSize)
for i := 0; i < tenantClusterSize; i++ {
args := tests.CreateTestTenantParams(roachpb.MustMakeTenantID(tenantID))
args.TestingKnobs = knobs
customizeTenantArgs(i, &args)
cluster[i] = newTestTenant(t, server, args)
}

1 change: 0 additions & 1 deletion pkg/server/serverpb/status.go
@@ -40,7 +40,6 @@ type SQLStatusServer interface {
UserSQLRoles(context.Context, *UserSQLRolesRequest) (*UserSQLRolesResponse, error)
TxnIDResolution(context.Context, *TxnIDResolutionRequest) (*TxnIDResolutionResponse, error)
TransactionContentionEvents(context.Context, *TransactionContentionEventsRequest) (*TransactionContentionEventsResponse, error)
Nodes(context.Context, *NodesRequest) (*NodesResponse, error)
NodesList(context.Context, *NodesListRequest) (*NodesListResponse, error)
ListExecutionInsights(context.Context, *ListExecutionInsightsRequest) (*ListExecutionInsightsResponse, error)
LogFilesList(context.Context, *LogFilesListRequest) (*LogFilesListResponse, error)
38 changes: 0 additions & 38 deletions pkg/server/status.go
@@ -1557,44 +1557,6 @@ func (s *statusServer) NodesList(
return s.serverIterator.nodesList(ctx)
}

// Nodes returns a limited subset of the full NodesResponse.
//
// Its existence is a stop-gap measure to support running much of our UI code
// on multiregion tenant clusters. Longer-term, we will need to reconsider
// the overall architecture (e.g. store node ids in sqlstats, map node id ->
// region at UI display time) because sql instances are far more ephemeral
// than cluster nodes, and this Nodes endpoint is only able to answer for
// currently-live sql instances. What I think we want is to store locality
// information directly in the sqlstats tables, sourced from some new tracing
// in the distsql flow.
func (s *statusServer) Nodes(
ctx context.Context, req *serverpb.NodesRequest,
) (*serverpb.NodesResponse, error) {
ctx = forwardSQLIdentityThroughRPCCalls(ctx)
ctx = s.AnnotateCtx(ctx)

if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil {
// NB: not using serverError() here since the priv checker
// already returns a proper gRPC error status.
return nil, err
}

instances, err := s.sqlServer.sqlInstanceReader.GetAllInstances(ctx)
if err != nil {
return nil, err
}
var resp serverpb.NodesResponse
for _, instance := range instances {
resp.Nodes = append(resp.Nodes, statuspb.NodeStatus{
Desc: roachpb.NodeDescriptor{
NodeID: roachpb.NodeID(instance.InstanceID),
Locality: instance.Locality,
},
})
}
return &resp, err
}

// Nodes returns all node statuses.
//
// Do not use this method inside the server code! Use