From 916b3554c8cb7408f42b5fb231202e8783a606db Mon Sep 17 00:00:00 2001
From: Thomas Hardy
Date: Tue, 21 Mar 2023 02:23:30 -0400
Subject: [PATCH] ui: fix in-flight request replacement, clean up database
 details api

Changes in https://github.com/cockroachdb/cockroach/pull/98331 that allow
in-flight request replacement caused breaking changes to
`KeyedCachedDataReducer`s using the sql api. A `KeyedCachedDataReducer`
using the sql api issues all of its requests to the same endpoint, and
despite the added `allowReplacementRequests` flag being `false`, in-flight
requests would still be replaced (specifically, when fetching initial
state). This prevented every request except the last one from being stored
in the reducer state, leaving them in a permanent `inFlight` status.
Consequently, our pages would break: the `inFlight` status prevented us
from ever fetching data, putting these pages in a permanent loading state.

This PR adds checks to ensure `allowReplacementRequests` is enabled when we
receive a response, before deciding whether to omit that response from the
reducer state.

Note that this is still not an ideal solution for `KeyedCachedDataReducer`s,
should we ever want to replace in-flight requests for one. The checks that
replace an in-flight request still occur at the reducer level, whereas in a
`KeyedCachedDataReducer` we'd like them to occur at the state level (i.e.
instead of replacement checks applying to all requests across the reducer,
we should only check requests that affect a specific key's state). That way
replacements are scoped to each key in the reducer, allowing us to fetch
data for each key, which is the intended functionality (sketched below).
Enabling replacement for a `KeyedCachedDataReducer` without making this
change will cause the breaking behaviour described above.

This PR also includes some cleanup of the database details API, namely:
- camel-casing response fields
- encapsulating the database span stats response in its own object (instead
  of spreading it across multiple objects), which scopes any errors from
  that query to the single stats object

Release note (bug fix): Fixed replacement of in-flight requests for
`KeyedCachedDataReducer`s to prevent permanent loading for requests stuck
in an `inFlight` status.
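For illustration only, and not part of this change: a minimal sketch of what
a per-key (state-level) replacement check could look like. Everything below
(the `KeyedReplacementGuard` class, `pendingRequestStartedByKey`, and the
`requestToID` callback) is a hypothetical name used for the sketch, not
existing code.

    // Illustrative sketch: scope replacement decisions to a single key's
    // state rather than to the whole reducer.
    class KeyedReplacementGuard<TRequest> {
      // Timestamp of the most recently issued request, tracked per key.
      private pendingRequestStartedByKey = new Map<string, number>();

      constructor(
        private requestToID: (req: TRequest) => string,
        private allowReplacementRequests: boolean,
      ) {}

      // Record a newly issued request for this key and return its token.
      start(req: TRequest): number {
        const started = Date.now();
        this.pendingRequestStartedByKey.set(this.requestToID(req), started);
        return started;
      }

      // Drop a response only if replacement is enabled AND a newer request
      // has since been issued for the *same* key; requests for other keys
      // are unaffected, so every key can still load its data.
      shouldDrop(req: TRequest, started: number): boolean {
        return (
          this.allowReplacementRequests &&
          this.pendingRequestStartedByKey.get(this.requestToID(req)) !== started
        );
      }
    }

A keyed reducer could call `start(req)` before dispatching `requestData`,
and consult `shouldDrop(req, started)` in the success, error, and final
`.then` handlers (the same three places this PR adds the
`allowReplacementRequests` check), so that replacement decisions only
consider requests for the same key.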
https://cockroachlabs.atlassian.net/browse/DOC-1355 #Informs: https://github.com/cockroachdb/cockroach/issues/33316 #Epic: CRDB-8035 --- .../cluster-ui/src/api/databaseDetailsApi.ts | 109 ++++++++---------- .../cluster-ui/src/sql/highlight.tsx | 2 +- .../db-console/src/redux/cachedDataReducer.ts | 19 ++- .../db-console/src/util/api.spec.ts | 23 ++-- .../databases/databaseDetailsPage/redux.ts | 2 +- .../views/databases/databasesPage/redux.ts | 11 +- 6 files changed, 82 insertions(+), 84 deletions(-) diff --git a/pkg/ui/workspaces/cluster-ui/src/api/databaseDetailsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/databaseDetailsApi.ts index 5c5e1a522a88..c8db0e7e6803 100644 --- a/pkg/ui/workspaces/cluster-ui/src/api/databaseDetailsApi.ts +++ b/pkg/ui/workspaces/cluster-ui/src/api/databaseDetailsApi.ts @@ -33,20 +33,20 @@ type ZoneConfigType = cockroach.config.zonepb.ZoneConfig; type ZoneConfigLevelType = cockroach.server.serverpb.ZoneConfigurationLevel; export type DatabaseDetailsResponse = { - id_resp: SqlApiQueryResponse; - grants_resp: SqlApiQueryResponse; - tables_resp: SqlApiQueryResponse; - zone_config_resp: SqlApiQueryResponse; + idResp: SqlApiQueryResponse; + grantsResp: SqlApiQueryResponse; + tablesResp: SqlApiQueryResponse; + zoneConfigResp: SqlApiQueryResponse; stats?: DatabaseDetailsStats; error?: SqlExecutionErrorMessage; }; function newDatabaseDetailsResponse(): DatabaseDetailsResponse { return { - id_resp: { database_id: "" }, - grants_resp: { grants: [] }, - tables_resp: { tables: [] }, - zone_config_resp: { + idResp: { database_id: "" }, + grantsResp: { grants: [] }, + tablesResp: { tables: [] }, + zoneConfigResp: { zone_config: new ZoneConfig({ inherited_constraints: true, inherited_lease_preferences: true, @@ -54,18 +54,17 @@ function newDatabaseDetailsResponse(): DatabaseDetailsResponse { zone_config_level: ZoneConfigurationLevel.CLUSTER, }, stats: { - ranges_data: { - range_count: 0, + spanStats: { + approximate_disk_bytes: 0, live_bytes: 0, total_bytes: 0, - // Note: we are currently populating this with replica ids which do not map 1 to 1 - node_ids: [], - regions: [], + range_count: 0, }, - pebble_data: { - approximate_disk_bytes: 0, + replicaData: { + replicas: [], + regions: [], }, - index_stats: { num_index_recommendations: 0 }, + indexStats: { num_index_recommendations: 0 }, }, }; } @@ -88,10 +87,10 @@ const getDatabaseId: DatabaseDetailsQuery = { ) => { // Check that txn_result.rows[0].database_id is not null if (!txnResultIsEmpty(txn_result) && txn_result.rows[0].database_id) { - resp.id_resp.database_id = txn_result.rows[0].database_id; + resp.idResp.database_id = txn_result.rows[0].database_id; } if (txn_result.error) { - resp.id_resp.error = txn_result.error; + resp.idResp.error = txn_result.error; } }, }; @@ -109,9 +108,12 @@ type DatabaseGrantsRow = { const getDatabaseGrantsQuery: DatabaseDetailsQuery = { createStmt: dbName => { return { - sql: `SELECT grantee as user, array_agg(privilege_type) as privileges - FROM crdb_internal.cluster_database_privileges + sql: Format( + `SELECT grantee as user, array_agg(privilege_type) as privileges + FROM %1.crdb_internal.cluster_database_privileges WHERE database_name = $1 group by grantee`, + [new Identifier(dbName)], + ), arguments: [dbName], }; }, @@ -120,9 +122,9 @@ const getDatabaseGrantsQuery: DatabaseDetailsQuery = { resp: DatabaseDetailsResponse, ) => { if (!txnResultIsEmpty(txn_result)) { - resp.grants_resp.grants = txn_result.rows; + resp.grantsResp.grants = txn_result.rows; if (txn_result.error) { - 
resp.grants_resp.error = txn_result.error; + resp.grantsResp.error = txn_result.error; } } }, @@ -155,7 +157,7 @@ const getDatabaseTablesQuery: DatabaseDetailsQuery = { resp: DatabaseDetailsResponse, ) => { if (!txnResultIsEmpty(txn_result)) { - resp.tables_resp.tables = txn_result.rows.map(row => { + resp.tablesResp.tables = txn_result.rows.map(row => { const escTableName = new QualifiedIdentifier([ row.table_schema, row.table_name, @@ -164,7 +166,7 @@ const getDatabaseTablesQuery: DatabaseDetailsQuery = { }); } if (txn_result.error) { - resp.tables_resp.error = txn_result.error; + resp.tablesResp.error = txn_result.error; } }, }; @@ -209,47 +211,32 @@ const getDatabaseZoneConfig: DatabaseDetailsQuery = { // Parse the bytes from the hex string. const zoneConfigBytes = fromHexString(zoneConfigHexString); // Decode the bytes using ZoneConfig protobuf. - resp.zone_config_resp.zone_config = ZoneConfig.decode( + resp.zoneConfigResp.zone_config = ZoneConfig.decode( new Uint8Array(zoneConfigBytes), ); - resp.zone_config_resp.zone_config_level = - ZoneConfigurationLevel.DATABASE; + resp.zoneConfigResp.zone_config_level = ZoneConfigurationLevel.DATABASE; } catch (e) { console.error( `Database Details API - encountered an error decoding zone config string: ${zoneConfigHexString}`, ); // Catch and assign the error if we encounter one decoding. - resp.zone_config_resp.error = e; - resp.zone_config_resp.zone_config_level = - ZoneConfigurationLevel.UNKNOWN; + resp.zoneConfigResp.error = e; + resp.zoneConfigResp.zone_config_level = ZoneConfigurationLevel.UNKNOWN; } } if (txn_result.error) { - resp.id_resp.error = txn_result.error; + resp.idResp.error = txn_result.error; } }, }; // Database Stats type DatabaseDetailsStats = { - pebble_data: DatabasePebbleData; - ranges_data: DatabaseRangesData; - index_stats: DatabaseIndexUsageStatsResponse; + spanStats: SqlApiQueryResponse; + replicaData: SqlApiQueryResponse; + indexStats: SqlApiQueryResponse; }; -type DatabasePebbleData = { - approximate_disk_bytes: number; -}; - -type DatabaseRangesData = SqlApiQueryResponse<{ - range_count: number; - live_bytes: number; - total_bytes: number; - // Note: we are currently populating this with replica ids which do not map 1 to 1 - node_ids: number[]; - regions: string[]; -}>; - type DatabaseSpanStatsRow = { approximate_disk_bytes: number; live_bytes: number; @@ -274,20 +261,19 @@ const getDatabaseSpanStats: DatabaseDetailsQuery = { resp: DatabaseDetailsResponse, ) => { if (txn_result && txn_result.error) { - resp.stats.ranges_data.error = txn_result.error; + resp.stats.spanStats.error = txn_result.error; } if (txnResultIsEmpty(txn_result)) { return; } if (txn_result.rows.length === 1) { const row = txn_result.rows[0]; - resp.stats.pebble_data.approximate_disk_bytes = - row.approximate_disk_bytes; - resp.stats.ranges_data.range_count = row.range_count; - resp.stats.ranges_data.live_bytes = row.live_bytes; - resp.stats.ranges_data.total_bytes = row.total_bytes; + resp.stats.spanStats.approximate_disk_bytes = row.approximate_disk_bytes; + resp.stats.spanStats.range_count = row.range_count; + resp.stats.spanStats.live_bytes = row.live_bytes; + resp.stats.spanStats.total_bytes = row.total_bytes; } else { - resp.stats.ranges_data.error = new Error( + resp.stats.spanStats.error = new Error( `DatabaseDetails - Span Stats, expected 1 row, got ${txn_result.rows.length}`, ); } @@ -327,18 +313,18 @@ const getDatabaseReplicasAndRegions: DatabaseDetailsQuery { if (!txnResultIsEmpty(txn_result)) { - resp.stats.ranges_data.regions = 
txn_result.rows[0].regions; - resp.stats.ranges_data.node_ids = txn_result.rows[0].replicas; + resp.stats.replicaData.regions = txn_result.rows[0].regions; + resp.stats.replicaData.replicas = txn_result.rows[0].replicas; } if (txn_result.error) { - resp.stats.ranges_data.error = txn_result.error; + resp.stats.replicaData.error = txn_result.error; } }, }; -type DatabaseIndexUsageStatsResponse = SqlApiQueryResponse<{ +type DatabaseIndexUsageStatsResponse = { num_index_recommendations?: number; -}>; +}; const getDatabaseIndexUsageStats: DatabaseDetailsQuery = { createStmt: dbName => { @@ -374,11 +360,11 @@ const getDatabaseIndexUsageStats: DatabaseDetailsQuery = { txn_result.rows?.forEach(row => { const rec = recommendDropUnusedIndex(row); if (rec.recommend) { - resp.stats.index_stats.num_index_recommendations += 1; + resp.stats.indexStats.num_index_recommendations += 1; } }); if (txn_result.error) { - resp.stats.index_stats.error = txn_result.error; + resp.stats.indexStats.error = txn_result.error; } }, }; @@ -414,6 +400,7 @@ export function createDatabaseDetailsReq(dbName: string): SqlExecutionRequest { return { execute: true, statements: databaseDetailQueries.map(query => query.createStmt(dbName)), + database: dbName, max_result_size: LARGE_RESULT_SIZE, timeout: LONG_TIMEOUT, }; diff --git a/pkg/ui/workspaces/cluster-ui/src/sql/highlight.tsx b/pkg/ui/workspaces/cluster-ui/src/sql/highlight.tsx index 43cf4747bf55..28340d5051df 100644 --- a/pkg/ui/workspaces/cluster-ui/src/sql/highlight.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/sql/highlight.tsx @@ -39,7 +39,7 @@ export class Highlight extends React.Component { renderZone = (): React.ReactElement => { const { zone } = this.props; - const zoneConfig = zone.zone_config_resp.zone_config; + const zoneConfig = zone.zoneConfigResp.zone_config; return ( CONFIGURE ZONE USING diff --git a/pkg/ui/workspaces/db-console/src/redux/cachedDataReducer.ts b/pkg/ui/workspaces/db-console/src/redux/cachedDataReducer.ts index 9d77b4908161..98d5557f6a22 100644 --- a/pkg/ui/workspaces/db-console/src/redux/cachedDataReducer.ts +++ b/pkg/ui/workspaces/db-console/src/redux/cachedDataReducer.ts @@ -297,14 +297,22 @@ export class CachedDataReducer< return this.apiEndpoint(req, this.requestTimeout) .then( data => { - // Dispatch the results to the store. - if (this.pendingRequestStarted !== pendingRequestStarted) { + // Check if we are replacing requests. If so, do not update the reducer + // state if this is not the latest request. + if ( + this.allowReplacementRequests && + this.pendingRequestStarted !== pendingRequestStarted + ) { return; } + // Dispatch the results to the store. dispatch(this.receiveData(data, req)); }, (error: Error) => { - if (this.pendingRequestStarted !== pendingRequestStarted) { + if ( + this.allowReplacementRequests && + this.pendingRequestStarted !== pendingRequestStarted + ) { return; } @@ -328,7 +336,10 @@ export class CachedDataReducer< }, ) .then(() => { - if (this.pendingRequestStarted !== pendingRequestStarted) { + if ( + this.allowReplacementRequests && + this.pendingRequestStarted !== pendingRequestStarted + ) { return; } // Invalidate data after the invalidation period if one exists. 
diff --git a/pkg/ui/workspaces/db-console/src/util/api.spec.ts b/pkg/ui/workspaces/db-console/src/util/api.spec.ts index 88fed76b0d50..854418b78a5a 100644 --- a/pkg/ui/workspaces/db-console/src/util/api.spec.ts +++ b/pkg/ui/workspaces/db-console/src/util/api.spec.ts @@ -180,6 +180,7 @@ describe("rest api", function () { }, ], }, + // Database span stats query { rows: [ { @@ -195,24 +196,22 @@ describe("rest api", function () { return clusterUiApi.getDatabaseDetails(dbName).then(result => { expect(fetchMock.calls(clusterUiApi.SQL_API_PATH).length).toBe(1); - expect(result.results.id_resp.database_id).toEqual("1"); - expect(result.results.tables_resp.tables.length).toBe(1); - expect(result.results.grants_resp.grants.length).toBe(2); - expect(result.results.stats.index_stats.num_index_recommendations).toBe( + expect(result.results.idResp.database_id).toEqual("1"); + expect(result.results.tablesResp.tables.length).toBe(1); + expect(result.results.grantsResp.grants.length).toBe(2); + expect(result.results.stats.indexStats.num_index_recommendations).toBe( 1, ); - expect(result.results.zone_config_resp.zone_config).toEqual( + expect(result.results.zoneConfigResp.zone_config).toEqual( mockZoneConfig, ); - expect(result.results.zone_config_resp.zone_config_level).toBe( + expect(result.results.zoneConfigResp.zone_config_level).toBe( ZoneConfigurationLevel.DATABASE, ); - expect(result.results.stats.pebble_data.approximate_disk_bytes).toBe( - 100, - ); - expect(result.results.stats.ranges_data.live_bytes).toBe(200); - expect(result.results.stats.ranges_data.total_bytes).toBe(300); - expect(result.results.stats.ranges_data.range_count).toBe(400); + expect(result.results.stats.spanStats.approximate_disk_bytes).toBe(100); + expect(result.results.stats.spanStats.live_bytes).toBe(200); + expect(result.results.stats.spanStats.total_bytes).toBe(300); + expect(result.results.stats.spanStats.range_count).toBe(400); }); }); diff --git a/pkg/ui/workspaces/db-console/src/views/databases/databaseDetailsPage/redux.ts b/pkg/ui/workspaces/db-console/src/views/databases/databaseDetailsPage/redux.ts index ba1c30215cfc..2c8a0606b373 100644 --- a/pkg/ui/workspaces/db-console/src/views/databases/databaseDetailsPage/redux.ts +++ b/pkg/ui/workspaces/db-console/src/views/databases/databaseDetailsPage/redux.ts @@ -130,7 +130,7 @@ export const mapStateToProps = createSelector( nodeRegions: nodeRegions, isTenant: isTenant, tables: _.map( - databaseDetails[database]?.data?.results.tables_resp.tables, + databaseDetails[database]?.data?.results.tablesResp.tables, table => { const tableId = generateTableID(database, table); const details = tableDetails[tableId]; diff --git a/pkg/ui/workspaces/db-console/src/views/databases/databasesPage/redux.ts b/pkg/ui/workspaces/db-console/src/views/databases/databasesPage/redux.ts index e808c7a7c8ed..03595410aa09 100644 --- a/pkg/ui/workspaces/db-console/src/views/databases/databasesPage/redux.ts +++ b/pkg/ui/workspaces/db-console/src/views/databases/databasesPage/redux.ts @@ -81,16 +81,17 @@ const selectDatabases = createSelector( (databases?.databases || []).map(database => { const details = databaseDetails[database]; const stats = details?.data?.results.stats; - const sizeInBytes = stats?.pebble_data?.approximate_disk_bytes || 0; - const rangeCount = stats?.ranges_data.range_count || 0; - const nodes = stats?.ranges_data.node_ids || []; + const sizeInBytes = stats?.spanStats?.approximate_disk_bytes || 0; + const rangeCount = stats?.spanStats.range_count || 0; + // TODO(thomas): Eventually, 
we should populate this with real node IDs. + const nodes = stats?.replicaData.replicas || []; const nodesByRegionString = getNodesByRegionString( nodes, nodeRegions, isTenant, ); const numIndexRecommendations = - stats?.index_stats.num_index_recommendations || 0; + stats?.indexStats.num_index_recommendations || 0; const combinedErr = combineLoadingErrors( details?.lastError, @@ -104,7 +105,7 @@ const selectDatabases = createSelector( lastError: combinedErr, name: database, sizeInBytes: sizeInBytes, - tableCount: details?.data?.results.tables_resp.tables?.length || 0, + tableCount: details?.data?.results.tablesResp.tables?.length || 0, rangeCount: rangeCount, nodes: nodes, nodesByRegionString,