diff --git a/packages/next/src/client/components/app-router.tsx b/packages/next/src/client/components/app-router.tsx index 6bcf12ef6cbbe..d92dd93b6178d 100644 --- a/packages/next/src/client/components/app-router.tsx +++ b/packages/next/src/client/components/app-router.tsx @@ -280,7 +280,12 @@ function Router({ ? // Unlike the old implementation, the Segment Cache doesn't store its // data in the router reducer state; it writes into a global mutable // cache. So we don't need to dispatch an action. - (href) => prefetchWithSegmentCache(href, actionQueue.state.nextUrl) + (href) => + prefetchWithSegmentCache( + href, + actionQueue.state.nextUrl, + actionQueue.state.tree + ) : (href, options) => { // Use the old prefetch implementation. const url = createPrefetchURL(href) diff --git a/packages/next/src/client/components/router-reducer/prefetch-cache-utils.ts b/packages/next/src/client/components/router-reducer/prefetch-cache-utils.ts index f7ceea98c41f5..116777389cc75 100644 --- a/packages/next/src/client/components/router-reducer/prefetch-cache-utils.ts +++ b/packages/next/src/client/components/router-reducer/prefetch-cache-utils.ts @@ -397,7 +397,7 @@ export function prunePrefetchCache( const DYNAMIC_STALETIME_MS = Number(process.env.__NEXT_CLIENT_ROUTER_DYNAMIC_STALETIME) * 1000 -const STATIC_STALETIME_MS = +export const STATIC_STALETIME_MS = Number(process.env.__NEXT_CLIENT_ROUTER_STATIC_STALETIME) * 1000 function getPrefetchEntryCacheStatus({ diff --git a/packages/next/src/client/components/segment-cache/cache.ts b/packages/next/src/client/components/segment-cache/cache.ts index e65810aae1d9b..9ad326824c015 100644 --- a/packages/next/src/client/components/segment-cache/cache.ts +++ b/packages/next/src/client/components/segment-cache/cache.ts @@ -3,15 +3,20 @@ import type { RootTreePrefetch, SegmentPrefetch, } from '../../../server/app-render/collect-segment-data' -import type { Segment as FlightRouterStateSegment } from '../../../server/app-render/types' import 
type { HeadData, LoadingModuleData, } from '../../../shared/lib/app-router-context.shared-runtime' +import type { + CacheNodeSeedData, + Segment as FlightRouterStateSegment, +} from '../../../server/app-render/types' import { NEXT_DID_POSTPONE_HEADER, NEXT_ROUTER_PREFETCH_HEADER, NEXT_ROUTER_SEGMENT_PREFETCH_HEADER, + NEXT_ROUTER_STALE_TIME_HEADER, + NEXT_ROUTER_STATE_TREE_HEADER, NEXT_URL, RSC_CONTENT_TYPE_HEADER, RSC_HEADER, @@ -23,9 +28,9 @@ import { type RequestHeaders, } from '../router-reducer/fetch-server-response' import { - trackPrefetchRequestBandwidth, pingPrefetchTask, type PrefetchTask, + type PrefetchSubtaskResult, } from './scheduler' import { getAppBuildId } from '../../app-build-id' import { createHrefFromUrl } from '../router-reducer/create-href-from-url' @@ -35,12 +40,18 @@ import type { RouteCacheKey, } from './cache-key' import { createTupleMap, type TupleMap, type Prefix } from './tuple-map' -import { createLRU, type LRU } from './lru' +import { createLRU } from './lru' import { encodeChildSegmentKey, encodeSegment, ROOT_SEGMENT_KEY, } from '../../../server/app-render/segment-value-encoding' +import type { + FlightRouterState, + NavigationFlightResponse, +} from '../../../server/app-render/types' +import { normalizeFlightData } from '../../flight-data-helpers' +import { STATIC_STALETIME_MS } from '../router-reducer/prefetch-cache-utils' // A note on async/await when working in the prefetch cache: // @@ -65,7 +76,7 @@ import { export type RouteTree = { key: string - token: string + token: string | null segment: FlightRouterStateSegment slots: null | { [parallelRouteKey: string]: RouteTree @@ -95,8 +106,8 @@ type RouteCacheEntryShared = { export const enum EntryStatus { Empty, Pending, - Rejected, Fulfilled, + Rejected, } type PendingRouteCacheEntry = RouteCacheEntryShared & { @@ -106,6 +117,7 @@ type PendingRouteCacheEntry = RouteCacheEntryShared & { tree: null head: HeadData | null isHeadPartial: true + isPPREnabled: false } type 
RejectedRouteCacheEntry = RouteCacheEntryShared & { @@ -115,6 +127,7 @@ type RejectedRouteCacheEntry = RouteCacheEntryShared & { tree: null head: null isHeadPartial: true + isPPREnabled: boolean } export type FulfilledRouteCacheEntry = RouteCacheEntryShared & { @@ -124,6 +137,7 @@ export type FulfilledRouteCacheEntry = RouteCacheEntryShared & { tree: RouteTree head: HeadData isHeadPartial: boolean + isPPREnabled: boolean } export type RouteCacheEntry = @@ -141,7 +155,7 @@ type SegmentCacheEntryShared = { size: number } -type PendingSegmentCacheEntry = SegmentCacheEntryShared & { +export type PendingSegmentCacheEntry = SegmentCacheEntryShared & { status: EntryStatus.Empty | EntryStatus.Pending rsc: null loading: null @@ -308,6 +322,8 @@ export function readOrCreateRouteCacheEntry( // could be intercepted. It's only set to false once we receive a response // from the server. couldBeIntercepted: true, + // Similarly, we don't yet know if the route supports PPR. + isPPREnabled: false, // LRU-related fields keypath: null, @@ -331,6 +347,7 @@ export function readOrCreateRouteCacheEntry( */ export function readOrCreateSegmentCacheEntry( now: number, + // TODO: Don't need to pass the whole route. Just `staleAt`. 
route: FulfilledRouteCacheEntry, path: string ): SegmentCacheEntry { @@ -421,13 +438,14 @@ function pingBlockedTasks(entry: { } function fulfillRouteCacheEntry( - entry: PendingRouteCacheEntry, + entry: RouteCacheEntry, tree: RouteTree, head: HeadData, isHeadPartial: boolean, staleAt: number, couldBeIntercepted: boolean, - canonicalUrl: string + canonicalUrl: string, + isPPREnabled: boolean ): FulfilledRouteCacheEntry { const fulfilledEntry: FulfilledRouteCacheEntry = entry as any fulfilledEntry.status = EntryStatus.Fulfilled @@ -437,6 +455,7 @@ function fulfillRouteCacheEntry( fulfilledEntry.staleAt = staleAt fulfilledEntry.couldBeIntercepted = couldBeIntercepted fulfilledEntry.canonicalUrl = canonicalUrl + fulfilledEntry.isPPREnabled = isPPREnabled pingBlockedTasks(entry) return fulfilledEntry } @@ -530,10 +549,83 @@ function convertTreePrefetchToRouteTree( } } +function convertRootFlightRouterStateToRouteTree( + flightRouterState: FlightRouterState +): RouteTree { + return convertFlightRouterStateToRouteTree( + flightRouterState, + ROOT_SEGMENT_KEY + ) +} + +function convertFlightRouterStateToRouteTree( + flightRouterState: FlightRouterState, + key: string +): RouteTree { + let slots: { [parallelRouteKey: string]: RouteTree } | null = null + + const parallelRoutes = flightRouterState[1] + for (let parallelRouteKey in parallelRoutes) { + const childRouterState = parallelRoutes[parallelRouteKey] + const childSegment = childRouterState[0] + // TODO: Eventually, the param values will not be included in the response + // from the server. We'll instead fill them in on the client by parsing + // the URL. This is where we'll do that. 
+ const childKey = encodeChildSegmentKey( + key, + parallelRouteKey, + encodeSegment(childSegment) + ) + const childTree = convertFlightRouterStateToRouteTree( + childRouterState, + childKey + ) + if (slots === null) { + slots = { + [parallelRouteKey]: childTree, + } + } else { + slots[parallelRouteKey] = childTree + } + } + + return { + key, + // NOTE: Dynamic server responses do not currently include an access token. + // (They may in the future.) Which means this tree cannot be used to issue + // a per-segment prefetch. + token: null, + segment: flightRouterState[0], + slots, + isRootLayout: flightRouterState[4] === true, + } +} + +export function convertRouteTreeToFlightRouterState( + routeTree: RouteTree +): FlightRouterState { + const parallelRoutes: Record = {} + if (routeTree.slots !== null) { + for (const parallelRouteKey in routeTree.slots) { + parallelRoutes[parallelRouteKey] = convertRouteTreeToFlightRouterState( + routeTree.slots[parallelRouteKey] + ) + } + } + const flightRouterState: FlightRouterState = [ + routeTree.segment, + parallelRoutes, + null, + null, + routeTree.isRootLayout, + ] + return flightRouterState +} + export async function fetchRouteOnCacheMiss( entry: PendingRouteCacheEntry, task: PrefetchTask -): Promise { +): Promise { // This function is allowed to use async/await because it contains the actual // fetch that gets issued on a cache miss. Notice though that it does not // return anything; it writes the result to the cache entry directly, then @@ -550,34 +642,12 @@ export async function fetchRouteOnCacheMiss( // PPR is enabled, because we always respond to route tree requests, even // if it needs to be blockingly generated on demand. response.status === 204 || - // This checks whether the response was served from the per-segment cache, - // rather than the old prefetching flow. If it fails, it implies that PPR - // is disabled on this route. - // TODO: Add support for non-PPR routes. 
- response.headers.get(NEXT_DID_POSTPONE_HEADER) !== '2' || !response.body ) { // Server responded with an error, or with a miss. We should still cache // the response, but we can try again after 10 seconds. rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) - return - } - const prefetchStream = createPrefetchResponseStream( - response.body, - routeCacheLru, - entry - ) - const serverData: RootTreePrefetch = await (createFromNextReadableStream( - prefetchStream - ) as Promise) - if (serverData.buildId !== getAppBuildId()) { - // The server build does not match the client. Treat as a 404. During - // an actual navigation, the router will trigger an MPA navigation. - // TODO: Consider moving the build ID to a response header so we can check - // it before decoding the response, and so there's one way of checking - // across all response types. - rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) - return + return null } // This is a bit convoluted but it's taken from router-reducer and @@ -591,15 +661,73 @@ export async function fetchRouteOnCacheMiss( const couldBeIntercepted = varyHeader !== null && varyHeader.includes(NEXT_URL) - fulfillRouteCacheEntry( - entry, - convertRootTreePrefetchToRouteTree(serverData), - serverData.head, - serverData.isHeadPartial, - Date.now() + serverData.staleTime, - couldBeIntercepted, - canonicalUrl - ) + // Track when the network connection closes. + const closed = createPromiseWithResolvers() + + // This checks whether the response was served from the per-segment cache, + // rather than the old prefetching flow. If it fails, it implies that PPR + // is disabled on this route. + // TODO: Add support for non-PPR routes. 
+ const routeIsPPREnabled = + response.headers.get(NEXT_DID_POSTPONE_HEADER) === '2' + if (routeIsPPREnabled) { + const prefetchStream = createPrefetchResponseStream( + response.body, + closed.resolve, + function onResponseSizeUpdate(size) { + routeCacheLru.updateSize(entry, size) + } + ) + const serverData = await (createFromNextReadableStream( + prefetchStream + ) as Promise) + if (serverData.buildId !== getAppBuildId()) { + // The server build does not match the client. Treat as a 404. During + // an actual navigation, the router will trigger an MPA navigation. + // TODO: Consider moving the build ID to a response header so we can check + // it before decoding the response, and so there's one way of checking + // across all response types. + rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) + return null + } + + fulfillRouteCacheEntry( + entry, + convertRootTreePrefetchToRouteTree(serverData), + serverData.head, + serverData.isHeadPartial, + Date.now() + serverData.staleTime, + couldBeIntercepted, + canonicalUrl, + routeIsPPREnabled + ) + } else { + // PPR is not enabled for this route. The server responds with a + // different format (FlightRouterState) that we need to convert. + // TODO: We will unify the responses eventually. I'm keeping the types + // separate for now because FlightRouterState has so many + // overloaded concerns. + const prefetchStream = createPrefetchResponseStream( + response.body, + closed.resolve, + function onResponseSizeUpdate(size) { + routeCacheLru.updateSize(entry, size) + } + ) + const serverData = await (createFromNextReadableStream( + prefetchStream + ) as Promise) + + writeDynamicTreeResponseIntoCache( + Date.now(), + response, + serverData, + entry, + couldBeIntercepted, + canonicalUrl, + routeIsPPREnabled + ) + } if (!couldBeIntercepted && nextUrl !== null) { // This route will never be intercepted. 
So we can use this entry for all @@ -624,10 +752,14 @@ export async function fetchRouteOnCacheMiss( // just a performance optimization, we can safely skip it. } } + // Return a promise that resolves when the network connection closes, so + // the scheduler can track the number of concurrent network connections. + return { closed: closed.promise } } catch (error) { // Either the connection itself failed, or something bad happened while // decoding the response. rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) + return null } } @@ -637,7 +769,7 @@ export async function fetchSegmentOnCacheMiss( routeKey: RouteCacheKey, segmentKeyPath: string, accessToken: string | null -): Promise { +): Promise { // This function is allowed to use async/await because it contains the actual // fetch that gets issued on a cache miss. Notice though that it does not // return anything; it writes the result to the cache entry directly. @@ -666,14 +798,20 @@ export async function fetchSegmentOnCacheMiss( // Server responded with an error, or with a miss. We should still cache // the response, but we can try again after 10 seconds. rejectSegmentCacheEntry(segmentCacheEntry, Date.now() + 10 * 1000) - return + return null } + + // Track when the network connection closes. + const closed = createPromiseWithResolvers() + // Wrap the original stream in a new stream that never closes. That way the // Flight client doesn't error if there's a hanging promise. const prefetchStream = createPrefetchResponseStream( response.body, - segmentCacheLru, - segmentCacheEntry + closed.resolve, + function onResponseSizeUpdate(size) { + segmentCacheLru.updateSize(segmentCacheEntry, size) + } ) const serverData = await (createFromNextReadableStream( prefetchStream @@ -685,7 +823,7 @@ export async function fetchSegmentOnCacheMiss( // it before decoding the response, and so there's one way of checking // across all response types. 
rejectSegmentCacheEntry(segmentCacheEntry, Date.now() + 10 * 1000) - return + return null } fulfillSegmentCacheEntry( segmentCacheEntry, @@ -696,10 +834,290 @@ export async function fetchSegmentOnCacheMiss( route.staleAt, serverData.isPartial ) + + // Return a promise that resolves when the network connection closes, so + // the scheduler can track the number of concurrent network connections. + return { closed: closed.promise } } catch (error) { // Either the connection itself failed, or something bad happened while // decoding the response. rejectSegmentCacheEntry(segmentCacheEntry, Date.now() + 10 * 1000) + return null + } +} + +export async function fetchSegmentPrefetchesForPPRDisabledRoute( + task: PrefetchTask, + route: FulfilledRouteCacheEntry, + dynamicRequestTree: FlightRouterState, + spawnedEntries: Map +): Promise { + const href = task.key.href + const nextUrl = task.key.nextUrl + const headers: RequestHeaders = { + [RSC_HEADER]: '1', + [NEXT_ROUTER_PREFETCH_HEADER]: '1', + [NEXT_ROUTER_STATE_TREE_HEADER]: encodeURIComponent( + JSON.stringify(dynamicRequestTree) + ), + } + if (nextUrl !== null) { + headers[NEXT_URL] = nextUrl + } + try { + const response = await fetchPrefetchResponse(href, headers) + if (!response || !response.ok || !response.body) { + // Server responded with an error, or with a miss. We should still cache + // the response, but we can try again after 10 seconds. + rejectSegmentEntriesIfStillPending(spawnedEntries, Date.now() + 10 * 1000) + return null + } + + // Track when the network connection closes. + const closed = createPromiseWithResolvers() + + let fulfilledEntries: Array | null = null + const prefetchStream = createPrefetchResponseStream( + response.body, + closed.resolve, + function onResponseSizeUpdate(totalBytesReceivedSoFar) { + // When processing a dynamic response, we don't know how large each + // individual segment is, so approximate by assiging each segment + // the average of the total response size. 
+ if (fulfilledEntries === null) { + // Haven't received enough data yet to know which segments + // were included. + return + } + const averageSize = totalBytesReceivedSoFar / fulfilledEntries.length + for (const entry of fulfilledEntries) { + segmentCacheLru.updateSize(entry, averageSize) + } + } + ) + const serverData = await (createFromNextReadableStream( + prefetchStream + ) as Promise) + + // Aside from writing the data into the cache, this function also returns + // the entries that were fulfilled, so we can streamingly update their sizes + // in the LRU as more data comes in. + fulfilledEntries = writeDynamicRenderResponseIntoCache( + Date.now(), + response, + serverData, + route, + spawnedEntries + ) + + // Return a promise that resolves when the network connection closes, so + // the scheduler can track the number of concurrent network connections. + return { closed: closed.promise } + } catch (error) { + rejectSegmentEntriesIfStillPending(spawnedEntries, Date.now() + 10 * 1000) + return null + } +} + +function writeDynamicTreeResponseIntoCache( + now: number, + response: Response, + serverData: NavigationFlightResponse, + entry: PendingRouteCacheEntry, + couldBeIntercepted: boolean, + canonicalUrl: string, + routeIsPPREnabled: boolean +) { + if (serverData.b !== getAppBuildId()) { + // The server build does not match the client. Treat as a 404. During + // an actual navigation, the router will trigger an MPA navigation. + // TODO: Consider moving the build ID to a response header so we can check + // it before decoding the response, and so there's one way of checking + // across all response types. + rejectRouteCacheEntry(entry, now + 10 * 1000) + return + } + const normalizedFlightDataResult = normalizeFlightData(serverData.f) + if ( + // A string result means navigating to this route will result in an + // MPA navigation. 
+ typeof normalizedFlightDataResult === 'string' || + normalizedFlightDataResult.length !== 1 + ) { + rejectRouteCacheEntry(entry, now + 10 * 1000) + return + } + const flightData = normalizedFlightDataResult[0] + if (!flightData.isRootRender) { + // Unexpected response format. + rejectRouteCacheEntry(entry, now + 10 * 1000) + return + } + + const flightRouterState = flightData.tree + // TODO: Extract to function + const staleTimeHeader = response.headers.get(NEXT_ROUTER_STALE_TIME_HEADER) + const staleTime = + staleTimeHeader !== null + ? parseInt(staleTimeHeader, 10) + : STATIC_STALETIME_MS + fulfillRouteCacheEntry( + entry, + convertRootFlightRouterStateToRouteTree(flightRouterState), + flightData.head, + flightData.isHeadPartial, + now + staleTime, + couldBeIntercepted, + canonicalUrl, + routeIsPPREnabled + ) +} + +function rejectSegmentEntriesIfStillPending( + entries: Map, + staleAt: number +): Array { + const fulfilledEntries = [] + for (const entry of entries.values()) { + if (entry.status === EntryStatus.Pending) { + rejectSegmentCacheEntry(entry, staleAt) + } else if (entry.status === EntryStatus.Fulfilled) { + fulfilledEntries.push(entry) + } + } + return fulfilledEntries +} + +function writeDynamicRenderResponseIntoCache( + now: number, + response: Response, + serverData: NavigationFlightResponse, + route: FulfilledRouteCacheEntry, + spawnedEntries: Map +): Array | null { + if (serverData.b !== getAppBuildId()) { + // The server build does not match the client. Treat as a 404. During + // an actual navigation, the router will trigger an MPA navigation. + // TODO: Consider moving the build ID to a response header so we can check + // it before decoding the response, and so there's one way of checking + // across all response types. 
+ rejectSegmentEntriesIfStillPending(spawnedEntries, now + 10 * 1000) + return null + } + const flightDatas = normalizeFlightData(serverData.f) + if (typeof flightDatas === 'string') { + // This means navigating to this route will result in an MPA navigation. + // TODO: We should cache this, too, so that the MPA navigation is immediate. + return null + } + for (const flightData of flightDatas) { + const seedData = flightData.seedData + if (seedData !== null) { + // The data sent by the server represents only a subtree of the app. We + // need to find the part of the task tree that matches the response. + // + // segmentPath represents the parent path of subtree. It's a repeating + // pattern of parallel route key and segment: + // + // [string, Segment, string, Segment, string, Segment, ...] + const segmentPath = flightData.segmentPath + let segmentKey = ROOT_SEGMENT_KEY + for (let i = 0; i < segmentPath.length; i += 2) { + const parallelRouteKey: string = segmentPath[i] + const segment: FlightRouterStateSegment = segmentPath[i + 1] + segmentKey = encodeChildSegmentKey( + segmentKey, + parallelRouteKey, + encodeSegment(segment) + ) + } + const staleTimeHeader = response.headers.get( + NEXT_ROUTER_STALE_TIME_HEADER + ) + const staleTime = + staleTimeHeader !== null + ? parseInt(staleTimeHeader, 10) + : STATIC_STALETIME_MS + writeSeedDataIntoCache( + now, + route, + now + staleTime, + seedData, + segmentKey, + spawnedEntries + ) + } + } + // Any entry that's still pending was intentionally not rendered by the + // server, because it was inside the loading boundary. Mark them as rejected + // so we know not to fetch them again. + // TODO: If PPR is enabled on some routes but not others, then it's possible + // that a different page is able to do a per-segment prefetch of one of the + // segments we're marking as rejected here. We should mark on the segment + // somehow that the reason for the rejection is because of a non-PPR prefetch. 
+ // That way a per-segment prefetch knows to disregard the rejection. + const fulfilledEntries = rejectSegmentEntriesIfStillPending( + spawnedEntries, + now + 10 * 1000 + ) + return fulfilledEntries +} + +function writeSeedDataIntoCache( + now: number, + route: FulfilledRouteCacheEntry, + staleAt: number, + seedData: CacheNodeSeedData, + key: string, + entriesOwnedByCurrentTask: Map +) { + // This function is used to write the result of a dynamic server request + // (CacheNodeSeedData) into the prefetch cache. It's used in cases where we + // want to treat a dynamic response as if it were static. The two examples + // where this happens are (which implicitly opts + // dynamic data into being static) and when prefetching a PPR-disabled route + const rsc = seedData[1] + const loading = seedData[3] + const isPartial = rsc === null + + // We should only write into cache entries that are owned by us. Or create + // a new one and write into that. We must never write over an entry that was + // created by a different task, because that causes data races. + const ownedEntry = entriesOwnedByCurrentTask.get(key) + if (ownedEntry !== undefined) { + fulfillSegmentCacheEntry(ownedEntry, rsc, loading, staleAt, isPartial) + } else { + // There's no matching entry. Attempt to create a new one. + const possiblyNewEntry = readOrCreateSegmentCacheEntry(now, route, key) + if (possiblyNewEntry.status === EntryStatus.Empty) { + // Confirmed this is a new entry. We can fulfill it. + const newEntry = possiblyNewEntry + fulfillSegmentCacheEntry(newEntry, rsc, loading, staleAt, isPartial) + } else { + // There was already an entry in the cache. We must not write over it. + } + } + // Recursively write the child data into the cache. 
+ const seedDataChildren = seedData[2] + if (seedDataChildren !== null) { + for (const parallelRouteKey in seedDataChildren) { + const childSeedData = seedDataChildren[parallelRouteKey] + if (childSeedData !== null) { + const childSegment = childSeedData[0] + writeSeedDataIntoCache( + now, + route, + staleAt, + childSeedData, + encodeChildSegmentKey( + key, + parallelRouteKey, + encodeSegment(childSegment) + ), + entriesOwnedByCurrentTask + ) + } + } } } @@ -716,10 +1134,15 @@ async function fetchSegmentPrefetchResponse( if (nextUrl !== null) { headers[NEXT_URL] = nextUrl } + return fetchPrefetchResponse(href, headers) +} + +async function fetchPrefetchResponse( + href: NormalizedHref, + headers: RequestHeaders +): Promise { const fetchPriority = 'low' - const responsePromise = createFetch(new URL(href), headers, fetchPriority) - trackPrefetchRequestBandwidth(responsePromise) - const response = await responsePromise + const response = await createFetch(new URL(href), headers, fetchPriority) const contentType = response.headers.get('content-type') const isFlightResponse = contentType && contentType.startsWith(RSC_CONTENT_TYPE_HEADER) @@ -729,12 +1152,10 @@ async function fetchSegmentPrefetchResponse( return response } -function createPrefetchResponseStream< - T extends RouteCacheEntry | SegmentCacheEntry, ->( +function createPrefetchResponseStream( originalFlightStream: ReadableStream, - lru: LRU, - lruEntry: T + onStreamClose: () => void, + onResponseSizeUpdate: (size: number) => void ): ReadableStream { // When PPR is enabled, prefetch streams may contain references that never // resolve, because that's how we encode dynamic data access. In the decoded @@ -766,12 +1187,12 @@ function createPrefetchResponseStream< // it's not really necessary to do this streamingly, but I'm doing it // anyway in case this changes in the future. 
totalByteLength += value.byteLength - lru.updateSize(lruEntry, totalByteLength) - + onResponseSizeUpdate(totalByteLength) continue } // The server stream has closed. Exit, but intentionally do not close - // the target stream. + // the target stream. We do notify the caller, though. + onStreamClose() return } }, diff --git a/packages/next/src/client/components/segment-cache/navigation.ts b/packages/next/src/client/components/segment-cache/navigation.ts index 743d4ba040b1a..0ca68f6a8f1ab 100644 --- a/packages/next/src/client/components/segment-cache/navigation.ts +++ b/packages/next/src/client/components/segment-cache/navigation.ts @@ -136,6 +136,9 @@ function navigateUsingPrefetchedRouteTree( // TODO: Eventually updateCacheNodeOnNavigation (or the equivalent) should // read from the Segment Cache directly. It's only structured this way for now // so we can share code with the old prefetching implementation. + // TODO: Need to detect whether we're navigating to a new root layout, i.e. + // reimplement the isNavigatingToNewRootLayout logic + // inside updateCacheNodeOnNavigation. const task = updateCacheNodeOnNavigation( currentCacheNode, currentFlightRouterState, diff --git a/packages/next/src/client/components/segment-cache/prefetch.ts b/packages/next/src/client/components/segment-cache/prefetch.ts index 3b68677c93c8b..83f03014b0f2d 100644 --- a/packages/next/src/client/components/segment-cache/prefetch.ts +++ b/packages/next/src/client/components/segment-cache/prefetch.ts @@ -1,3 +1,4 @@ +import type { FlightRouterState } from '../../../server/app-render/types' import { createPrefetchURL } from '../../components/app-router' import { createCacheKey } from './cache-key' import { schedulePrefetchTask } from './scheduler' @@ -7,12 +8,16 @@ import { schedulePrefetchTask } from './scheduler' * @param href - The URL to prefetch. Typically this will come from a , * or router.prefetch. It must be validated before we attempt to prefetch it. 
*/ -export function prefetch(href: string, nextUrl: string | null) { +export function prefetch( + href: string, + nextUrl: string | null, + treeAtTimeOfPrefetch: FlightRouterState +) { const url = createPrefetchURL(href) if (url === null) { // This href should not be prefetched. return } const cacheKey = createCacheKey(url.href, nextUrl) - schedulePrefetchTask(cacheKey) + schedulePrefetchTask(cacheKey, treeAtTimeOfPrefetch) } diff --git a/packages/next/src/client/components/segment-cache/scheduler.ts b/packages/next/src/client/components/segment-cache/scheduler.ts index 03fb5db7ba04e..60dead0edb900 100644 --- a/packages/next/src/client/components/segment-cache/scheduler.ts +++ b/packages/next/src/client/components/segment-cache/scheduler.ts @@ -1,3 +1,8 @@ +import type { + FlightRouterState, + Segment as FlightRouterStateSegment, +} from '../../../server/app-render/types' +import { matchSegment } from '../match-segments' import { readOrCreateRouteCacheEntry, readOrCreateSegmentCacheEntry, @@ -8,6 +13,9 @@ import { type RouteCacheEntry, type SegmentCacheEntry, type RouteTree, + fetchSegmentPrefetchesForPPRDisabledRoute, + type PendingSegmentCacheEntry, + convertRouteTreeToFlightRouterState, } from './cache' import type { RouteCacheKey } from './cache-key' @@ -26,6 +34,13 @@ const scheduleMicrotask = export type PrefetchTask = { key: RouteCacheKey + /** + * The FlightRouterState at the time the task was initiated. This is needed + * when falling back to the non-PPR behavior, which only prefetches up to + * the first loading boundary. + */ + treeAtTimeOfPrefetch: FlightRouterState + /** * sortId is an incrementing counter * @@ -90,6 +105,13 @@ const enum PrefetchTaskExitStatus { Done, } +export type PrefetchSubtaskResult = { + /** + * A promise that resolves when the network connection is closed. 
+ */ + closed: Promise +} + const taskHeap: Array = [] // This is intentionally low so that when a navigation happens, the browser's @@ -108,11 +130,16 @@ let didScheduleMicrotask = false * expected to be validated and normalized. * * @param key The RouteCacheKey to prefetch. + * @param treeAtTimeOfPrefetch The app's current FlightRouterState */ -export function schedulePrefetchTask(key: RouteCacheKey): void { +export function schedulePrefetchTask( + key: RouteCacheKey, + treeAtTimeOfPrefetch: FlightRouterState +): void { // Spawn a new prefetch task const task: PrefetchTask = { key, + treeAtTimeOfPrefetch, sortId: sortIdCounter++, isBlocked: false, _heapIndex: -1, @@ -154,34 +181,33 @@ function hasNetworkBandwidth(): boolean { return inProgressRequests < MAX_CONCURRENT_PREFETCH_REQUESTS } -/** - * Notifies the scheduler of an in-progress prefetch request. This is used to - * control network bandwidth by limiting the number of concurrent requests. - * - * @param promise A promise that resolves when the request has finished. - */ -export function trackPrefetchRequestBandwidth( - promiseForServerData: Promise -) { - inProgressRequests++ - promiseForServerData.then( - onPrefetchRequestCompletion, - onPrefetchRequestCompletion - ) -} - -const noop = () => {} - -function spawnPrefetchSubtask(promise: Promise) { - // When the scheduler spawns an async task, we don't await its result - // directly. Instead, the async task writes its result directly into the - // cache, then pings the scheduler to continue. +function spawnPrefetchSubtask( + prefetchSubtask: Promise +): void { + // When the scheduler spawns an async task, we don't await its result. + // Instead, the async task writes its result directly into the cache, then + // pings the scheduler to continue. // - // This function only exists to prevent warnings about unhandled promises. 
- promise.then(noop, noop) + // We process server responses streamingly, so the prefetch subtask will + // likely resolve before we're finished receiving all the data. The subtask + // result includes a promise that resolves once the network connection is + // closed. The scheduler uses this to control network bandwidth by tracking + // and limiting the number of concurrent requests. + inProgressRequests++ + prefetchSubtask + .then((result) => { + if (result === null) { + // The prefetch task errored before it could start processing the + // network stream. Assume the connection is closed. + return + } + // Wait for the connection to close before freeing up more bandwidth. + return result.closed + }) + .then(onPrefetchConnectionClosed, onPrefetchConnectionClosed) } -function onPrefetchRequestCompletion(): void { +function onPrefetchConnectionClosed(): void { inProgressRequests-- // Notify the scheduler that we have more bandwidth, and can continue @@ -304,14 +330,48 @@ function pingRootRouteTree( return PrefetchTaskExitStatus.InProgress } const tree = route.tree - const segmentKey = tree.key - const segment = readOrCreateSegmentCacheEntry(now, route, segmentKey) - pingSegment(route, segment, task.key, segmentKey, tree.token) - if (!hasNetworkBandwidth()) { - // Stop prefetching segments until there's more bandwidth. - return PrefetchTaskExitStatus.InProgress + if (route.isPPREnabled) { + return pingRouteTree(now, task, route, tree) + } else { + // When PPR is disabled, we can't prefetch per segment. We must fallback + // to the old prefetch behavior and send a dynamic request. + // + // Construct a tree (currently a FlightRouterState) that represents + // which segments need to be prefetched and which ones are already + // cached. If the tree is empty, then we can exit. Otherwise, we'll send + // the request tree to the server and use the response to populate the + // segment cache. 
+ // + // Only routes that include a loading boundary can be prefetched in this + // way. The server will only render up to the first loading boundary + // inside the new part of the tree. If there's no loading boundary, the + // server will never return any data. + // TODO: When we prefetch the route tree, the server should + // indicate whether there's a loading boundary so the client doesn't + // send a second request for no reason. + const spawnedEntries = new Map() + const dynamicRequestTree = pingRouteTreeForPPRDisabledRoute( + now, + route, + task.treeAtTimeOfPrefetch, + tree, + spawnedEntries + ) + const needsDynamicRequest = spawnedEntries.size > 0 + if (needsDynamicRequest) { + // Perform a dynamic prefetch request and populate the cache with + // the result + spawnPrefetchSubtask( + fetchSegmentPrefetchesForPPRDisabledRoute( + task, + route, + dynamicRequestTree, + spawnedEntries + ) + ) + } + return PrefetchTaskExitStatus.Done } - return pingRouteTree(now, task, route, tree) } default: { const _exhaustiveCheck: never = route @@ -326,18 +386,16 @@ function pingRouteTree( route: FulfilledRouteCacheEntry, tree: RouteTree ): PrefetchTaskExitStatus.InProgress | PrefetchTaskExitStatus.Done { + const segment = readOrCreateSegmentCacheEntry(now, route, tree.key) + pingSegment(route, segment, task.key, tree.key, tree.token) if (tree.slots !== null) { + if (!hasNetworkBandwidth()) { + // Stop prefetching segments until there's more bandwidth. + return PrefetchTaskExitStatus.InProgress + } // Recursively ping the children. for (const parallelRouteKey in tree.slots) { const childTree = tree.slots[parallelRouteKey] - const childKey = childTree.key - const childToken = childTree.token - const segment = readOrCreateSegmentCacheEntry(now, route, childKey) - pingSegment(route, segment, task.key, childKey, childToken) - if (!hasNetworkBandwidth()) { - // Stop prefetching segments until there's more bandwidth. 
- return PrefetchTaskExitStatus.InProgress - } const childExitStatus = pingRouteTree(now, task, route, childTree) if (childExitStatus === PrefetchTaskExitStatus.InProgress) { // Child yielded without finishing. @@ -349,21 +407,191 @@ function pingRouteTree( return PrefetchTaskExitStatus.Done } +function pingRouteTreeForPPRDisabledRoute( + now: number, + route: FulfilledRouteCacheEntry, + oldTree: FlightRouterState, + newTree: RouteTree, + spawnedEntries: Map +): FlightRouterState { + // This is a single recursive traversal that does multiple things: + // - Finds the parts of the target route (newTree) that are not part of + // of the current page (oldTree) by diffing them, using the same algorithm + // as a real navigation. + // - Constructs a request tree (FlightRouterState) that describes which + // segments need to be prefetched and which ones are already cached. + // - Creates a set of pending cache entries for the segments that need to + // be prefetched, so that a subsequent prefetch task does not request the + // same segments again. + const oldTreeChildren = oldTree[1] + const newTreeChildren = newTree.slots + let requestTreeChildren: Record = {} + if (newTreeChildren !== null) { + for (const parallelRouteKey in newTreeChildren) { + const newTreeChild = newTreeChildren[parallelRouteKey] + const newTreeChildSegment = newTreeChild.segment + const oldTreeChild: FlightRouterState | void = + oldTreeChildren[parallelRouteKey] + const oldTreeChildSegment: FlightRouterStateSegment | void = + oldTreeChild?.[0] + let requestTreeChild + if ( + oldTreeChildSegment !== undefined && + matchSegment(newTreeChildSegment, oldTreeChildSegment) + ) { + // This segment is already part of the current route. Keep traversing. + requestTreeChild = pingRouteTreeForPPRDisabledRoute( + now, + route, + oldTreeChild, + newTreeChild, + spawnedEntries + ) + } else { + // This segment is not part of the current route. 
We're entering a + // part of the tree that we need to prefetch (unless everything is + // already cached). + requestTreeChild = createDynamicRequestTreeForPartiallyCachedSegments( + now, + route, + newTreeChild, + null, + spawnedEntries + ) + } + requestTreeChildren[parallelRouteKey] = requestTreeChild + } + } + const requestTree: FlightRouterState = [ + newTree.segment, + requestTreeChildren, + null, + null, + newTree.isRootLayout, + ] + return requestTree +} + +function createDynamicRequestTreeForPartiallyCachedSegments( + now: number, + route: FulfilledRouteCacheEntry, + tree: RouteTree, + refetchMarkerContext: 'refetch' | 'inside-shared-layout' | null, + spawnedEntries: Map +): FlightRouterState { + // The tree we're constructing is the same shape as the tree we're navigating + // to — specifically, it's the subtree that isn't present in the previous + // route. But even though this is a "new" tree, some of the individual + // segments may be cached as a result of other route prefetches. + // + // So we need to find the first uncached segment along each path and + // add an explicit "refetch" marker so the server knows where to start + // rendering. Once the server starts rendering along a path, it keeps + // rendering until it hits a loading boundary. We use `refetchMarkerContext` + // to represent the nearest parent marker. + + let refetchMarker: 'refetch' | 'inside-shared-layout' | null = + refetchMarkerContext === null ? 'inside-shared-layout' : null + + const segment = readOrCreateSegmentCacheEntry(now, route, tree.key) + switch (segment.status) { + case EntryStatus.Empty: { + // This segment is not cached. Add a refetch marker so the server knows + // to start rendering here. + // TODO: Instead of a "refetch" marker, we could just omit this subtree's + // FlightRouterState from the request tree. I think this would probably + // already work even without any updates to the server. 
For consistency, + // though, I'll send the full tree and we'll look into this later as part + // of a larger redesign of the request protocol. + // Add the pending cache entry to the result map. + segment.status = EntryStatus.Pending + spawnedEntries.set(tree.key, segment) + if (refetchMarkerContext !== 'refetch') { + refetchMarker = refetchMarkerContext = 'refetch' + } else { + // There's already a parent with a refetch marker, so we don't need + // to add another one. + } + break + } + case EntryStatus.Fulfilled: { + // The segment is already cached. + // TODO: The server should include a `hasLoading` field as part of the + // route tree prefetch. + if (segment.loading !== null) { + // This segment has a loading boundary, which means the server won't + // render its children. So there's nothing left to prefetch along this + // path. We can bail out. + return convertRouteTreeToFlightRouterState(tree) + } + break + } + case EntryStatus.Pending: { + // There's another prefetch currently in progress. Don't add the refetch + // marker yet, so the server knows it can skip rendering this segment. + break + } + case EntryStatus.Rejected: { + // The segment failed to load. We shouldn't issue another request until + // the stale time has elapsed. 
+ break + } + default: + segment satisfies never + } + const requestTreeChildren: Record = {} + if (tree.slots !== null) { + for (const parallelRouteKey in tree.slots) { + const childTree = tree.slots[parallelRouteKey] + requestTreeChildren[parallelRouteKey] = + createDynamicRequestTreeForPartiallyCachedSegments( + now, + route, + childTree, + refetchMarkerContext, + spawnedEntries + ) + } + } + const requestTree: FlightRouterState = [ + tree.segment, + requestTreeChildren, + null, + refetchMarker, + tree.isRootLayout, + ] + return requestTree +} + function pingSegment( route: FulfilledRouteCacheEntry, segment: SegmentCacheEntry, routeKey: RouteCacheKey, segmentKey: string, - accessToken: string + accessToken: string | null ): void { if (segment.status === EntryStatus.Empty) { - // Segment is not yet cached, and there's no request already in progress. - // Spawn a task to request the segment and load it into the cache. - spawnPrefetchSubtask( - fetchSegmentOnCacheMiss(route, segment, routeKey, segmentKey, accessToken) - ) - // Upgrade to Pending so we know there's already a request in progress - segment.status = EntryStatus.Pending + if (accessToken === null) { + // We don't have an access token for this segment, which means we can't + // do a per-segment prefetch. This happens when the route tree was + // returned by a dynamic server response. Or if the server has decided + // not to grant access to this segment. + } else { + // Segment is not yet cached, and there's no request already in progress. + // Spawn a task to request the segment and load it into the cache. 
+ + // Upgrade to Pending so we know there's already a request in progress + segment.status = EntryStatus.Pending + spawnPrefetchSubtask( + fetchSegmentOnCacheMiss( + route, + segment, + routeKey, + segmentKey, + accessToken + ) + ) + } } // Segments do not have dependent tasks, so once the prefetch is initiated, diff --git a/packages/next/src/server/app-render/types.ts b/packages/next/src/server/app-render/types.ts index 4e6c8c703d052..ab0a48ef1014b 100644 --- a/packages/next/src/server/app-render/types.ts +++ b/packages/next/src/server/app-render/types.ts @@ -46,7 +46,15 @@ export const flightRouterStateSchema: s.Describe = s.tuple([ s.lazy(() => flightRouterStateSchema) ), s.optional(s.nullable(s.string())), - s.optional(s.nullable(s.union([s.literal('refetch'), s.literal('refresh')]))), + s.optional( + s.nullable( + s.union([ + s.literal('refetch'), + s.literal('refresh'), + s.literal('inside-shared-layout'), + ]) + ) + ), s.optional(s.boolean()), ]) @@ -57,13 +65,32 @@ export type FlightRouterState = [ segment: Segment, parallelRoutes: { [parallelRouterKey: string]: FlightRouterState }, url?: string | null, - /* - /* "refresh" and "refetch", despite being similarly named, have different semantics. - * - "refetch" is a server indicator which informs where rendering should start from. - * - "refresh" is a client router indicator that it should re-fetch the data from the server for the current segment. - * It uses the "url" property above to determine where to fetch from. + /** + * "refresh" and "refetch", despite being similarly named, have different + * semantics: + * - "refetch" is used during a request to inform the server where rendering + * should start from. + * + * - "refresh" is used by the client to mark that a segment should re-fetch the + * data from the server for the current segment. It uses the "url" property + * above to determine where to fetch from. 
+ * + * - "inside-shared-layout" is used during a prefetch request to inform the + * server that even if the segment matches, it should be treated as if it's + * within the "new" part of a navigation — inside the shared layout. If + * the segment doesn't match, then it has no effect, since it would be + * treated as new regardless. If it does match, though, the server does not + * need to render it, because the client already has it. + * + * A bit confusing, but that's because it has only one extremely narrow use + * case — during a non-PPR prefetch, the server uses it to find the first + * loading boundary beneath a shared layout. + * + * TODO: We should rethink the protocol for dynamic requests. It might not + * make sense for the client to send a FlightRouterState, since this type is + * overloaded with concerns. */ - refresh?: 'refetch' | 'refresh' | null, + refresh?: 'refetch' | 'refresh' | 'inside-shared-layout' | null, isRootLayout?: boolean, ] diff --git a/packages/next/src/server/app-render/walk-tree-with-flight-router-state.tsx b/packages/next/src/server/app-render/walk-tree-with-flight-router-state.tsx index 337c3d73f6a4a..ef4b3b884c8bf 100644 --- a/packages/next/src/server/app-render/walk-tree-with-flight-router-state.tsx +++ b/packages/next/src/server/app-render/walk-tree-with-flight-router-state.tsx @@ -30,6 +30,7 @@ export async function walkTreeWithFlightRouterState({ loaderTreeToFilter, parentParams, flightRouterState, + parentIsInsideSharedLayout, rscHead, injectedCSS, injectedJS, @@ -44,6 +45,7 @@ export async function walkTreeWithFlightRouterState({ parentParams: { [key: string]: string | string[] } flightRouterState?: FlightRouterState rscHead: HeadData + parentIsInsideSharedLayout?: boolean injectedCSS: Set injectedJS: Set injectedFontPreloadTags: Set @@ -110,7 +112,24 @@ export async function walkTreeWithFlightRouterState({ // to ensure prefetches are quick and inexpensive. 
If there's no `loading` component anywhere in the tree being rendered, // the prefetch will be short-circuited to avoid requesting a potentially very expensive subtree. If there's a `loading` // somewhere in the tree, we'll recursively render the component tree up until we encounter that loading component, and then stop. - const shouldSkipComponentTree = + + // Check if we're inside the "new" part of the navigation — inside the + // shared layout. In the case of a prefetch, this can be true even if the + // segment matches, because the client might send a matching segment to + // indicate that it already has the data in its cache. But in order to find + // the correct loading boundary, we still need to track where the shared + // layout begins. + // + // TODO: We should rethink the protocol for dynamic requests. It might not + // make sense for the client to send a FlightRouterState, since that type is + // overloaded with other concerns. + const isInsideSharedLayout = + renderComponentsOnThisLevel || + parentIsInsideSharedLayout || + flightRouterState[3] === 'inside-shared-layout' + + if ( + isInsideSharedLayout && !experimental.isRoutePPREnabled && // If PPR is disabled, and this is a request for the route tree, then we // never render any components. Only send the router state. @@ -119,10 +138,44 @@ export async function walkTreeWithFlightRouterState({ (isPrefetch && !Boolean(modules.loading) && !hasLoadingComponentInTree(loaderTreeToFilter))) + ) { + // Send only the router state. + // TODO: Even for a dynamic route, we should cache these responses, + // because they do not contain any render data (neither segment data nor + // the head). They can be made even more cacheable once we move the route + // params into a separate data structure. + const overriddenSegment = + flightRouterState && + // TODO: Why does canSegmentBeOverridden exist? Why don't we always just + // use `actualSegment`? Is it to avoid overwriting some state that's + // tracked by the client? 
Dig deeper to see if we can simplify this. + canSegmentBeOverridden(actualSegment, flightRouterState[0]) + ? flightRouterState[0] + : actualSegment + + const routerState = createFlightRouterStateFromLoaderTree( + // Create router state using the slice of the loaderTree + loaderTreeToFilter, + getDynamicParamFromSegment, + query + ) + return [ + [ + overriddenSegment, + routerState, + null, + [null, null], + false, + ] satisfies FlightDataSegment, + ] + } if (renderComponentsOnThisLevel) { const overriddenSegment = flightRouterState && + // TODO: Why does canSegmentBeOverridden exist? Why don't we always just + // use `actualSegment`? Is it to avoid overwriting some state that's + // tracked by the client? Dig deeper to see if we can simplify this. canSegmentBeOverridden(actualSegment, flightRouterState[0]) ? flightRouterState[0] : actualSegment @@ -133,52 +186,34 @@ export async function walkTreeWithFlightRouterState({ getDynamicParamFromSegment, query ) + // Create component tree using the slice of the loaderTree + const seedData = await createComponentTree( + // This ensures flightRouterPath is valid and filters down the tree + { + ctx, + loaderTree: loaderTreeToFilter, + parentParams: currentParams, + injectedCSS, + injectedJS, + injectedFontPreloadTags, + // This is intentionally not "rootLayoutIncludedAtThisLevelOrAbove" as createComponentTree starts at the current level and does a check for "rootLayoutAtThisLevel" too. + rootLayoutIncluded, + getViewportReady, + getMetadataReady, + preloadCallbacks, + authInterrupts: experimental.authInterrupts, + } + ) - if (shouldSkipComponentTree) { - // Send only the router state. - // TODO: Even for a dynamic route, we should cache these responses, - // because they do not contain any render data (neither segment data nor - // the head). They can be made even more cacheable once we move the route - // params into a separate data structure. 
- return [ - [ - overriddenSegment, - routerState, - null, - [null, null], - false, - ] satisfies FlightDataSegment, - ] - } else { - // Create component tree using the slice of the loaderTree - const seedData = await createComponentTree( - // This ensures flightRouterPath is valid and filters down the tree - { - ctx, - loaderTree: loaderTreeToFilter, - parentParams: currentParams, - injectedCSS, - injectedJS, - injectedFontPreloadTags, - // This is intentionally not "rootLayoutIncludedAtThisLevelOrAbove" as createComponentTree starts at the current level and does a check for "rootLayoutAtThisLevel" too. - rootLayoutIncluded, - getViewportReady, - getMetadataReady, - preloadCallbacks, - authInterrupts: experimental.authInterrupts, - } - ) - - return [ - [ - overriddenSegment, - routerState, - seedData, - rscHead, - false, - ] satisfies FlightDataSegment, - ] - } + return [ + [ + overriddenSegment, + routerState, + seedData, + rscHead, + false, + ] satisfies FlightDataSegment, + ] } // If we are not rendering on this level we need to check if the current @@ -217,6 +252,7 @@ export async function walkTreeWithFlightRouterState({ parentParams: currentParams, flightRouterState: flightRouterState && flightRouterState[1][parallelRouteKey], + parentIsInsideSharedLayout: isInsideSharedLayout, rscHead, injectedCSS: injectedCSSWithCurrentLayout, injectedJS: injectedJSWithCurrentLayout, diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/page.tsx b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/page.tsx index e790ac9652647..1eb3e966b6d5a 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/page.tsx +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/page.tsx @@ -1,19 +1,64 @@ +'use client' + import Link from 'next/link' +import { useState } from 'react' + +function LinkAccordion({ href, children }) { + const [isVisible, setIsVisible] = useState(false) + return ( + <> + setIsVisible(!isVisible)} + data-link-accordion={href} + /> + 
{isVisible ? ( + {children} + ) : ( + `${children} (link is hidden)` + )} + + ) +} export default function Page() { return ( -
    -
  • - Page with PPR enabled -
  • -
  • - - Page with PPR enabled but has dynamic param - -
  • -
  • - Page with PPR disabled -
  • -
+ <> +

+ This page is used to test that if you prefetch a link multiple times, + the prefetches are deduped by the client cache (unless/until they become + stale). The e2e associated with this page works by toggling the + visibility of the links and checking whether any prefetch requests are + issued. +

+

+ You can test the behavior manually by opening up the network tab in the + browser's DevTools and seeing what happens when you toggle a Link's + visibility. +

+
    +
  • + + Page with PPR enabled + +
  • +
  • + + Page with PPR enabled but has dynamic param + +
  • +
  • + + Page with PPR disabled + +
  • +
  • + + Page with PPR disabled, but has a loading boundary + +
  • +
+ ) } diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled-with-loading-boundary/page.tsx b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled-with-loading-boundary/page.tsx index b92fb53239b5c..379aaf559743a 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled-with-loading-boundary/page.tsx +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled-with-loading-boundary/page.tsx @@ -2,5 +2,5 @@ import { connection } from 'next/server' export default async function PPRDisabledWithLoadingBoundary() { await connection() - return 'Dynamic Content' + return
Page content
} diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled/page.tsx b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled/page.tsx index 18ad01678962b..71adef20a785a 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled/page.tsx +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-disabled/page.tsx @@ -3,7 +3,7 @@ import { connection } from 'next/server' async function Content() { await connection() - return 'Dynamic Content' + return
Page content
} export default function PPRDisabled() { diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/[dynamic-param]/page.tsx b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/[dynamic-param]/page.tsx index 3a6a095d3c787..20f9d2f245e60 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/[dynamic-param]/page.tsx +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/[dynamic-param]/page.tsx @@ -1,3 +1,3 @@ export default function Page() { - return '(intentionally empty)' + return
Page content
} diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/page.tsx b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/page.tsx index 270751dc9a6d6..b531e1eee30b6 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/page.tsx +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/app/ppr-enabled/page.tsx @@ -1,3 +1,3 @@ export default function PPREnabled() { - return '(intentionally empty)' + return
Page content
} diff --git a/test/e2e/app-dir/segment-cache/incremental-opt-in/segment-cache-incremental-opt-in.test.ts b/test/e2e/app-dir/segment-cache/incremental-opt-in/segment-cache-incremental-opt-in.test.ts index 498174004fa76..4e5445320877c 100644 --- a/test/e2e/app-dir/segment-cache/incremental-opt-in/segment-cache-incremental-opt-in.test.ts +++ b/test/e2e/app-dir/segment-cache/incremental-opt-in/segment-cache-incremental-opt-in.test.ts @@ -1,4 +1,5 @@ import { nextTestSetup } from 'e2e-utils' +import type * as Playwright from 'playwright' describe('segment cache (incremental opt in)', () => { const { next, isNextDev, skipped } = nextTestSetup({ @@ -10,58 +11,185 @@ describe('segment cache (incremental opt in)', () => { return } - function extractPseudoJSONFromFlightResponse(flightText: string) { - // This is a cheat that takes advantage of the fact that the roots of the - // Flight responses in this test are JSON. This is just a temporary smoke test - // until the client part is implemented; we shouldn't rely on this as a - // general testing strategy. - const match = flightText.match(/^0:(.*)$/m) - if (match) { - return JSON.parse(match[1]) - } - return null - } + async function testPrefetchDeduping(linkHref) { + // This e2e test app is designed to verify that if you prefetch a link + // multiple times, the prefetches are deduped by the client cache + // (unless/until they become stale). It works by toggling the visibility of + // the links and checking whether any prefetch requests are issued. + // + // Throughout the duration of the test, we collect all the prefetch requests + // that occur. Then at the end we confirm there are no duplicates. 
+ const prefetches = new Map() + const duplicatePrefetches = new Map() - // TODO: Replace with e2e test once the client part is implemented - it('route tree prefetch falls through to old prefetching implementation if PPR is disabled for a route', async () => { - await next.browser('/') - const response = await next.fetch('/ppr-disabled', { - headers: { - RSC: '1', - 'Next-Router-Prefetch': '1', - 'Next-Router-Segment-Prefetch': '/_tree', + const interceptor = createRequestInterceptor() + const browser = await next.browser('/', { + beforePageLoad(page: Playwright.Page) { + page.route('**/*', async (route: Playwright.Route) => { + const prefetchInfo = await interceptor.checkPrefetch(route) + if (prefetchInfo) { + const key = JSON.stringify(prefetchInfo) + if (prefetches.has(key)) { + duplicatePrefetches.set(key, prefetchInfo) + } else { + prefetches.set(key, prefetchInfo) + } + } + await interceptor.interceptRoute(page, route) + }) }, }) - expect(response.status).toBe(200) + // Each link on the test page has a checkbox that controls its visibility. + // It starts off as hidden. + const checkbox = await browser.elementByCss( + `input[data-link-accordion="${linkHref}"]` + ) + // Confirm the checkbox is not checked + expect(await checkbox.isChecked()).toBe(false) - // Smoke test to confirm that this returned a NavigationFlightResponse. 
- expect(response.headers.get('x-nextjs-postponed')).toBe(null) - const flightText = await response.text() - const result = extractPseudoJSONFromFlightResponse(flightText) - expect(typeof result.b === 'string').toBe(true) - }) - - // TODO: Replace with e2e test once the client part is implemented - it('route tree prefetch does not include any component data even if loading.tsx is defined', async () => { - await next.browser('/') - const response = await next.fetch('/ppr-disabled-with-loading-boundary', { - headers: { - RSC: '1', - 'Next-Router-Prefetch': '1', - 'Next-Router-Segment-Prefetch': '/_tree', - }, + // Click the checkbox to reveal the link and trigger a prefetch + await interceptor.waitForPrefetches(async () => { + await checkbox.click() + await browser.elementByCss(`a[href="${linkHref}"]`) }) - expect(response.status).toBe(200) - expect(response.headers.get('x-nextjs-postponed')).toBe(null) - - // Usually when PPR is disabled, a prefetch to a route that has a - // loading.tsx boundary will include component data in the response, up to - // the first loading boundary. But since this is specifically a prefetch - // of the route tree, it should skip all the component data and only return - // the router state. - const flightText = await response.text() - // Confirm that the response does not include any component data by checking - // for the absence of the loading component. - expect(flightText).not.toContain('Loading...') + + // Toggle the visibility of the link. Prefetches are initiated on viewport, + // so if the cache does not dedupe them properly, this test will detect it. 
+ await checkbox.click() // hide + await checkbox.click() // show + const link = await browser.elementByCss(`a[href="${linkHref}"]`) + + // Navigate to the target link + await link.click() + + // Confirm the navigation happened + await browser.elementById('page-content') + expect(new URL(await browser.url()).pathname).toBe(linkHref) + + // Finally, assert there were no duplicate prefetches + expect(duplicatePrefetches.size).toBe(0) + } + + describe('multiple prefetches to same link are deduped', () => { + it('page with PPR enabled', () => testPrefetchDeduping('/ppr-enabled')) + it('page with PPR enabled, and has a dynamic param', () => + testPrefetchDeduping('/ppr-enabled/dynamic-param')) + it('page with PPR disabled', () => testPrefetchDeduping('/ppr-disabled')) + it('page with PPR disabled, and has a loading boundary', () => + testPrefetchDeduping('/ppr-disabled-with-loading-boundary')) }) }) + +function createRequestInterceptor() { + // Test utility for intercepting internal RSC requests so we can control the + // timing of when they resolve. We want to avoid relying on internals and + // implementation details as much as possible, so the only thing this does + // for now is let you block and release requests from happening based on + // their type (prefetch requests, navigation requests). + let prefetchesPromise: PromiseWithResolvers = null + let lastPrefetchRequest: Playwright.Request | null = null + + async function checkPrefetch(route: Playwright.Route): Promise<{ + href: string + segment: string | null + base: string | null + } | null> { + const request = route.request() + const requestHeaders = await request.allHeaders() + if ( + requestHeaders['RSC'.toLowerCase()] && + requestHeaders['Next-Router-Prefetch'.toLowerCase()] + ) { + return { + href: new URL(request.url()).pathname, + segment: requestHeaders['Next-Router-Segment-Prefetch'.toLowerCase()], + base: requestHeaders['Next-Router-State-Tree'.toLowerCase()] ?? 
null, + } + } + return null + } + + return { + checkPrefetch, + + /** + * Waits for the next prefetch request, then keeps waiting + * until the prefetch queue is empty (to account for network throttling). + * + * If no prefetches are initiated, this will timeout. + */ + async waitForPrefetches( + scope: () => Promise | T = (): undefined => {} + ): Promise { + if (prefetchesPromise === null) { + let resolve + let reject + const promise: Promise = new Promise((res, rej) => { + resolve = res + reject = rej + }) + prefetchesPromise = { + resolve, + reject, + promise, + } + } + const result = await scope() + if (prefetchesPromise !== null) { + await prefetchesPromise.promise + } + return result + }, + + async interceptRoute(page: Playwright.Page, route: Playwright.Route) { + const request = route.request() + const requestHeaders = await request.allHeaders() + + if (requestHeaders['RSC'.toLowerCase()]) { + // This is an RSC request. Check if it's a prefetch or a navigation. + if (requestHeaders['Next-Router-Prefetch'.toLowerCase()]) { + // This is a prefetch request. + if (prefetchesPromise !== null) { + // Wait for the prefetch response to finish, then wait an additional + // async task for additional prefetches to be initiated. + lastPrefetchRequest = request + const waitForMorePrefetches = async () => { + const inBrowserResponse = await request.response() + await inBrowserResponse.finished() + + await page.evaluate( + () => + // If the prefetch queue is network throttled, the next + // request should be issued within a microtask of the previous + // one finishing. + new Promise((res) => requestIdleCallback(() => res())) + ) + if (request === lastPrefetchRequest) { + // No further prefetches were initiated. Assume the prefetch + // queue is now empty. 
+ prefetchesPromise.resolve() + prefetchesPromise = null + lastPrefetchRequest = null + } + } + const response = await page.request.fetch(route.request()) + const responseText = await response.text() + await route.fulfill({ + body: responseText, + headers: response.headers(), + }) + waitForMorePrefetches().then( + () => {}, + () => {} + ) + return + } + } else { + // This is a navigation request. + } + } + + await route.continue() + }, + } +}