Skip to content

Commit

Permalink
[Segment Cache] Add CacheStatus.Empty
Browse files Browse the repository at this point in the history
This is a small refactor to allow creating an empty cache entry without
also triggering a server request. Currently these are combined into the
same phase, because there's no case where one operation happens
without the other.

However, I need to implement additional prefetching strategies. For
example, sometimes a segment's data will already be available as part of
a different server response.

To support this, I've split the Pending CacheStatus into two
separate statuses:

- Empty: The cache entry has no data, and there's no pending request to
  fetch it.
- Pending: The cache entry has no data, and there _is_ a pending
  request to fetch it.

This is a refactor only, so there should be no change to
external behavior.
  • Loading branch information
acdlite committed Dec 12, 2024
1 parent 546f022 commit 2882abf
Show file tree
Hide file tree
Showing 3 changed files with 110 additions and 62 deletions.
74 changes: 27 additions & 47 deletions packages/next/src/client/components/segment-cache/cache.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ import {
trackPrefetchRequestBandwidth,
pingPrefetchTask,
type PrefetchTask,
spawnPrefetchSubtask,
} from './scheduler'
import { getAppBuildId } from '../../app-build-id'
import { createHrefFromUrl } from '../router-reducer/create-href-from-url'
Expand Down Expand Up @@ -69,14 +68,20 @@ type RouteCacheEntryShared = {
size: number
}

/**
* Tracks the status of a cache entry as it progresses from no data (Empty),
* to waiting for server data (Pending), to finished (either Fulfilled or
* Rejected, depending on the response from the server).
*/
export const enum EntryStatus {
  // The cache entry has no data, and there is no pending request to fetch
  // it. It exists only as a placeholder until a prefetch task picks it up.
  Empty,
  // The cache entry has no data yet, but a request to fetch it is already
  // in progress.
  Pending,
  // Terminal state: the server response indicated failure, so the entry
  // holds no usable data.
  Rejected,
  // Terminal state: the server data was received and the entry is populated.
  Fulfilled,
}

type PendingRouteCacheEntry = RouteCacheEntryShared & {
status: EntryStatus.Pending
status: EntryStatus.Empty | EntryStatus.Pending
blockedTasks: Set<PrefetchTask> | null
canonicalUrl: null
tree: null
Expand Down Expand Up @@ -118,7 +123,7 @@ type SegmentCacheEntryShared = {
}

type PendingSegmentCacheEntry = SegmentCacheEntryShared & {
status: EntryStatus.Pending
status: EntryStatus.Empty | EntryStatus.Pending
rsc: null
loading: null
isPartial: true
Expand Down Expand Up @@ -257,41 +262,29 @@ export function waitForSegmentCacheEntry(
}

/**
* Reads the route cache for a matching entry *and* spawns a request if there's
* no match. Because this may issue a network request, it should only be called
* from within the context of a prefetch task.
* Checks if an entry for a route exists in the cache. If so, it returns the
* entry. If not, it adds an empty entry to the cache and returns it.
*/
export function requestRouteCacheEntryFromCache(
export function readOrCreateRouteCacheEntry(
now: number,
task: PrefetchTask
): RouteCacheEntry {
const key = task.key
// First check if there's a non-intercepted entry. Most routes cannot be
// intercepted, so this is the common case.
const nonInterceptedEntry = readExactRouteCacheEntry(now, key.href, null)
if (nonInterceptedEntry !== null && !nonInterceptedEntry.couldBeIntercepted) {
// Found a match, and the route cannot be intercepted. We can reuse it.
return nonInterceptedEntry
}
// There was no match. Check again but include the Next-Url this time.
const exactEntry = readExactRouteCacheEntry(now, key.href, key.nextUrl)
if (exactEntry !== null) {
return exactEntry
const existingEntry = readRouteCacheEntry(now, key)
if (existingEntry !== null) {
return existingEntry
}
// Create a pending entry and spawn a request for its data.
// Create a pending entry and add it to the cache.
const pendingEntry: PendingRouteCacheEntry = {
canonicalUrl: null,
status: EntryStatus.Pending,
status: EntryStatus.Empty,
blockedTasks: null,
tree: null,
head: null,
isHeadPartial: true,
// If the request takes longer than a minute, a subsequent request should
// retry instead of waiting for this one.
//
// When the response is received, this value will be replaced by a new value
// based on the stale time sent from the server.
staleAt: now + 60 * 1000,
// Since this is an empty entry, there's no reason to ever evict it. It will
// be updated when the data is populated.
staleAt: Infinity,
// This is initialized to true because we don't know yet whether the route
// could be intercepted. It's only set to false once we receive a response
// from the server.
Expand All @@ -303,7 +296,6 @@ export function requestRouteCacheEntryFromCache(
prev: null,
size: 0,
}
spawnPrefetchSubtask(fetchRouteOnCacheMiss(pendingEntry, task))
const keypath: Prefix<RouteCacheKeypath> =
key.nextUrl === null ? [key.href] : [key.href, key.nextUrl]
routeCacheMap.set(keypath, pendingEntry)
Expand All @@ -315,24 +307,21 @@ export function requestRouteCacheEntryFromCache(
}

/**
* Reads the route cache for a matching entry *and* spawns a request if there's
* no match. Because this may issue a network request, it should only be called
* from within the context of a prefetch task.
* Checks if an entry for a segment exists in the cache. If so, it returns the
* entry. If not, it adds an empty entry to the cache and returns it.
*/
export function requestSegmentEntryFromCache(
export function readOrCreateSegmentCacheEntry(
now: number,
task: PrefetchTask,
route: FulfilledRouteCacheEntry,
path: string,
accessToken: string
path: string
): SegmentCacheEntry {
const existingEntry = readSegmentCacheEntry(now, path)
if (existingEntry !== null) {
return existingEntry
}
// Create a pending entry and spawn a request for its data.
// Create a pending entry and add it to the cache.
const pendingEntry: PendingSegmentCacheEntry = {
status: EntryStatus.Pending,
status: EntryStatus.Empty,
rsc: null,
loading: null,
staleAt: route.staleAt,
Expand All @@ -345,15 +334,6 @@ export function requestSegmentEntryFromCache(
prev: null,
size: 0,
}
spawnPrefetchSubtask(
fetchSegmentEntryOnCacheMiss(
route,
pendingEntry,
task.key,
path,
accessToken
)
)
segmentCacheMap.set(path, pendingEntry)
// Stash the keypath on the entry so we know how to remove it from the map
// if it gets evicted from the LRU.
Expand Down Expand Up @@ -488,7 +468,7 @@ function rejectSegmentCacheEntry(
}
}

async function fetchRouteOnCacheMiss(
export async function fetchRouteOnCacheMiss(
entry: PendingRouteCacheEntry,
task: PrefetchTask
): Promise<void> {
Expand Down Expand Up @@ -589,7 +569,7 @@ async function fetchRouteOnCacheMiss(
}
}

async function fetchSegmentEntryOnCacheMiss(
export async function fetchSegmentOnCacheMiss(
route: FulfilledRouteCacheEntry,
segmentCacheEntry: PendingSegmentCacheEntry,
routeKey: RouteCacheKey,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,7 @@ function readRenderSnapshotFromCache(
isPartial = segmentEntry.isPartial
break
}
case EntryStatus.Empty:
case EntryStatus.Pending: {
// We haven't received data for this segment yet, but there's already
// an in-progress request. Since it's extremely likely to arrive
Expand Down
97 changes: 82 additions & 15 deletions packages/next/src/client/components/segment-cache/scheduler.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
import type { TreePrefetch } from '../../../server/app-render/collect-segment-data'
import {
requestRouteCacheEntryFromCache,
requestSegmentEntryFromCache,
readOrCreateRouteCacheEntry,
readOrCreateSegmentCacheEntry,
fetchRouteOnCacheMiss,
fetchSegmentOnCacheMiss,
EntryStatus,
type FulfilledRouteCacheEntry,
type RouteCacheEntry,
type SegmentCacheEntry,
} from './cache'
import type { RouteCacheKey } from './cache-key'

Expand Down Expand Up @@ -169,7 +172,7 @@ export function trackPrefetchRequestBandwidth(

const noop = () => {}

export function spawnPrefetchSubtask(promise: Promise<any>) {
function spawnPrefetchSubtask(promise: Promise<any>) {
// When the scheduler spawns an async task, we don't await its result
// directly. Instead, the async task writes its result directly into the
// cache, then pings the scheduler to continue.
Expand Down Expand Up @@ -214,8 +217,8 @@ function processQueueInMicrotask() {
// Process the task queue until we run out of network bandwidth.
let task = heapPeek(taskHeap)
while (task !== null && hasNetworkBandwidth()) {
const route = requestRouteCacheEntryFromCache(now, task)
const exitStatus = pingRouteTree(now, task, route)
const route = readOrCreateRouteCacheEntry(now, task)
const exitStatus = pingRootRouteTree(now, task, route)
switch (exitStatus) {
case PrefetchTaskExitStatus.InProgress:
// The task yielded because there are too many requests in progress.
Expand Down Expand Up @@ -243,15 +246,45 @@ function processQueueInMicrotask() {
}
}

function pingRouteTree(
function pingRootRouteTree(
now: number,
task: PrefetchTask,
route: RouteCacheEntry
): PrefetchTaskExitStatus {
switch (route.status) {
case EntryStatus.Empty: {
// Route is not yet cached, and there's no request already in progress.
// Spawn a task to request the route, load it into the cache, and ping
// the task to continue.

// TODO: There are multiple strategies in the <Link> API for prefetching
// a route. Currently we've only implemented the main one: per-segment,
// static-data only.
//
// There's also <Link prefetch={true}> which prefetches both static *and*
// dynamic data. Similarly, we need to fallback to the old, per-page
// behavior if PPR is disabled for a route (via the incremental opt-in).
//
// Those cases will be handled here.
spawnPrefetchSubtask(fetchRouteOnCacheMiss(route, task))

// If the request takes longer than a minute, a subsequent request should
// retry instead of waiting for this one. When the response is received,
// this value will be replaced by a new value based on the stale time sent
// from the server.
// TODO: We should probably also manually abort the fetch task, to reclaim
// server bandwidth.
route.staleAt = now + 60 * 1000

// Upgrade to Pending so we know there's already a request in progress
route.status = EntryStatus.Pending

// Intentional fallthrough to the Pending branch
}
case EntryStatus.Pending: {
// Still pending. We can't start prefetching the segments until the route
// tree has loaded.
// tree has loaded. Add the task to the set of blocked tasks so that it
// is notified when the route tree is ready.
const blockedTasks = route.blockedTasks
if (blockedTasks === null) {
route.blockedTasks = new Set([task])
Expand All @@ -271,8 +304,14 @@ function pingRouteTree(
return PrefetchTaskExitStatus.InProgress
}
const tree = route.tree
requestSegmentEntryFromCache(now, task, route, tree.path, '')
return pingSegmentTree(now, task, route, tree)
const segmentPath = tree.path
const segment = readOrCreateSegmentCacheEntry(now, route, segmentPath)
pingSegment(route, segment, task.key, tree.path, tree.token)
if (!hasNetworkBandwidth()) {
// Stop prefetching segments until there's more bandwidth.
return PrefetchTaskExitStatus.InProgress
}
return pingRouteTree(now, task, route, tree)
}
default: {
const _exhaustiveCheck: never = route
Expand All @@ -281,7 +320,7 @@ function pingRouteTree(
}
}

function pingSegmentTree(
function pingRouteTree(
now: number,
task: PrefetchTask,
route: FulfilledRouteCacheEntry,
Expand All @@ -291,15 +330,15 @@ function pingSegmentTree(
// Recursively ping the children.
for (const parallelRouteKey in tree.slots) {
const childTree = tree.slots[parallelRouteKey]
const childPath = childTree.path
const childToken = childTree.token
const segment = readOrCreateSegmentCacheEntry(now, route, childPath)
pingSegment(route, segment, task.key, childPath, childToken)
if (!hasNetworkBandwidth()) {
// Stop prefetching segments until there's more bandwidth.
return PrefetchTaskExitStatus.InProgress
} else {
const childPath = childTree.path
const childToken = childTree.token
requestSegmentEntryFromCache(now, task, route, childPath, childToken)
}
const childExitStatus = pingSegmentTree(now, task, route, childTree)
const childExitStatus = pingRouteTree(now, task, route, childTree)
if (childExitStatus === PrefetchTaskExitStatus.InProgress) {
// Child yielded without finishing.
return PrefetchTaskExitStatus.InProgress
Expand All @@ -310,6 +349,34 @@ function pingSegmentTree(
return PrefetchTaskExitStatus.Done
}

function pingSegment(
  route: FulfilledRouteCacheEntry,
  segment: SegmentCacheEntry,
  routeKey: RouteCacheKey,
  segmentPath: string,
  accessToken: string
): void {
  // Segments have no dependent tasks, so once a prefetch has been initiated
  // there's nothing further to do here — the server data is written into the
  // entry by `fetchSegmentOnCacheMiss`.
  if (segment.status !== EntryStatus.Empty) {
    return
  }
  // The segment has no data and no request in flight. Spawn a background
  // task to fetch it and populate the cache entry.
  spawnPrefetchSubtask(
    fetchSegmentOnCacheMiss(
      route,
      segment,
      routeKey,
      segmentPath,
      accessToken
    )
  )
  // Mark the entry Pending so subsequent pings know a request is in progress.
  segment.status = EntryStatus.Pending
}

// -----------------------------------------------------------------------------
// The remainder of the module is a MinHeap implementation. Try not to put any
// logic below here unless it's related to the heap algorithm. We can extract
Expand Down

0 comments on commit 2882abf

Please sign in to comment.