diff --git a/config/gni/devtools_grd_files.gni b/config/gni/devtools_grd_files.gni index 6560bf96aeb..8cb509d49eb 100644 --- a/config/gni/devtools_grd_files.gni +++ b/config/gni/devtools_grd_files.gni @@ -491,6 +491,7 @@ grd_files_release_sources = [ "front_end/models/trace/handlers/handlers.js", "front_end/models/trace/helpers/helpers.js", "front_end/models/trace/insights/insights.js", + "front_end/models/trace/lantern/lantern.js", "front_end/models/trace/root-causes/root-causes.js", "front_end/models/trace/trace.js", "front_end/models/trace/types/types.js", @@ -975,6 +976,7 @@ grd_files_debug_sources = [ "front_end/models/timeline_model/TimelineProfileTree.js", "front_end/models/timeline_model/TracingLayerTree.js", "front_end/models/trace/EntriesFilter.js", + "front_end/models/trace/LanternComputationData.js", "front_end/models/trace/ModelImpl.js", "front_end/models/trace/Processor.js", "front_end/models/trace/TracingManager.js", @@ -1024,6 +1026,29 @@ grd_files_debug_sources = [ "front_end/models/trace/insights/RenderBlocking.js", "front_end/models/trace/insights/Viewport.js", "front_end/models/trace/insights/types.js", + "front_end/models/trace/lantern/BaseNode.js", + "front_end/models/trace/lantern/CpuNode.js", + "front_end/models/trace/lantern/LanternError.js", + "front_end/models/trace/lantern/Metric.js", + "front_end/models/trace/lantern/NetworkNode.js", + "front_end/models/trace/lantern/PageDependencyGraph.js", + "front_end/models/trace/lantern/TBTUtils.js", + "front_end/models/trace/lantern/metrics/FirstContentfulPaint.js", + "front_end/models/trace/lantern/metrics/Interactive.js", + "front_end/models/trace/lantern/metrics/LargestContentfulPaint.js", + "front_end/models/trace/lantern/metrics/MaxPotentialFID.js", + "front_end/models/trace/lantern/metrics/SpeedIndex.js", + "front_end/models/trace/lantern/metrics/TotalBlockingTime.js", + "front_end/models/trace/lantern/metrics/metrics.js", + "front_end/models/trace/lantern/simulation/ConnectionPool.js", + 
"front_end/models/trace/lantern/simulation/Constants.js", + "front_end/models/trace/lantern/simulation/DNSCache.js", + "front_end/models/trace/lantern/simulation/NetworkAnalyzer.js", + "front_end/models/trace/lantern/simulation/SimulationTimingMap.js", + "front_end/models/trace/lantern/simulation/Simulator.js", + "front_end/models/trace/lantern/simulation/TcpConnection.js", + "front_end/models/trace/lantern/simulation/simulation.js", + "front_end/models/trace/lantern/types/lantern.js", "front_end/models/trace/root-causes/LayoutShift.js", "front_end/models/trace/root-causes/RootCauses.js", "front_end/models/trace/types/Configuration.js", diff --git a/front_end/BUILD.gn b/front_end/BUILD.gn index b54918bf887..bfcc78873fe 100644 --- a/front_end/BUILD.gn +++ b/front_end/BUILD.gn @@ -132,6 +132,7 @@ group("unittests") { "models/trace/handlers:unittests", "models/trace/helpers:unittests", "models/trace/insights:unittests", + "models/trace/lantern:unittests", "models/trace/root-causes:unittests", "models/trace/types:unittests", "models/workspace:unittests", diff --git a/front_end/models/trace/BUILD.gn b/front_end/models/trace/BUILD.gn index 01689ba9215..5f1370ffb28 100644 --- a/front_end/models/trace/BUILD.gn +++ b/front_end/models/trace/BUILD.gn @@ -10,6 +10,7 @@ import("../visibility.gni") devtools_module("trace") { sources = [ "EntriesFilter.ts", + "LanternComputationData.ts", "ModelImpl.ts", "Processor.ts", "TracingManager.ts", @@ -23,6 +24,7 @@ devtools_module("trace") { "handlers:bundle", "helpers:bundle", "insights:bundle", + "lantern:bundle", "root-causes:bundle", "types:bundle", ] @@ -47,7 +49,6 @@ devtools_entrypoint("bundle") { "../../services/trace_bounds/*", "../../services/tracing/*", "../../testing/*", - "../../testing/*", "../../ui/components/docs/*", "../../ui/legacy/components/utils/*", "../timeline_model/*", diff --git a/front_end/models/trace/LanternComputationData.ts b/front_end/models/trace/LanternComputationData.ts new file mode 100644 index 
00000000000..c7a052422d5 --- /dev/null +++ b/front_end/models/trace/LanternComputationData.ts @@ -0,0 +1,435 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Protocol from '../../generated/protocol.js'; + +import {MetricName} from './handlers/PageLoadMetricsHandler.js'; +import {type TraceParseData} from './handlers/types.js'; +import * as Lantern from './lantern/lantern.js'; +import {type MicroSeconds} from './types/Timing.js'; +import {type TraceEvents} from './types/types.js'; + +type NetworkRequest = Lantern.NetworkRequest; + +function createProcessedNavigation(traceEngineData: TraceParseData): Lantern.Simulation.ProcessedNavigation { + const Meta = traceEngineData.Meta; + const frameId = Meta.mainFrameId; + const scoresByNav = traceEngineData.PageLoadMetrics.metricScoresByFrameId.get(frameId); + if (!scoresByNav) { + throw new Error('missing metric scores for main frame'); + } + + const lastNavigationId = Meta.mainFrameNavigations.at(-1)?.args.data?.navigationId; + const scores = lastNavigationId && scoresByNav.get(lastNavigationId); + if (!scores) { + throw new Error('missing metric scores for specified navigation'); + } + + const getTimestampOrUndefined = (metric: MetricName): MicroSeconds|undefined => { + const metricScore = scores.get(metric); + if (!metricScore?.event) { + return; + } + return metricScore.event.ts; + }; + const getTimestamp = (metric: MetricName): MicroSeconds => { + const metricScore = scores.get(metric); + if (!metricScore?.event) { + throw new Error(`missing metric: ${metric}`); + } + return metricScore.event.ts; + }; + return { + timestamps: { + firstContentfulPaint: getTimestamp(MetricName.FCP), + largestContentfulPaint: getTimestampOrUndefined(MetricName.LCP), + }, + }; +} + +function createParsedUrl(url: 
URL|string): Lantern.ParsedURL { + if (typeof url === 'string') { + url = new URL(url); + } + return { + scheme: url.protocol.split(':')[0], + // Intentional, DevTools uses different terminology + host: url.hostname, + securityOrigin: url.origin, + }; +} + +/** + * Returns a map of `pid` -> `tid[]`. + */ +function findWorkerThreads(trace: Lantern.Trace): Map<number, number[]> { + // TODO: WorkersHandler in TraceEngine needs to be updated to also include `pid` (only had `tid`). + const workerThreads = new Map<number, number[]>(); + const workerCreationEvents = ['ServiceWorker thread', 'DedicatedWorker thread']; + + for (const event of trace.traceEvents) { + if (event.name !== 'thread_name' || !event.args.name) { + continue; + } + if (!workerCreationEvents.includes(event.args.name)) { + continue; + } + + const tids = workerThreads.get(event.pid); + if (tids) { + tids.push(event.tid); + } else { + workerThreads.set(event.pid, [event.tid]); + } + } + + return workerThreads; +} + +function createLanternRequest( + traceEngineData: TraceParseData, workerThreads: Map<number, number[]>, + request: TraceEvents.SyntheticNetworkRequest): NetworkRequest|undefined { + if (request.args.data.connectionId === undefined || request.args.data.connectionReused === undefined) { + throw new Error('Trace is too old'); + } + + let url; + try { + url = new URL(request.args.data.url); + } catch (e) { + return; + } + + const timing = request.args.data.timing ? { + // These two timings are not included in the trace. + workerFetchStart: -1, + workerRespondWithSettled: -1, + ...request.args.data.timing, + } : + undefined; + + const networkRequestTime = timing ? timing.requestTime * 1000 : request.args.data.syntheticData.downloadStart / 1000; + + let fromWorker = false; + const tids = workerThreads.get(request.pid); + if (tids?.includes(request.tid)) { + fromWorker = true; + } + + // TraceEngine collects worker thread ids in a different manner than `workerThreads` does. 
+ // AFAIK these should be equivalent, but in case they are not let's also check this for now. + if (traceEngineData.Workers.workerIdByThread.has(request.tid)) { + fromWorker = true; + } + + // `initiator` in the trace does not contain the stack trace for JS-initiated + // requests. Instead, that is stored in the `stackTrace` property of the SyntheticNetworkRequest. + // There are some minor differences in the fields, accounted for here. + // Most importantly, there seems to be fewer frames in the trace than the equivalent + // events over the CDP. This results in less accuracy in determining the initiator request, + // which means fewer edges in the graph, which means worse results. + // TODO: Should fix in Chromium. + const initiator: Lantern.NetworkRequest['initiator'] = + request.args.data.initiator ?? {type: Protocol.Network.InitiatorType.Other}; + if (request.args.data.stackTrace) { + const callFrames = request.args.data.stackTrace.map(f => { + return { + scriptId: String(f.scriptId) as Protocol.Runtime.ScriptId, + url: f.url, + lineNumber: f.lineNumber - 1, + columnNumber: f.columnNumber - 1, + functionName: f.functionName, + }; + }); + initiator.stack = {callFrames}; + // Note: there is no `parent` to set ... + } + + let resourceType = request.args.data.resourceType; + if (request.args.data.initiator?.fetchType === 'xmlhttprequest') { + // @ts-expect-error yes XHR is a valid ResourceType. TypeScript const enums are so unhelpful. + resourceType = 'XHR'; + } else if (request.args.data.initiator?.fetchType === 'fetch') { + // @ts-expect-error yes Fetch is a valid ResourceType. TypeScript const enums are so unhelpful. + resourceType = 'Fetch'; + } + + // TODO: set decodedBodyLength for data urls in Trace Engine. + let resourceSize = request.args.data.decodedBodyLength ?? 
0; + if (url.protocol === 'data:' && resourceSize === 0) { + const needle = 'base64,'; + const index = url.pathname.indexOf(needle); + if (index !== -1) { + resourceSize = atob(url.pathname.substring(index + needle.length)).length; + } + } + + return { + rawRequest: request, + requestId: request.args.data.requestId, + connectionId: request.args.data.connectionId, + connectionReused: request.args.data.connectionReused, + url: request.args.data.url, + protocol: request.args.data.protocol, + parsedURL: createParsedUrl(url), + documentURL: request.args.data.requestingFrameUrl, + rendererStartTime: request.ts / 1000, + networkRequestTime, + responseHeadersEndTime: request.args.data.syntheticData.downloadStart / 1000, + networkEndTime: request.args.data.syntheticData.finishTime / 1000, + transferSize: request.args.data.encodedDataLength, + resourceSize, + fromDiskCache: request.args.data.syntheticData.isDiskCached, + fromMemoryCache: request.args.data.syntheticData.isMemoryCached, + isLinkPreload: request.args.data.isLinkPreload, + finished: request.args.data.finished, + failed: request.args.data.failed, + statusCode: request.args.data.statusCode, + initiator, + timing, + resourceType, + mimeType: request.args.data.mimeType, + priority: request.args.data.priority, + frameId: request.args.data.frame, + fromWorker, + // Set later. + redirects: undefined, + redirectSource: undefined, + redirectDestination: undefined, + initiatorRequest: undefined, + }; +} + +/** + * @param request The request to find the initiator of + */ +function chooseInitiatorRequest(request: Lantern.NetworkRequest, requestsByURL: Map<string, Lantern.NetworkRequest[]>): + Lantern.NetworkRequest|null { + if (request.redirectSource) { + return request.redirectSource; + } + + const initiatorURL = Lantern.PageDependencyGraph.getNetworkInitiators(request)[0]; + let candidates = requestsByURL.get(initiatorURL) || []; + // The (valid) initiator must come before the initiated request. 
+ candidates = candidates.filter(c => { + return c.responseHeadersEndTime <= request.rendererStartTime && c.finished && !c.failed; + }); + if (candidates.length > 1) { + // Disambiguate based on prefetch. Prefetch requests have type 'Other' and cannot + // initiate requests, so we drop them here. + const nonPrefetchCandidates = candidates.filter(cand => cand.resourceType !== Lantern.NetworkRequestTypes.Other); + if (nonPrefetchCandidates.length) { + candidates = nonPrefetchCandidates; + } + } + if (candidates.length > 1) { + // Disambiguate based on frame. It's likely that the initiator comes from the same frame. + const sameFrameCandidates = candidates.filter(cand => cand.frameId === request.frameId); + if (sameFrameCandidates.length) { + candidates = sameFrameCandidates; + } + } + if (candidates.length > 1 && request.initiator.type === 'parser') { + // Filter to just Documents when initiator type is parser. + const documentCandidates = candidates.filter(cand => cand.resourceType === Lantern.NetworkRequestTypes.Document); + if (documentCandidates.length) { + candidates = documentCandidates; + } + } + if (candidates.length > 1) { + // If all real loads came from successful preloads (url preloaded and + // loads came from the cache), filter to link rel=preload request(s). + const linkPreloadCandidates = candidates.filter(c => c.isLinkPreload); + if (linkPreloadCandidates.length) { + const nonPreloadCandidates = candidates.filter(c => !c.isLinkPreload); + const allPreloaded = nonPreloadCandidates.every(c => c.fromDiskCache || c.fromMemoryCache); + if (nonPreloadCandidates.length && allPreloaded) { + candidates = linkPreloadCandidates; + } + } + } + + // Only return an initiator if the result is unambiguous. + return candidates.length === 1 ? 
candidates[0] : null; +} + +function linkInitiators(lanternRequests: Lantern.NetworkRequest[]): void { + const requestsByURL: Map<string, Lantern.NetworkRequest[]> = new Map(); + for (const request of lanternRequests) { + const requests = requestsByURL.get(request.url) || []; + requests.push(request); + requestsByURL.set(request.url, requests); + } + + for (const request of lanternRequests) { + const initiatorRequest = chooseInitiatorRequest(request, requestsByURL); + if (initiatorRequest) { + request.initiatorRequest = initiatorRequest; + } + } +} + +function createNetworkRequests(trace: Lantern.Trace, traceEngineData: TraceParseData): Lantern.NetworkRequest[] { + const workerThreads = findWorkerThreads(trace); + + const lanternRequests: NetworkRequest[] = []; + for (const request of traceEngineData.NetworkRequests.byTime) { + const lanternRequest = createLanternRequest(traceEngineData, workerThreads, request); + if (lanternRequest) { + lanternRequests.push(lanternRequest); + } + } + + // TraceEngine consolidates all redirects into a single request object, but lantern needs + // an entry for each redirected request. 
+ for (const request of [...lanternRequests]) { + if (!request.rawRequest) { + continue; + } + + const redirects = request.rawRequest.args.data.redirects; + if (!redirects.length) { + continue; + } + + const requestChain = []; + for (const redirect of redirects) { + const redirectedRequest = structuredClone(request); + + redirectedRequest.networkRequestTime = redirect.ts / 1000; + redirectedRequest.rendererStartTime = redirectedRequest.networkRequestTime; + + redirectedRequest.networkEndTime = (redirect.ts + redirect.dur) / 1000; + redirectedRequest.responseHeadersEndTime = redirectedRequest.networkEndTime; + + redirectedRequest.timing = { + requestTime: redirectedRequest.networkRequestTime / 1000, + receiveHeadersStart: redirectedRequest.responseHeadersEndTime, + receiveHeadersEnd: redirectedRequest.responseHeadersEndTime, + proxyStart: -1, + proxyEnd: -1, + dnsStart: -1, + dnsEnd: -1, + connectStart: -1, + connectEnd: -1, + sslStart: -1, + sslEnd: -1, + sendStart: -1, + sendEnd: -1, + workerStart: -1, + workerReady: -1, + workerFetchStart: -1, + workerRespondWithSettled: -1, + pushStart: -1, + pushEnd: -1, + }; + + redirectedRequest.url = redirect.url; + redirectedRequest.parsedURL = createParsedUrl(redirect.url); + // TODO: TraceEngine is not retaining the actual status code. + redirectedRequest.statusCode = 302; + redirectedRequest.resourceType = undefined; + // TODO: TraceEngine is not retaining transfer size of redirected request. 
+ redirectedRequest.transferSize = 400; + requestChain.push(redirectedRequest); + lanternRequests.push(redirectedRequest); + } + requestChain.push(request); + + for (let i = 0; i < requestChain.length; i++) { + const request = requestChain[i]; + if (i > 0) { + request.redirectSource = requestChain[i - 1]; + request.redirects = requestChain.slice(0, i); + } + if (i !== requestChain.length - 1) { + request.redirectDestination = requestChain[i + 1]; + } + } + + // Apply the `:redirect` requestId convention: only redirects[0].requestId is the actual + // requestId, all the rest have n occurrences of `:redirect` as a suffix. + for (let i = 1; i < requestChain.length; i++) { + requestChain[i].requestId = `${requestChain[i - 1].requestId}:redirect`; + } + } + + linkInitiators(lanternRequests); + + // This would already be sorted by rendererStartTime, if not for the redirect unwrapping done + // above. + return lanternRequests.sort((a, b) => a.rendererStartTime - b.rendererStartTime); +} + +function collectMainThreadEvents(trace: Lantern.Trace, traceEngineData: TraceParseData): Lantern.TraceEvent[] { + const Meta = traceEngineData.Meta; + const mainFramePids = Meta.mainFrameNavigations.length ? new Set(Meta.mainFrameNavigations.map(nav => nav.pid)) : + Meta.topLevelRendererIds; + + const rendererPidToTid = new Map(); + for (const pid of mainFramePids) { + const threads = Meta.threadsInProcess.get(pid) ?? []; + + let found = false; + for (const [tid, thread] of threads) { + if (thread.args.name === 'CrRendererMain') { + rendererPidToTid.set(pid, tid); + found = true; + break; + } + } + + if (found) { + continue; + } + + // `CrRendererMain` can be missing if chrome is launched with the `--single-process` flag. + // In this case, page tasks will be run in the browser thread. 
+ for (const [tid, thread] of threads) { + if (thread.args.name === 'CrBrowserMain') { + rendererPidToTid.set(pid, tid); + found = true; + break; + } + } + } + + return trace.traceEvents.filter(e => rendererPidToTid.get(e.pid) === e.tid); +} + +function createGraph( + requests: Lantern.NetworkRequest[], trace: Lantern.Trace, traceEngineData: TraceParseData, + url?: Lantern.Simulation.URL): Lantern.Node { + const mainThreadEvents = collectMainThreadEvents(trace, traceEngineData); + + // url defines the initial request that the Lantern graph starts at (the root node) and the + // main document request. These are equal if there are no redirects. + if (!url) { + url = { + requestedUrl: requests[0].url, + mainDocumentUrl: '', + }; + + let request = requests[0]; + while (request.redirectDestination) { + request = request.redirectDestination; + } + url.mainDocumentUrl = request.url; + } + + return Lantern.PageDependencyGraph.createGraph(mainThreadEvents, requests, url); +} + +export { + createProcessedNavigation, + createNetworkRequests, + createGraph, +}; diff --git a/front_end/models/trace/lantern/.eslintrc.js b/front_end/models/trace/lantern/.eslintrc.js new file mode 100644 index 00000000000..acd946f6f52 --- /dev/null +++ b/front_end/models/trace/lantern/.eslintrc.js @@ -0,0 +1,18 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +const path = require('path'); +const rulesDirPlugin = require('eslint-plugin-rulesdir'); +rulesDirPlugin.RULES_DIR = path.join(__dirname, '..', '..', '..', '..', 'scripts', 'eslint_rules', 'lib'); + +module.exports = { + 'overrides' : [{ + 'files' : ['*.ts'], + 'rules' : { + '@typescript-eslint/no-unused-vars' : ['error', {'argsIgnorePattern' : '^_'}], + // TODO(crbug.com/348449529): off due to Lantern needing more refactoring. 
+ 'rulesdir/no_underscored_properties' : 'off', + } + }] +}; diff --git a/front_end/models/trace/lantern/BUILD.gn b/front_end/models/trace/lantern/BUILD.gn new file mode 100644 index 00000000000..d80d7a9a3fc --- /dev/null +++ b/front_end/models/trace/lantern/BUILD.gn @@ -0,0 +1,74 @@ +# Copyright 2024 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("../../../../scripts/build/ninja/devtools_entrypoint.gni") +import("../../../../scripts/build/ninja/devtools_module.gni") +import("../../../../third_party/typescript/typescript.gni") +import("../../visibility.gni") + +devtools_module("lantern") { + sources = [ + "BaseNode.ts", + "CpuNode.ts", + "LanternError.ts", + "Metric.ts", + "NetworkNode.ts", + "PageDependencyGraph.ts", + "TBTUtils.ts", + "metrics/FirstContentfulPaint.ts", + "metrics/Interactive.ts", + "metrics/LargestContentfulPaint.ts", + "metrics/MaxPotentialFID.ts", + "metrics/SpeedIndex.ts", + "metrics/TotalBlockingTime.ts", + "metrics/metrics.ts", + "simulation/ConnectionPool.ts", + "simulation/Constants.ts", + "simulation/DNSCache.ts", + "simulation/NetworkAnalyzer.ts", + "simulation/SimulationTimingMap.ts", + "simulation/Simulator.ts", + "simulation/TcpConnection.ts", + "simulation/simulation.ts", + "types/lantern.ts", + ] + + deps = [ "../types:bundle" ] +} + +devtools_entrypoint("bundle") { + entrypoint = "lantern.ts" + deps = [ ":lantern" ] + visibility = [ + ":*", + "../*", + ] + + visibility += devtools_models_visibility +} + +ts_library("unittests") { + testonly = true + + sources = [ + "BaseNode.test.ts", + "PageDependencyGraph.test.ts", + "TBTUtils.test.ts", + "metrics/FirstContentfulPaint.test.ts", + "metrics/Interactive.test.ts", + "metrics/LargestContentfulPaint.test.ts", + "metrics/SpeedIndex.test.ts", + "simulation/ConnectionPool.test.ts", + "simulation/DNSCache.test.ts", + "simulation/NetworkAnalyzer.test.ts", + 
"simulation/Simulator.test.ts", + "simulation/TCPConnection.test.ts", + "testing/MetricTestUtils.ts", + ] + + deps = [ + "../:bundle", + "../../../testing", + ] +} diff --git a/front_end/models/trace/lantern/BaseNode.test.ts b/front_end/models/trace/lantern/BaseNode.test.ts new file mode 100644 index 00000000000..fa0418f178e --- /dev/null +++ b/front_end/models/trace/lantern/BaseNode.test.ts @@ -0,0 +1,390 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-nocheck TODO(crbug.com/348449529) + +import * as Lantern from './lantern.js'; + +const {BaseNode, NetworkNode} = Lantern; + +function sortedById(nodeArray: Lantern.Node[]) { + return nodeArray.sort((node1, node2) => node1.id.localeCompare(node2.id)); +} + +function createComplexGraph() { + // B F + // / \ / + // A D - E + // \ / \ + // C G - H + + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + const nodeD = new BaseNode('D'); + const nodeE = new BaseNode('E'); + const nodeF = new BaseNode('F'); + const nodeG = new BaseNode('G'); + const nodeH = new BaseNode('H'); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeB.addDependent(nodeD); + nodeC.addDependent(nodeD); + nodeD.addDependent(nodeE); + nodeE.addDependent(nodeF); + nodeE.addDependent(nodeG); + nodeG.addDependent(nodeH); + + return { + nodeA, + nodeB, + nodeC, + nodeD, + nodeE, + nodeF, + nodeG, + nodeH, + }; +} + +describe('BaseNode', () => { + describe('#constructor', () => { + it('should set the ID', () => { + const node = new BaseNode('foo'); + assert.strictEqual(node.id, 'foo'); + }); + }); + + describe('.addDependent', () => { + it('should add the correct edge', () => { + const nodeA = new BaseNode('1'); + const nodeB = new BaseNode('2'); + 
nodeA.addDependent(nodeB); + + assert.deepEqual(nodeA.getDependents(), [nodeB]); + assert.deepEqual(nodeB.getDependencies(), [nodeA]); + }); + }); + + describe('.addDependency', () => { + it('should add the correct edge', () => { + const nodeA = new BaseNode('1'); + const nodeB = new BaseNode('2'); + nodeA.addDependency(nodeB); + + assert.deepEqual(nodeA.getDependencies(), [nodeB]); + assert.deepEqual(nodeB.getDependents(), [nodeA]); + }); + + it('throw when trying to add a dependency on itself', () => { + const nodeA = new BaseNode('1'); + expect(() => nodeA.addDependency(nodeA)).to.throw(); + }); + }); + + describe('.isDependentOn', () => { + it('should identify the dependency relationships', () => { + const graph = createComplexGraph(); + const nodes = Object.values(graph); + const {nodeA, nodeB, nodeD, nodeF, nodeH} = graph; + + for (const node of nodes) { + expect(nodeA.isDependentOn(node)).equals(node === nodeA); + expect(nodeB.isDependentOn(node)).equals(node === nodeA || node === nodeB); + expect(nodeH.isDependentOn(node)).equals(node !== nodeF); + } + + expect(nodeD.isDependentOn(nodeA)).equals(true); + expect(nodeD.isDependentOn(nodeB)).equals(true); + expect(nodeD.isDependentOn(nodeD)).equals(true); + + expect(nodeD.isDependentOn(nodeH)).equals(false); + expect(nodeH.isDependentOn(nodeD)).equals(true); + + expect(nodeF.isDependentOn(nodeH)).equals(false); + expect(nodeH.isDependentOn(nodeF)).equals(false); + }); + }); + + describe('.getRootNode', () => { + it('should return the root node', () => { + const graph = createComplexGraph(); + + assert.strictEqual(graph.nodeA.getRootNode(), graph.nodeA); + assert.strictEqual(graph.nodeB.getRootNode(), graph.nodeA); + assert.strictEqual(graph.nodeD.getRootNode(), graph.nodeA); + assert.strictEqual(graph.nodeF.getRootNode(), graph.nodeA); + }); + }); + + describe('.cloneWithoutRelationships', () => { + it('should create a copy', () => { + const node = new BaseNode('1'); + const neighbor = new BaseNode('2'); + 
node.addDependency(neighbor); + const clone = node.cloneWithoutRelationships(); + + assert.strictEqual(clone.id, '1'); + assert.notEqual(node, clone); + assert.strictEqual(clone.getDependencies().length, 0); + }); + + it('should copy isMainDocument', () => { + const node = new BaseNode('1'); + node.setIsMainDocument(true); + const networkNode = new NetworkNode({}); + networkNode.setIsMainDocument(true); + + assert.ok(node.cloneWithoutRelationships().isMainDocument()); + assert.ok(networkNode.cloneWithoutRelationships().isMainDocument()); + }); + }); + + describe('.cloneWithRelationships', () => { + it('should create a copy of a basic graph', () => { + const node = new BaseNode('1'); + const neighbor = new BaseNode('2'); + node.addDependency(neighbor); + const clone = node.cloneWithRelationships(); + + assert.strictEqual(clone.id, '1'); + assert.notEqual(node, clone); + + const dependencies = clone.getDependencies(); + assert.strictEqual(dependencies.length, 1); + + const neighborClone = dependencies[0]; + assert.strictEqual(neighborClone.id, neighbor.id); + assert.notEqual(neighborClone, neighbor); + assert.strictEqual(neighborClone.getDependents()[0], clone); + }); + + it('should create a copy of a complex graph', () => { + const graph = createComplexGraph(); + const clone = graph.nodeA.cloneWithRelationships(); + + const clonedIdMap = new Map(); + clone.traverse(node => clonedIdMap.set(node.id, node)); + assert.strictEqual(clonedIdMap.size, 8); + + graph.nodeA.traverse(node => { + const clone = clonedIdMap.get(node.id); + assert.strictEqual(clone.id, node.id); + assert.notEqual(clone, node); + + const actualDependents = sortedById(clone.getDependents()); + const expectedDependents = sortedById(node.getDependents()); + actualDependents.forEach((cloneDependent, index) => { + const originalDependent = expectedDependents[index]; + assert.strictEqual(cloneDependent.id, originalDependent.id); + assert.notEqual(cloneDependent, originalDependent); + }); + }); + }); + + 
it('should create a copy of a graph with long dependency chains', () => { + // C - D - E - F + // / \ + // A - - - - - - - B + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + const nodeD = new BaseNode('D'); + const nodeE = new BaseNode('E'); + const nodeF = new BaseNode('F'); + + nodeA.addDependent(nodeB); + nodeF.addDependent(nodeB); + + nodeA.addDependent(nodeC); + nodeC.addDependent(nodeD); + nodeD.addDependent(nodeE); + nodeE.addDependent(nodeF); + + const clone = nodeA.cloneWithRelationships(); + + const clonedIdMap = new Map(); + clone.traverse(node => clonedIdMap.set(node.id, node)); + assert.strictEqual(clonedIdMap.size, 6); + }); + + it('should create a copy when not starting at root node', () => { + const graph = createComplexGraph(); + const cloneD = graph.nodeD.cloneWithRelationships(); + assert.strictEqual(cloneD.id, 'D'); + assert.strictEqual(cloneD.getRootNode().id, 'A'); + }); + + it('should create a partial copy of a complex graph', () => { + const graph = createComplexGraph(); + // create a clone with F and all its dependencies + const clone = graph.nodeA.cloneWithRelationships(node => node.id === 'F'); + + const clonedIdMap = new Map(); + clone.traverse(node => clonedIdMap.set(node.id, node)); + + assert.strictEqual(clonedIdMap.size, 6); + assert.ok(clonedIdMap.has('F'), 'did not include target node'); + assert.ok(clonedIdMap.has('E'), 'did not include dependency'); + assert.ok(clonedIdMap.has('B'), 'did not include branched dependency'); + assert.ok(clonedIdMap.has('C'), 'did not include branched dependency'); + assert.strictEqual(clonedIdMap.get('G'), undefined); + assert.strictEqual(clonedIdMap.get('H'), undefined); + }); + + it('should throw if original node is not in cloned graph', () => { + const graph = createComplexGraph(); + assert.throws( + // clone from root to nodeB, but called on nodeD + _ => graph.nodeD.cloneWithRelationships(node => node.id === 'B'), + /^Cloned graph missing 
node$/, + ); + }); + }); + + describe('.traverse', () => { + it('should visit every dependent node', () => { + const graph = createComplexGraph(); + const ids = []; + graph.nodeA.traverse(node => ids.push(node.id)); + + assert.deepEqual(ids, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']); + }); + + it('should include a shortest traversal path to every dependent node', () => { + const graph = createComplexGraph(); + const paths = []; + graph.nodeA.traverse((node, traversalPath) => { + assert.strictEqual(node.id, traversalPath[0].id); + paths.push(traversalPath.map(node => node.id)); + }); + + assert.deepStrictEqual(paths, [ + ['A'], + ['B', 'A'], + ['C', 'A'], + ['D', 'B', 'A'], + ['E', 'D', 'B', 'A'], + ['F', 'E', 'D', 'B', 'A'], + ['G', 'E', 'D', 'B', 'A'], + ['H', 'G', 'E', 'D', 'B', 'A'], + ]); + }); + + it('should respect getNext', () => { + const graph = createComplexGraph(); + const ids = []; + graph.nodeF.traverse( + node => ids.push(node.id), + node => node.getDependencies(), + ); + + assert.deepEqual(ids, ['F', 'E', 'D', 'B', 'C', 'A']); + }); + }); + + describe('#hasCycle', () => { + it('should return false for DAGs', () => { + const graph = createComplexGraph(); + assert.strictEqual(BaseNode.hasCycle(graph.nodeA), false); + }); + + it('should return false for triangular DAGs', () => { + // B + // / \ + // A - C + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + + assert.strictEqual(BaseNode.hasCycle(nodeA), false); + }); + + it('should return true for basic cycles', () => { + // A - B - C - A! + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeC.addDependent(nodeA); + + assert.strictEqual(BaseNode.hasCycle(nodeA), true); + }); + + it('should return true for children', () => { + // A! 
+ // / + // A - B - C + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeB.addDependent(nodeA); + + assert.strictEqual(BaseNode.hasCycle(nodeC), true); + }); + + it('should return true for complex cycles', () => { + // B - D - F - G - C! + // / / + // A - - C - E - H + const nodeA = new BaseNode('A'); + const nodeB = new BaseNode('B'); + const nodeC = new BaseNode('C'); + const nodeD = new BaseNode('D'); + const nodeE = new BaseNode('E'); + const nodeF = new BaseNode('F'); + const nodeG = new BaseNode('G'); + const nodeH = new BaseNode('H'); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeB.addDependent(nodeD); + nodeC.addDependent(nodeE); + nodeC.addDependent(nodeF); + nodeD.addDependent(nodeF); + nodeE.addDependent(nodeH); + nodeF.addDependent(nodeG); + nodeG.addDependent(nodeC); + + assert.strictEqual(BaseNode.hasCycle(nodeA), true); + assert.strictEqual(BaseNode.hasCycle(nodeB), true); + assert.strictEqual(BaseNode.hasCycle(nodeC), true); + assert.strictEqual(BaseNode.hasCycle(nodeD), true); + assert.strictEqual(BaseNode.hasCycle(nodeE), true); + assert.strictEqual(BaseNode.hasCycle(nodeF), true); + assert.strictEqual(BaseNode.hasCycle(nodeG), true); + assert.strictEqual(BaseNode.hasCycle(nodeH), true); + }); + + it('works for very large graphs', () => { + const root = new BaseNode('root'); + + let lastNode = root; + for (let i = 0; i < 10000; i++) { + const nextNode = new BaseNode(`child${i}`); + lastNode.addDependent(nextNode); + lastNode = nextNode; + } + + lastNode.addDependent(root); + assert.strictEqual(BaseNode.hasCycle(root), true); + }); + }); +}); diff --git a/front_end/models/trace/lantern/BaseNode.ts b/front_end/models/trace/lantern/BaseNode.ts new file mode 100644 index 00000000000..8c97a2eb247 --- /dev/null +++ b/front_end/models/trace/lantern/BaseNode.ts @@ -0,0 +1,339 @@ +// Copyright 2024 The Chromium 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import {type CPUNode} from './CpuNode.js'; +import {type NetworkNode} from './NetworkNode.js'; +import type * as Lantern from './types/lantern.js'; + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * A union of all types derived from BaseNode, allowing type check discrimination + * based on `node.type`. If a new node type is created, it should be added here. + */ +export type Node = CPUNode|NetworkNode; + +/** + * @fileoverview This class encapsulates logic for handling resources and tasks used to model the + * execution dependency graph of the page. A node has a unique identifier and can depend on other + * nodes/be depended on. The construction of the graph maintains some important invariants that are + * inherent to the model: + * + * 1. The graph is a DAG, there are no cycles. + * 2. There is always a root node upon which all other nodes eventually depend. + * + * This allows particular optimizations in this class so that we do no need to check for cycles as + * these methods are called and we can always start traversal at the root node. 
+ */ + +class BaseNode { + static types = { + NETWORK: 'network', + CPU: 'cpu', + } as const; + + _id: string; + _isMainDocument: boolean; + _dependents: Node[]; + _dependencies: Node[]; + + constructor(id: string) { + this._id = id; + this._isMainDocument = false; + this._dependents = []; + this._dependencies = []; + } + + get id(): string { + return this._id; + } + + get type(): 'network'|'cpu' { + throw new Error('Unimplemented'); + } + + /** + * In microseconds + */ + get startTime(): number { + throw new Error('Unimplemented'); + } + + /** + * In microseconds + */ + get endTime(): number { + throw new Error('Unimplemented'); + } + + setIsMainDocument(value: boolean): void { + this._isMainDocument = value; + } + + isMainDocument(): boolean { + return this._isMainDocument; + } + + getDependents(): Node[] { + return this._dependents.slice(); + } + + getNumberOfDependents(): number { + return this._dependents.length; + } + + getDependencies(): Node[] { + return this._dependencies.slice(); + } + + getNumberOfDependencies(): number { + return this._dependencies.length; + } + + getRootNode(): Node { + let rootNode = this as BaseNode as Node; + while (rootNode._dependencies.length) { + rootNode = rootNode._dependencies[0]; + } + + return rootNode; + } + + addDependent(node: Node): void { + node.addDependency(this as BaseNode as Node); + } + + addDependency(node: Node): void { + // @ts-expect-error - in checkJs, ts doesn't know that CPUNode and NetworkNode *are* BaseNodes. 
+ if (node === this) { + throw new Error('Cannot add dependency on itself'); + } + + if (this._dependencies.includes(node)) { + return; + } + + node._dependents.push(this as BaseNode as Node); + this._dependencies.push(node); + } + + removeDependent(node: Node): void { + node.removeDependency(this as BaseNode as Node); + } + + removeDependency(node: Node): void { + if (!this._dependencies.includes(node)) { + return; + } + + const thisIndex = node._dependents.indexOf(this as BaseNode as Node); + node._dependents.splice(thisIndex, 1); + this._dependencies.splice(this._dependencies.indexOf(node), 1); + } + + removeAllDependencies(): void { + for (const node of this._dependencies.slice()) { + this.removeDependency(node); + } + } + + /** + * Computes whether the given node is anywhere in the dependency graph of this node. + * While this method can prevent cycles, it walks the graph and should be used sparingly. + * Nodes are always considered dependent on themselves for the purposes of cycle detection. + */ + isDependentOn(node: BaseNode): boolean { + let isDependentOnNode = false; + this.traverse( + currentNode => { + if (isDependentOnNode) { + return; + } + isDependentOnNode = currentNode === node; + }, + currentNode => { + // If we've already found the dependency, don't traverse further. + if (isDependentOnNode) { + return []; + } + // Otherwise, traverse the dependencies. + return currentNode.getDependencies(); + }); + + return isDependentOnNode; + } + + /** + * Clones the node's information without adding any dependencies/dependents. + */ + cloneWithoutRelationships(): Node { + const node = new BaseNode(this.id) as Node; + node.setIsMainDocument(this._isMainDocument); + return node; + } + + /** + * Clones the entire graph connected to this node filtered by the optional predicate. If a node is + * included by the predicate, all nodes along the paths between the node and the root will be included. 
If the + * node this was called on is not included in the resulting filtered graph, the method will throw. + */ + cloneWithRelationships(predicate?: (arg0: Node) => boolean): Node { + const rootNode = this.getRootNode(); + + const idsToIncludedClones = new Map(); + + // Walk down dependents. + rootNode.traverse(node => { + if (idsToIncludedClones.has(node.id)) { + return; + } + + if (predicate === undefined) { + // No condition for entry, so clone every node. + idsToIncludedClones.set(node.id, node.cloneWithoutRelationships()); + return; + } + + if (predicate(node)) { + // Node included, so walk back up dependencies, cloning nodes from here back to the root. + node.traverse( + node => idsToIncludedClones.set(node.id, node.cloneWithoutRelationships()), + // Dependencies already cloned have already cloned ancestors, so no need to visit again. + node => node._dependencies.filter(parent => !idsToIncludedClones.has(parent.id)), + ); + } + }); + + // Copy dependencies between nodes. + rootNode.traverse(originalNode => { + const clonedNode = idsToIncludedClones.get(originalNode.id); + if (!clonedNode) { + return; + } + + for (const dependency of originalNode._dependencies) { + const clonedDependency = idsToIncludedClones.get(dependency.id); + if (!clonedDependency) { + throw new Error('Dependency somehow not cloned'); + } + clonedNode.addDependency(clonedDependency); + } + }); + + const clonedThisNode = idsToIncludedClones.get(this.id); + if (!clonedThisNode) { + throw new Error('Cloned graph missing node'); + } + return clonedThisNode; + } + + /** + * Traverses all connected nodes in BFS order, calling `callback` exactly once + * on each. `traversalPath` is the shortest (though not necessarily unique) + * path from `node` to the root of the iteration. + * + * The `getNextNodes` function takes a visited node and returns which nodes to + * visit next. It defaults to returning the node's dependents. 
+ */ + traverse(callback: (node: Node, traversalPath: Node[]) => void, getNextNodes?: (arg0: Node) => Node[]): + void { + for (const {node, traversalPath} of this.traverseGenerator(getNextNodes)) { + callback(node, traversalPath); + } + } + + /** + * @see BaseNode.traverse + */ + * + traverseGenerator(getNextNodes?: (arg0: Node) => Node[]): + Generator<{node: Node, traversalPath: Node[]}, void, unknown> { + if (!getNextNodes) { + getNextNodes = node => node.getDependents(); + } + + // @ts-expect-error - only traverses graphs of Node, so force tsc to treat `this` as one + const queue: Node[][] = [[this]]; + const visited = new Set([this.id]); + + while (queue.length) { + // @ts-expect-error - queue has length so it's guaranteed to have an item + const traversalPath: Node[] = queue.shift(); + const node = traversalPath[0]; + yield {node, traversalPath}; + + for (const nextNode of getNextNodes(node)) { + if (visited.has(nextNode.id)) { + continue; + } + visited.add(nextNode.id); + + queue.push([nextNode, ...traversalPath]); + } + } + } + + /** + * Returns whether the given node has a cycle in its dependent graph by performing a DFS. 
+ */ + static hasCycle(node: Node, direction: 'dependents'|'dependencies'|'both' = 'both'): boolean { + // Checking 'both' is the default entrypoint to recursively check both directions + if (direction === 'both') { + return BaseNode.hasCycle(node, 'dependents') || BaseNode.hasCycle(node, 'dependencies'); + } + + const visited = new Set(); + const currentPath: BaseNode[] = []; + const toVisit = [node]; + const depthAdded = new Map([[node, 0]]); + + // Keep going while we have nodes to visit in the stack + while (toVisit.length) { + // Get the last node in the stack (DFS uses stack, not queue) + // @ts-expect-error - toVisit has length so it's guaranteed to have an item + const currentNode: BaseNode = toVisit.pop(); + + // We've hit a cycle if the node we're visiting is in our current dependency path + if (currentPath.includes(currentNode)) { + return true; + } + // If we've already visited the node, no need to revisit it + if (visited.has(currentNode)) { + continue; + } + + // Since we're visiting this node, clear out any nodes in our path that we had to backtrack + // @ts-expect-error + while (currentPath.length > depthAdded.get(currentNode)) { + currentPath.pop(); + } + + // Update our data structures to reflect that we're adding this node to our path + visited.add(currentNode); + currentPath.push(currentNode); + + // Add all of its dependents to our toVisit stack + const nodesToExplore = direction === 'dependents' ? 
currentNode._dependents : currentNode._dependencies; + for (const nextNode of nodesToExplore) { + if (toVisit.includes(nextNode)) { + continue; + } + toVisit.push(nextNode); + depthAdded.set(nextNode, currentPath.length); + } + } + + return false; + } + + canDependOn(node: Node): boolean { + return node.startTime <= this.startTime; + } +} + +export {BaseNode}; diff --git a/front_end/models/trace/lantern/CpuNode.ts b/front_end/models/trace/lantern/CpuNode.ts new file mode 100644 index 00000000000..dfc7e8e62bb --- /dev/null +++ b/front_end/models/trace/lantern/CpuNode.ts @@ -0,0 +1,85 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode} from './BaseNode.js'; +import type * as Lantern from './types/lantern.js'; + +class CPUNode extends BaseNode { + _event: Lantern.TraceEvent; + _childEvents: Lantern.TraceEvent[]; + _correctedEndTs: number|undefined; + + constructor(parentEvent: Lantern.TraceEvent, childEvents: Lantern.TraceEvent[] = [], correctedEndTs?: number) { + const nodeId = `${parentEvent.tid}.${parentEvent.ts}`; + super(nodeId); + + this._event = parentEvent; + this._childEvents = childEvents; + this._correctedEndTs = correctedEndTs; + } + + override get type(): 'cpu' { + return BaseNode.types.CPU; + } + + override get startTime(): number { + return this._event.ts; + } + + override get endTime(): number { + if (this._correctedEndTs) { + return this._correctedEndTs; + } + return this._event.ts + this._event.dur; + } + + get duration(): number { + return this.endTime - this.startTime; + } + + get event(): Lantern.TraceEvent { + return this._event; + } + + get childEvents(): Lantern.TraceEvent[] { + return this._childEvents; + } + + /** + * Returns true if this node contains a Layout task. 
+ */ + didPerformLayout(): boolean { + return this._childEvents.some(evt => evt.name === 'Layout'); + } + + /** + * Returns the script URLs that had their EvaluateScript events occur in this task. + */ + getEvaluateScriptURLs(): Set<string> { + const urls = new Set<string>(); + for (const event of this._childEvents) { + if (event.name !== 'EvaluateScript') { + continue; + } + if (!event.args.data || !event.args.data.url) { + continue; + } + urls.add(event.args.data.url); + } + + return urls; + } + + override cloneWithoutRelationships(): CPUNode { + return new CPUNode(this._event, this._childEvents, this._correctedEndTs); + } +} + +export {CPUNode}; diff --git a/front_end/models/trace/lantern/LanternError.ts b/front_end/models/trace/lantern/LanternError.ts new file mode 100644 index 00000000000..7911d2f4049 --- /dev/null +++ b/front_end/models/trace/lantern/LanternError.ts @@ -0,0 +1,13 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +class LanternError extends Error {} + +export {LanternError}; diff --git a/front_end/models/trace/lantern/Metric.ts b/front_end/models/trace/lantern/Metric.ts new file mode 100644 index 00000000000..d658517046b --- /dev/null +++ b/front_end/models/trace/lantern/Metric.ts @@ -0,0 +1,114 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode, type Node} from './BaseNode.js'; +import {type NetworkNode} from './NetworkNode.js'; +import type * as Lantern from './types/lantern.js'; + +export interface Extras { + optimistic: boolean; + fcpResult?: Lantern.Metrics.Result; + lcpResult?: Lantern.Metrics.Result; + interactiveResult?: Lantern.Metrics.Result; + observedSpeedIndex?: number; +} + +class Metric { + static getScriptUrls(dependencyGraph: Node, treatNodeAsRenderBlocking?: (node: NetworkNode) => boolean): Set<string> { + const scriptUrls: Set<string> = new Set(); + + dependencyGraph.traverse(node => { + if (node.type !== BaseNode.types.NETWORK) { + return; + } + if (node.request.resourceType !== 'Script') { + return; + } + if (treatNodeAsRenderBlocking?.(node)) { + scriptUrls.add(node.request.url); + } + }); + + return scriptUrls; + } + + // eslint-disable-next-line @typescript-eslint/naming-convention + static get coefficients(): Lantern.Simulation.MetricCoefficients { + throw new Error('coefficients unimplemented!'); + } + + /* eslint-disable @typescript-eslint/no-unused-vars */ + + /** + * Returns the coefficients, scaled by the throttling settings if needed by the metric. + * Some lantern metrics (speed-index) use components in their estimate that are not + * from the simulator. In this case, we need to adjust the coefficients as the target throttling + * settings change.
+ */ + static getScaledCoefficients(rttMs: number): Lantern.Simulation.MetricCoefficients { + return this.coefficients; + } + + static getOptimisticGraph(dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node { + throw new Error('Optimistic graph unimplemented!'); + } + + static getPessimisticGraph(dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node { + throw new Error('Pessmistic graph unimplemented!'); + } + + static getEstimateFromSimulation(simulationResult: Lantern.Simulation.Result, extras: Extras): + Lantern.Simulation.Result { + return simulationResult; + } + + /* eslint-enable @typescript-eslint/no-unused-vars */ + + static async compute(data: Lantern.Simulation.MetricComputationDataInput, extras?: Omit<Extras, 'optimistic'>): + Promise<Lantern.Metrics.Result> { + const {simulator, graph, processedNavigation} = data; + + const metricName = this.name.replace('Lantern', ''); + const optimisticGraph = this.getOptimisticGraph(graph, processedNavigation); + const pessimisticGraph = this.getPessimisticGraph(graph, processedNavigation); + + let simulateOptions = {label: `optimistic${metricName}`}; + const optimisticSimulation = simulator.simulate(optimisticGraph, simulateOptions); + + simulateOptions = {label: `pessimistic${metricName}`}; + const pessimisticSimulation = simulator.simulate(pessimisticGraph, simulateOptions); + + const optimisticEstimate = this.getEstimateFromSimulation( + optimisticSimulation, + {...extras, optimistic: true}, + ); + + const pessimisticEstimate = this.getEstimateFromSimulation( + pessimisticSimulation, + {...extras, optimistic: false}, + ); + + const coefficients = this.getScaledCoefficients(simulator.rtt); + // Estimates under 1s don't really follow the normal curve fit, minimize the impact of the intercept + const interceptMultiplier = coefficients.intercept > 0 ?
Math.min(1, optimisticEstimate.timeInMs / 1000) : 1; + const timing = coefficients.intercept * interceptMultiplier + + coefficients.optimistic * optimisticEstimate.timeInMs + coefficients.pessimistic * pessimisticEstimate.timeInMs; + + return { + timing, + optimisticEstimate, + pessimisticEstimate, + optimisticGraph, + pessimisticGraph, + }; + } +} + +export {Metric}; diff --git a/front_end/models/trace/lantern/NetworkNode.ts b/front_end/models/trace/lantern/NetworkNode.ts new file mode 100644 index 00000000000..702dcacfb54 --- /dev/null +++ b/front_end/models/trace/lantern/NetworkNode.ts @@ -0,0 +1,106 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode} from './BaseNode.js'; +import type * as Lantern from './types/lantern.js'; + +const NON_NETWORK_SCHEMES = [ + 'blob', // @see https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL + 'data', // @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs + 'intent', // @see https://developer.chrome.com/docs/multidevice/android/intents/ + 'file', // @see https://en.wikipedia.org/wiki/File_URI_scheme + 'filesystem', // @see https://developer.mozilla.org/en-US/docs/Web/API/FileSystem + 'chrome-extension', +]; + +/** + * Note: the `protocol` field from CDP can be 'h2', 'http', (not 'https'!) or it'll be url's scheme. + * https://source.chromium.org/chromium/chromium/src/+/main:content/browser/devtools/protocol/network_handler.cc;l=598-611;drc=56d4a9a9deb30be73adcee8737c73bcb2a5ab64f + * However, a `new URL(href).protocol` has a colon suffix. + * https://url.spec.whatwg.org/#dom-url-protocol + * A URL's `scheme` is specced as the `protocol` sans-colon, but isn't exposed on a URL object. + * This method can take all 3 of these string types as a parameter. 
+ * + * @param protocol Either a networkRequest's `protocol` per CDP or a `new URL(href).protocol` + */ +function isNonNetworkProtocol(protocol: string): boolean { + // Strip off any colon + const urlScheme = protocol.includes(':') ? protocol.slice(0, protocol.indexOf(':')) : protocol; + return NON_NETWORK_SCHEMES.includes(urlScheme); +} + +class NetworkNode extends BaseNode { + _request: Lantern.NetworkRequest; + + constructor(networkRequest: Lantern.NetworkRequest) { + super(networkRequest.requestId); + this._request = networkRequest; + } + + override get type(): 'network' { + return BaseNode.types.NETWORK; + } + + override get startTime(): number { + return this._request.rendererStartTime * 1000; + } + + override get endTime(): number { + return this._request.networkEndTime * 1000; + } + + get rawRequest(): Readonly { + return this._request.rawRequest as Required; + } + + get request(): Lantern.NetworkRequest { + return this._request; + } + + get initiatorType(): string { + return this._request.initiator && this._request.initiator.type; + } + + get fromDiskCache(): boolean { + return Boolean(this._request.fromDiskCache); + } + + get isNonNetworkProtocol(): boolean { + // The 'protocol' field in devtools a string more like a `scheme` + return isNonNetworkProtocol(this.request.protocol) || + // But `protocol` can fail to be populated if the request fails, so fallback to scheme. + isNonNetworkProtocol(this.request.parsedURL.scheme); + } + + /** + * Returns whether this network request can be downloaded without a TCP connection. + * During simulation we treat data coming in over a network connection separately from on-device data. 
+ */ + get isConnectionless(): boolean { + return this.fromDiskCache || this.isNonNetworkProtocol; + } + + hasRenderBlockingPriority(): boolean { + const priority = this._request.priority; + const isScript = this._request.resourceType === 'Script'; + const isDocument = this._request.resourceType === 'Document'; + const isBlockingScript = priority === 'High' && isScript; + const isBlockingHtmlImport = priority === 'High' && isDocument; + return priority === 'VeryHigh' || isBlockingScript || isBlockingHtmlImport; + } + + override cloneWithoutRelationships(): NetworkNode { + const node = new NetworkNode(this._request); + node.setIsMainDocument(this._isMainDocument); + return node; + } +} + +export {NetworkNode}; diff --git a/front_end/models/trace/lantern/PageDependencyGraph.test.ts b/front_end/models/trace/lantern/PageDependencyGraph.test.ts new file mode 100644 index 00000000000..01a723e721d --- /dev/null +++ b/front_end/models/trace/lantern/PageDependencyGraph.test.ts @@ -0,0 +1,679 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-nocheck TODO(crbug.com/348449529) + +import * as Lantern from './lantern.js'; + +const {PageDependencyGraph, NetworkRequestTypes} = Lantern; + +function createRequest( + requestId, + url, + rendererStartTime = 0, + initiator = null, + resourceType = NetworkRequestTypes.Document, + fromWorker = false, + ): Lantern.NetworkRequest { + const networkEndTime = rendererStartTime + 50; + return { + requestId, + url, + rendererStartTime, + networkEndTime, + initiator, + resourceType, + fromWorker, + }; +} + +const TOPLEVEL_TASK_NAME = 'TaskQueueManager::ProcessTaskFromWorkQueue'; +describe('PageDependencyGraph', () => { + let traceEvents; + let url; + + function addTaskEvents(startTs, duration, evts) { + const mainEvent = { + name: TOPLEVEL_TASK_NAME, + tid: 1, + ts: startTs * 1000, + dur: duration * 1000, + args: {}, + }; + + traceEvents.push(mainEvent); + + let i = 0; + for (const evt of evts) { + i++; + traceEvents.push({ + name: evt.name, + ts: (evt.ts * 1000) || (startTs * 1000 + i), + args: {data: evt.data}, + }); + } + } + + beforeEach(() => { + traceEvents = []; + url = {requestedUrl: 'https://example.com/', mainDocumentUrl: 'https://example.com/'}; + }); + + describe('#getNetworkNodeOutput', () => { + const request1 = createRequest(1, 'https://example.com/'); + const request2 = createRequest(2, 'https://example.com/page'); + const request3 = createRequest(3, 'https://example.com/page'); + const networkRequests = [request1, request2, request3]; + + it('should create network nodes', () => { + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); + for (let i = 0; i < networkRequests.length; i++) { + const node = networkNodeOutput.nodes[i]; + assert.ok(node, `did not create node at index ${i}`); + assert.strictEqual(node.id, i + 1); + assert.strictEqual(node.type, 'network'); + assert.strictEqual(node.request, networkRequests[i]); + } 
+ }); + + it('should ignore worker requests', () => { + const workerRequest = createRequest(4, 'https://example.com/worker.js', 0, null, 'Script', true); + const recordsWithWorker = [ + ...networkRequests, + workerRequest, + ]; + + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(recordsWithWorker); + + expect(networkNodeOutput.nodes).to.have.lengthOf(3); + expect(networkNodeOutput.nodes.map(node => node.request)).not.contain(workerRequest); + }); + + it('should index nodes by ID', () => { + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); + const indexedById = networkNodeOutput.idToNodeMap; + for (const request of networkRequests) { + assert.strictEqual(indexedById.get(request.requestId).request, request); + } + }); + + it('should index nodes by URL', () => { + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); + const nodes = networkNodeOutput.nodes; + const indexedByUrl = networkNodeOutput.urlToNodeMap; + assert.deepEqual(indexedByUrl.get('https://example.com/'), [nodes[0]]); + assert.deepEqual(indexedByUrl.get('https://example.com/page'), [nodes[1], nodes[2]]); + }); + + it('should index nodes by frame', () => { + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput([ + {...createRequest(1, 'https://example.com/'), documentURL: 'https://example.com/', frameId: 'A'}, + {...createRequest(2, 'https://example.com/page'), documentURL: 'https://example.com/', frameId: 'A'}, + { + ...createRequest(3, 'https://example.com/page2'), + documentURL: 'https://example.com/page2', + frameId: 'C', + resourceType: NetworkRequestTypes.XHR, + }, + {...createRequest(4, 'https://example.com/page3'), documentURL: 'https://example.com/page3', frameId: 'D'}, + { + ...createRequest(4, 'https://example.com/page4'), + documentURL: 'https://example.com/page4', + frameId: undefined, + }, + { + ...createRequest(4, 'https://example.com/page5'), + documentURL: 'https://example.com/page5', + 
frameId: 'collision', + }, + { + ...createRequest(4, 'https://example.com/page6'), + documentURL: 'https://example.com/page6', + frameId: 'collision', + }, + ]); + + const nodes = networkNodeOutput.nodes; + const indexedByFrame = networkNodeOutput.frameIdToNodeMap; + expect([...indexedByFrame.entries()]).deep.equals([ + ['A', nodes[0]], + ['D', nodes[3]], + ['collision', null], + ]); + }); + }); + + describe('#getCPUNodes', () => { + it('should create CPU nodes', () => { + addTaskEvents(0, 100, [ + {name: 'MyCustomEvent'}, {name: 'OtherEvent'}, {name: 'OutsideTheWindow', ts: 200}, + {name: 'OrphanedEvent'}, // should be ignored since we stopped at OutsideTheWindow + ]); + + addTaskEvents(250, 50, [ + {name: 'LaterEvent'}, + ]); + + assert.strictEqual(traceEvents.length, 7); + const nodes = PageDependencyGraph.getCPUNodes(traceEvents); + assert.strictEqual(nodes.length, 2); + + const node1 = nodes[0]; + assert.strictEqual(node1.id, '1.0'); + assert.strictEqual(node1.type, 'cpu'); + assert.strictEqual(node1.event, traceEvents[0]); + assert.strictEqual(node1.childEvents.length, 2); + assert.strictEqual(node1.childEvents[1].name, 'OtherEvent'); + + const node2 = nodes[1]; + assert.strictEqual(node2.id, '1.250000'); + assert.strictEqual(node2.type, 'cpu'); + assert.strictEqual(node2.event, traceEvents[5]); + assert.strictEqual(node2.childEvents.length, 1); + assert.strictEqual(node2.childEvents[0].name, 'LaterEvent'); + }); + + it('should correct overlapping tasks', () => { + addTaskEvents(0, 500, [ + {name: 'MyCustomEvent'}, + {name: 'OtherEvent'}, + ]); + + addTaskEvents(400, 50, [ + {name: 'OverlappingEvent'}, + ]); + + assert.strictEqual(traceEvents.length, 5); + const nodes = PageDependencyGraph.getCPUNodes(traceEvents); + assert.strictEqual(nodes.length, 2); + + const node1 = nodes[0]; + assert.strictEqual(node1.id, '1.0'); + assert.strictEqual(node1.type, 'cpu'); + assert.strictEqual(node1.event, traceEvents[0]); + assert.strictEqual(node1.childEvents.length, 2); 
+ assert.strictEqual(node1.childEvents[0].name, 'MyCustomEvent'); + assert.strictEqual(node1.childEvents[1].name, 'OtherEvent'); + + const node2 = nodes[1]; + assert.strictEqual(node2.id, '1.400000'); + assert.strictEqual(node2.type, 'cpu'); + assert.strictEqual(node2.event, traceEvents[3]); + assert.strictEqual(node2.childEvents.length, 1); + assert.strictEqual(node2.childEvents[0].name, 'OverlappingEvent'); + }); + }); + + describe('#createGraph', () => { + it('should compute a simple network graph', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 5); + const request3 = createRequest(3, 'https://example.com/page2', 5); + const request4 = createRequest(4, 'https://example.com/page3', 10, {url: 'https://example.com/page'}); + const networkRequests = [request1, request2, request3, request4]; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 4); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[3].getDependencies(), [nodes[1]]); + }); + + it('should compute a simple network and CPU graph', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 50); + const request3 = createRequest(3, 'https://example.com/page2', 50); + const request4 = createRequest(4, 'https://example.com/page3', 300, null, NetworkRequestTypes.XHR); + const networkRequests = [request1, request2, request3, request4]; + + addTaskEvents(200, 200, [ + {name: 'EvaluateScript', data: {url: 'https://example.com/page'}}, + {name: 'ResourceSendRequest', data: {requestId: 
4}}, + ]); + + addTaskEvents(700, 50, [ + {name: 'InvalidateLayout', data: {stackTrace: [{url: 'https://example.com/page2'}]}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + const getIds = nodes => nodes.map(node => node.id); + const getDependencyIds = node => getIds(node.getDependencies()); + + assert.strictEqual(nodes.length, 6); + assert.deepEqual(getIds(nodes), [1, 2, 3, 4, '1.200000', '1.700000']); + assert.deepEqual(getDependencyIds(nodes[0]), []); + assert.deepEqual(getDependencyIds(nodes[1]), [1]); + assert.deepEqual(getDependencyIds(nodes[2]), [1]); + assert.deepEqual(getDependencyIds(nodes[3]), [1, '1.200000']); + assert.deepEqual(getDependencyIds(nodes[4]), [2]); + assert.deepEqual(getDependencyIds(nodes[5]), [3, 4]); + }); + + it('should compute a network graph with duplicate URLs', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 5); + const request3 = createRequest(3, 'https://example.com/page', 5); // duplicate URL + const request4 = createRequest(4, 'https://example.com/page3', 10, {url: 'https://example.com/page'}); + const networkRequests = [request1, request2, request3, request4]; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 4); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[3].getDependencies(), [nodes[0]]); // should depend on rootNode instead + }); + + it('should be forgiving 
without cyclic dependencies', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 250, null, NetworkRequestTypes.XHR); + const request3 = createRequest(3, 'https://example.com/page2', 210); + const request4 = createRequest(4, 'https://example.com/page3', 590); + const request5 = createRequest(5, 'https://example.com/page4', 595, null, NetworkRequestTypes.XHR); + const networkRequests = [request1, request2, request3, request4, request5]; + + addTaskEvents(200, 200, [ + // CPU 1.2 should depend on Network 1 + {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, + + // Network 2 should depend on CPU 1.2, but 1.2 should not depend on Network 1 + {name: 'ResourceSendRequest', data: {requestId: 2}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page'}}, + + // CPU 1.2 should not depend on Network 3 because it starts after CPU 1.2 + {name: 'EvaluateScript', data: {url: 'https://example.com/page2'}}, + ]); + + addTaskEvents(600, 150, [ + // CPU 1.6 should depend on Network 4 even though it ends at 410ms + {name: 'InvalidateLayout', data: {stackTrace: [{url: 'https://example.com/page3'}]}}, + // Network 5 should not depend on CPU 1.6 because it started before CPU 1.6 + {name: 'ResourceSendRequest', data: {requestId: 5}}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + const getDependencyIds = node => node.getDependencies().map(node => node.id); + + assert.strictEqual(nodes.length, 7); + assert.deepEqual(getDependencyIds(nodes[0]), []); + assert.deepEqual(getDependencyIds(nodes[1]), [1, '1.200000']); + assert.deepEqual(getDependencyIds(nodes[2]), [1]); + assert.deepEqual(getDependencyIds(nodes[3]), [1]); + assert.deepEqual(getDependencyIds(nodes[4]), [1]); + assert.deepEqual(getDependencyIds(nodes[5]), [1]); + 
assert.deepEqual(getDependencyIds(nodes[6]), [4]); + }); + + it('should not install timer dependency on itself', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const networkRequests = [request1]; + + addTaskEvents(200, 200, [ + // CPU 1.2 should depend on Network 1 + {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, + // CPU 1.2 will install and fire it's own timer, but should not depend on itself + {name: 'TimerInstall', data: {timerId: 'timer1'}}, + {name: 'TimerFire', data: {timerId: 'timer1'}}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + const getDependencyIds = node => node.getDependencies().map(node => node.id); + + assert.strictEqual(nodes.length, 2); + assert.deepEqual(getDependencyIds(nodes[0]), []); + assert.deepEqual(getDependencyIds(nodes[1]), [1]); + }); + + it('should prune short tasks', () => { + const request0 = createRequest(0, 'https://example.com/page0', 0); + const request1 = createRequest(1, 'https://example.com/', 100, null, NetworkRequestTypes.Script); + const request2 = createRequest(2, 'https://example.com/page', 200, null, NetworkRequestTypes.XHR); + const request3 = createRequest(3, 'https://example.com/page2', 300, null, NetworkRequestTypes.Script); + const request4 = createRequest(4, 'https://example.com/page3', 400, null, NetworkRequestTypes.XHR); + const networkRequests = [request0, request1, request2, request3, request4]; + url = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; + + // Long task, should be kept in the output. 
+ addTaskEvents(120, 50, [ + {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, + {name: 'ResourceSendRequest', data: {requestId: 2}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page'}}, + ]); + + // Short task, should be pruned, but the 3->4 relationship should be retained + addTaskEvents(350, 5, [ + {name: 'EvaluateScript', data: {url: 'https://example.com/page2'}}, + {name: 'ResourceSendRequest', data: {requestId: 4}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + const getDependencyIds = node => node.getDependencies().map(node => node.id); + + assert.strictEqual(nodes.length, 6); + + assert.deepEqual(getDependencyIds(nodes[0]), []); + assert.deepEqual(getDependencyIds(nodes[1]), [0]); + assert.deepEqual(getDependencyIds(nodes[2]), [0, '1.120000']); + assert.deepEqual(getDependencyIds(nodes[3]), [0]); + assert.deepEqual(getDependencyIds(nodes[4]), [0, 3]); + + assert.strictEqual('1.120000', nodes[5].id); + assert.deepEqual(getDependencyIds(nodes[5]), [1]); + }); + + it('should not prune highly-connected short tasks', () => { + const request0 = createRequest(0, 'https://example.com/page0', 0); + const request1 = { + ...createRequest(1, 'https://example.com/', 100, null, NetworkRequestTypes.Document), + documentURL: 'https://example.com/', + frameId: 'frame1', + }; + const request2 = { + ...createRequest(2, 'https://example.com/page', 200, null, NetworkRequestTypes.Script), + documentURL: 'https://example.com/', + frameId: 'frame1', + }; + const request3 = createRequest(3, 'https://example.com/page2', 300, null, NetworkRequestTypes.XHR); + const request4 = createRequest(4, 'https://example.com/page3', 400, null, NetworkRequestTypes.XHR); + const networkRequests = [request0, request1, request2, request3, request4]; + 
url = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; + + // Short task, evaluates script (2) and sends two XHRs. + addTaskEvents(220, 5, [ + {name: 'EvaluateScript', data: {url: 'https://example.com/page', frame: 'frame1'}}, + + {name: 'ResourceSendRequest', data: {requestId: 3}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page2'}}, + + {name: 'ResourceSendRequest', data: {requestId: 4}}, + {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + const getDependencyIds = node => node.getDependencies().map(node => node.id); + + assert.strictEqual(nodes.length, 6); + + assert.deepEqual(getDependencyIds(nodes[0]), []); + assert.deepEqual(getDependencyIds(nodes[1]), [0]); + assert.deepEqual(getDependencyIds(nodes[2]), [0]); + assert.deepEqual(getDependencyIds(nodes[3]), [0, '1.220000']); + assert.deepEqual(getDependencyIds(nodes[4]), [0, '1.220000']); + + assert.strictEqual('1.220000', nodes[5].id); + assert.deepEqual(getDependencyIds(nodes[5]), [1, 2]); + }); + + it('should not prune short, first tasks of critical events', () => { + const request0 = createRequest(0, 'https://example.com/page0', 0); + const networkRequests = [request0]; + url = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; + + const makeShortEvent = firstEventName => { + const startTs = traceEvents.length * 100; + addTaskEvents(startTs, 5, [ + {name: firstEventName, data: {url: 'https://example.com/page0'}}, + ]); + }; + + const criticalEventNames = [ + 'Paint', + 'Layout', + 'ParseHTML', + ]; + for (const eventName of criticalEventNames) { + makeShortEvent(eventName); + makeShortEvent(eventName); + } + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + 
const cpuNodes = []; + graph.traverse(node => node.type === 'cpu' && cpuNodes.push(node)); + + expect(cpuNodes.map(node => { + return { + id: node.id, + name: node.childEvents[0].name, + }; + })) + .deep.equals([ + { + id: '1.0', + name: 'Paint', + }, + { + // ID jumps by 4 between each because each node has 2 CPU tasks and we skip the 2nd of each event type + id: '1.400000', + name: 'Layout', + }, + { + id: '1.800000', + name: 'ParseHTML', + }, + ]); + }); + + it('should set isMainDocument on request with mainDocumentUrl', () => { + const request1 = createRequest(1, 'https://example.com/', 0, null, NetworkRequestTypes.Other); + const request2 = createRequest(2, 'https://example.com/page', 5, null, NetworkRequestTypes.Document); + // Add in another unrelated + early request to make sure we pick the correct chain + const request3 = createRequest(3, 'https://example.com/page2', 0, null, NetworkRequestTypes.Other); + request2.redirects = [request1]; + const networkRequests = [request1, request2, request3]; + url = {requestedUrl: 'https://example.com/', mainDocumentUrl: 'https://example.com/page'}; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 3); + assert.strictEqual(nodes[0].id, 1); + assert.strictEqual(nodes[0].isMainDocument(), false); + assert.strictEqual(nodes[1].isMainDocument(), true); + assert.strictEqual(nodes[2].isMainDocument(), false); + }); + + it('should link up script initiators', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 5); + const request3 = createRequest(3, 'https://example.com/page2', 5); + const request4 = createRequest(4, 'https://example.com/page3', 20); + // Set multiple initiator requests through script stack. 
+ request4.initiator = { + type: 'script', + stack: { + callFrames: [{url: 'https://example.com/page'}], + parent: {parent: {callFrames: [{url: 'https://example.com/page2'}]}}, + }, + }; + // Also set the initiatorRequest that Lighthouse's network-recorder.js creates. + // This should be ignored and only used as a fallback. + request4.initiatorRequest = request1; + const networkRequests = [request1, request2, request3, request4]; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 4); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[3].getDependencies(), [nodes[1], nodes[2]]); + }); + + it('should link up script initiators only when timing is valid', () => { + const request1 = createRequest(1, 'https://example.com/', 0); + const request2 = createRequest(2, 'https://example.com/page', 500); + const request3 = createRequest(3, 'https://example.com/page2', 500); + const request4 = createRequest(4, 'https://example.com/page3', 20); + request4.initiator = { + type: 'script', + stack: { + callFrames: [{url: 'https://example.com/page'}], + parent: {parent: {callFrames: [{url: 'https://example.com/page2'}]}}, + }, + }; + const networkRequests = [request1, request2, request3, request4]; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 4); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); + 
assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[3].getDependencies(), [nodes[0]]); + }); + + it('should link up script initiators with prefetch requests', () => { + const request1 = createRequest(1, 'https://a.com/1', 0); + const request2Prefetch = createRequest(2, 'https://a.com/js', 5); + const request2Fetch = createRequest(3, 'https://a.com/js', 10); + const request3 = createRequest(4, 'https://a.com/4', 20); + // Set the initiator to an ambiguous URL (there are 2 requests for https://a.com/js) + request3.initiator = { + type: 'script', + stack: {callFrames: [{url: 'https://a.com/js'}], parent: {parent: {callFrames: [{url: 'js'}]}}}, + }; + // Set the initiatorRequest that it should fallback to. + request3.initiatorRequest = request2Fetch; + const networkRequests = [request1, request2Prefetch, request2Fetch, request3]; + url = {requestedUrl: 'https://a.com/1', mainDocumentUrl: 'https://a.com/1'}; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 4); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + assert.deepEqual(nodes[3].getDependencies(), [nodes[2]]); + }); + + it('should not link up initiators with circular dependencies', () => { + const rootRequest = createRequest(1, 'https://a.com', 0); + // jsRequest1 initiated by jsRequest2 + // *AND* + // jsRequest2 initiated by jsRequest1 + const jsRequest1 = createRequest(2, 'https://a.com/js1', 1, {url: 'https://a.com/js2'}); + const jsRequest2 = createRequest(3, 'https://a.com/js2', 1, {url: 'https://a.com/js1'}); + const networkRequests = [rootRequest, jsRequest1, jsRequest2]; + url = {requestedUrl: 'https://a.com', mainDocumentUrl: 
'https://a.com'}; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + nodes.sort((a, b) => a.id - b.id); + + assert.strictEqual(nodes.length, 3); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3]); + assert.deepEqual(nodes[0].getDependencies(), []); + // We don't know which of the initiators to trust in a cycle, so for now we + // trust the earliest one (mostly because it's simplest). + // In the wild so far we've only seen this for self-referential relationships. + // If the evidence changes, then feel free to change these expectations :) + assert.deepEqual(nodes[1].getDependencies(), [nodes[2]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + }); + + it('should not link up initiatorRequests with circular dependencies', () => { + const rootRequest = createRequest(1, 'https://a.com', 0); + // jsRequest1 initiated by jsRequest2 + // *AND* + // jsRequest2 initiated by jsRequest1 + const jsRequest1 = createRequest(2, 'https://a.com/js1', 1); + const jsRequest2 = createRequest(3, 'https://a.com/js2', 1); + jsRequest1.initiatorRequest = jsRequest2; + jsRequest2.initiatorRequest = jsRequest1; + const networkRequests = [rootRequest, jsRequest1, jsRequest2]; + url = {requestedUrl: 'https://a.com', mainDocumentUrl: 'https://a.com'}; + + addTaskEvents(0, 0, []); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + nodes.sort((a, b) => a.id - b.id); + + assert.strictEqual(nodes.length, 3); + assert.deepEqual(nodes.map(node => node.id), [1, 2, 3]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[1].getDependencies(), [nodes[2]]); + assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); + }); + + it('should find root if it is not the first node', () => { + const request1 = createRequest(1, 
'https://example.com/', 0, null, NetworkRequestTypes.Other); + const request2 = createRequest(2, 'https://example.com/page', 5, null, NetworkRequestTypes.Document); + const networkRequests = [request1, request2]; + url = {requestedUrl: 'https://example.com/page', mainDocumentUrl: 'https://example.com/page'}; + + // Evaluated before root request. + addTaskEvents(0.1, 50, [ + {name: 'EvaluateScript'}, + ]); + + const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, url); + const nodes = []; + graph.traverse(node => nodes.push(node)); + + assert.strictEqual(nodes.length, 1); + assert.deepEqual(nodes.map(node => node.id), [2]); + assert.deepEqual(nodes[0].getDependencies(), []); + assert.deepEqual(nodes[0].getDependents(), []); + }); + }); +}); diff --git a/front_end/models/trace/lantern/PageDependencyGraph.ts b/front_end/models/trace/lantern/PageDependencyGraph.ts new file mode 100644 index 00000000000..385fde75a06 --- /dev/null +++ b/front_end/models/trace/lantern/PageDependencyGraph.ts @@ -0,0 +1,591 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {type Node} from './BaseNode.js'; +import {CPUNode} from './CpuNode.js'; +import {NetworkNode} from './NetworkNode.js'; +import {NetworkAnalyzer} from './simulation/NetworkAnalyzer.js'; +import type * as Lantern from './types/lantern.js'; + +// COMPAT: m71+ We added RunTask to `disabled-by-default-lighthouse` +const SCHEDULABLE_TASK_TITLE_LH = 'RunTask'; +// m69-70 DoWork is different and we now need RunTask, see https://bugs.chromium.org/p/chromium/issues/detail?id=871204#c11 +const SCHEDULABLE_TASK_TITLE_ALT1 = 'ThreadControllerImpl::RunTask'; +// In m66-68 refactored to this task title, https://crrev.com/c/883346 +const SCHEDULABLE_TASK_TITLE_ALT2 = 'ThreadControllerImpl::DoWork'; +// m65 and earlier +const SCHEDULABLE_TASK_TITLE_ALT3 = 'TaskQueueManager::ProcessTaskFromWorkQueue'; + +interface NetworkNodeOutput { + nodes: Array; + idToNodeMap: Map; + urlToNodeMap: Map>; + frameIdToNodeMap: Map; +} + +// Shorter tasks have negligible impact on simulation results. +const SIGNIFICANT_DUR_THRESHOLD_MS = 10; + +// TODO: video files tend to be enormous and throw off all graph traversals, move this ignore +// into estimation logic when we use the dependency graph for other purposes. +const IGNORED_MIME_TYPES_REGEX = /^video/; + +class PageDependencyGraph { + static getNetworkInitiators(request: Lantern.NetworkRequest): string[] { + if (!request.initiator) { + return []; + } + if (request.initiator.url) { + return [request.initiator.url]; + } + if (request.initiator.type === 'script') { + // Script initiators have the stack of callFrames from all functions that led to this request. + // If async stacks are enabled, then the stack will also have the parent functions that asynchronously + // led to this request chained in the `parent` property. 
+ const scriptURLs = new Set(); + let stack = request.initiator.stack; + while (stack) { + const callFrames = stack.callFrames || []; + for (const frame of callFrames) { + if (frame.url) { + scriptURLs.add(frame.url); + } + } + + stack = stack.parent; + } + + return Array.from(scriptURLs); + } + + return []; + } + + static getNetworkNodeOutput(networkRequests: Lantern.NetworkRequest[]): NetworkNodeOutput { + const nodes: Array = []; + const idToNodeMap = new Map(); + const urlToNodeMap = new Map>(); + const frameIdToNodeMap = new Map(); + + networkRequests.forEach(request => { + if (IGNORED_MIME_TYPES_REGEX.test(request.mimeType)) { + return; + } + if (request.fromWorker) { + return; + } + + // Network requestIds can be duplicated for an unknown reason + // Suffix all subsequent requests with `:duplicate` until it's unique + // NOTE: This should never happen with modern NetworkRequest library, but old fixtures + // might still have this issue. + while (idToNodeMap.has(request.requestId)) { + request.requestId += ':duplicate'; + } + + const node = new NetworkNode(request); + nodes.push(node); + + const urlList = urlToNodeMap.get(request.url) || []; + urlList.push(node); + + idToNodeMap.set(request.requestId, node); + urlToNodeMap.set(request.url, urlList); + + // If the request was for the root document of an iframe, save an entry in our + // map so we can link up the task `args.data.frame` dependencies later in graph creation. + if (request.frameId && request.resourceType === 'Document' && request.documentURL === request.url) { + // If there's ever any ambiguity, permanently set the value to `false` to avoid loops in the graph. + const value = frameIdToNodeMap.has(request.frameId) ? 
null : node; + frameIdToNodeMap.set(request.frameId, value); + } + }); + + return {nodes, idToNodeMap, urlToNodeMap, frameIdToNodeMap}; + } + + static isScheduleableTask(evt: Lantern.TraceEvent): boolean { + return evt.name === SCHEDULABLE_TASK_TITLE_LH || evt.name === SCHEDULABLE_TASK_TITLE_ALT1 || + evt.name === SCHEDULABLE_TASK_TITLE_ALT2 || evt.name === SCHEDULABLE_TASK_TITLE_ALT3; + } + + /** + * There should *always* be at least one top level event, having 0 typically means something is + * drastically wrong with the trace and we should just give up early and loudly. + */ + static assertHasToplevelEvents(events: Lantern.TraceEvent[]): void { + const hasToplevelTask = events.some(this.isScheduleableTask); + if (!hasToplevelTask) { + throw new Error('Could not find any top level events'); + } + } + + static getCPUNodes(mainThreadEvents: Lantern.TraceEvent[]): CPUNode[] { + const nodes: CPUNode[] = []; + let i = 0; + + PageDependencyGraph.assertHasToplevelEvents(mainThreadEvents); + + while (i < mainThreadEvents.length) { + const evt = mainThreadEvents[i]; + i++; + + // Skip all trace events that aren't schedulable tasks with sizable duration + if (!PageDependencyGraph.isScheduleableTask(evt) || !evt.dur) { + continue; + } + + let correctedEndTs: number|undefined = undefined; + + // Capture all events that occurred within the task + const children: Lantern.TraceEvent[] = []; + for (const endTime = evt.ts + evt.dur; i < mainThreadEvents.length && mainThreadEvents[i].ts < endTime; i++) { + const event = mainThreadEvents[i]; + + // Temporary fix for a Chrome bug where some RunTask events can be overlapping. 
+ // We correct that here be ensuring each RunTask ends at least 1 microsecond before the next + // https://github.com/GoogleChrome/lighthouse/issues/15896 + // https://issues.chromium.org/issues/329678173 + if (PageDependencyGraph.isScheduleableTask(event) && event.dur) { + correctedEndTs = event.ts - 1; + break; + } + + children.push(event); + } + + nodes.push(new CPUNode(evt, children, correctedEndTs)); + } + + return nodes; + } + + static linkNetworkNodes(rootNode: NetworkNode, networkNodeOutput: NetworkNodeOutput): void { + networkNodeOutput.nodes.forEach(node => { + const directInitiatorRequest = node.request.initiatorRequest || rootNode.request; + const directInitiatorNode = networkNodeOutput.idToNodeMap.get(directInitiatorRequest.requestId) || rootNode; + const canDependOnInitiator = !directInitiatorNode.isDependentOn(node) && node.canDependOn(directInitiatorNode); + const initiators = PageDependencyGraph.getNetworkInitiators(node.request); + if (initiators.length) { + initiators.forEach(initiator => { + const parentCandidates = networkNodeOutput.urlToNodeMap.get(initiator) || []; + // Only add the edge if the parent is unambiguous with valid timing and isn't circular. + if (parentCandidates.length === 1 && parentCandidates[0].startTime <= node.startTime && + !parentCandidates[0].isDependentOn(node)) { + node.addDependency(parentCandidates[0]); + } else if (canDependOnInitiator) { + directInitiatorNode.addDependent(node); + } + }); + } else if (canDependOnInitiator) { + directInitiatorNode.addDependent(node); + } + + // Make sure the nodes are attached to the graph if the initiator information was invalid. 
+ if (node !== rootNode && node.getDependencies().length === 0 && node.canDependOn(rootNode)) { + node.addDependency(rootNode); + } + + if (!node.request.redirects) { + return; + } + + const redirects = [...node.request.redirects, node.request]; + for (let i = 1; i < redirects.length; i++) { + const redirectNode = networkNodeOutput.idToNodeMap.get(redirects[i - 1].requestId); + const actualNode = networkNodeOutput.idToNodeMap.get(redirects[i].requestId); + if (actualNode && redirectNode) { + actualNode.addDependency(redirectNode); + } + } + }); + } + + static linkCPUNodes(rootNode: Node, networkNodeOutput: NetworkNodeOutput, cpuNodes: CPUNode[]): void { + const linkableResourceTypes = new Set([ + 'XHR', + 'Fetch', + 'Script', + ]); + + function addDependentNetworkRequest(cpuNode: CPUNode, reqId: string): void { + const networkNode = networkNodeOutput.idToNodeMap.get(reqId); + if (!networkNode || + // Ignore all network nodes that started before this CPU task started + // A network request that started earlier could not possibly have been started by this task + networkNode.startTime <= cpuNode.startTime) { + return; + } + const {request} = networkNode; + const resourceType = request.resourceType || request.redirectDestination?.resourceType; + if (!linkableResourceTypes.has(resourceType)) { + // We only link some resources to CPU nodes because we observe LCP simulation + // regressions when including images, etc. + return; + } + cpuNode.addDependent(networkNode); + } + + /** + * If the node has an associated frameId, then create a dependency on the root document request + * for the frame. The task obviously couldn't have started before the frame was even downloaded. 
+ */ + function addDependencyOnFrame(cpuNode: CPUNode, frameId: string|undefined): void { + if (!frameId) { + return; + } + const networkNode = networkNodeOutput.frameIdToNodeMap.get(frameId); + if (!networkNode) { + return; + } + // Ignore all network nodes that started after this CPU task started + // A network request that started after could not possibly be required this task + if (networkNode.startTime >= cpuNode.startTime) { + return; + } + cpuNode.addDependency(networkNode); + } + + function addDependencyOnUrl(cpuNode: CPUNode, url: string): void { + if (!url) { + return; + } + // Allow network requests that end up to 100ms before the task started + // Some script evaluations can start before the script finishes downloading + const minimumAllowableTimeSinceNetworkNodeEnd = -100 * 1000; + const candidates = networkNodeOutput.urlToNodeMap.get(url) || []; + + let minCandidate = null; + let minDistance = Infinity; + // Find the closest request that finished before this CPU task started + for (const candidate of candidates) { + // Explicitly ignore all requests that started after this CPU node + // A network request that started after this task started cannot possibly be a dependency + if (cpuNode.startTime <= candidate.startTime) { + return; + } + + const distance = cpuNode.startTime - candidate.endTime; + if (distance >= minimumAllowableTimeSinceNetworkNodeEnd && distance < minDistance) { + minCandidate = candidate; + minDistance = distance; + } + } + + if (!minCandidate) { + return; + } + cpuNode.addDependency(minCandidate); + } + + const timers = new Map(); + for (const node of cpuNodes) { + for (const evt of node.childEvents) { + if (!evt.args.data) { + continue; + } + + const argsUrl = evt.args.data.url; + const stackTraceUrls = (evt.args.data.stackTrace || []).map(l => l.url).filter(Boolean); + + switch (evt.name) { + case 'TimerInstall': + // @ts-expect-error - 'TimerInstall' event means timerId exists. 
+ timers.set(evt.args.data.timerId, node); + stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); + break; + case 'TimerFire': { + // @ts-expect-error - 'TimerFire' event means timerId exists. + const installer = timers.get(evt.args.data.timerId); + if (!installer || installer.endTime > node.startTime) { + break; + } + installer.addDependent(node); + break; + } + + case 'InvalidateLayout': + case 'ScheduleStyleRecalculation': + addDependencyOnFrame(node, evt.args.data.frame); + stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); + break; + + case 'EvaluateScript': + addDependencyOnFrame(node, evt.args.data.frame); + // @ts-expect-error - 'EvaluateScript' event means argsUrl is defined. + addDependencyOnUrl(node, argsUrl); + stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); + break; + + case 'XHRReadyStateChange': + // Only create the dependency if the request was completed + // 'XHRReadyStateChange' event means readyState is defined. + if (evt.args.data.readyState !== 4) { + break; + } + + // @ts-expect-error - 'XHRReadyStateChange' event means argsUrl is defined. + addDependencyOnUrl(node, argsUrl); + stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); + break; + + case 'FunctionCall': + case 'v8.compile': + addDependencyOnFrame(node, evt.args.data.frame); + // @ts-expect-error - events mean argsUrl is defined. + addDependencyOnUrl(node, argsUrl); + break; + + case 'ParseAuthorStyleSheet': + addDependencyOnFrame(node, evt.args.data.frame); + // @ts-expect-error - 'ParseAuthorStyleSheet' event means styleSheetUrl is defined. + addDependencyOnUrl(node, evt.args.data.styleSheetUrl); + break; + + case 'ResourceSendRequest': + addDependencyOnFrame(node, evt.args.data.frame); + // @ts-expect-error - 'ResourceSendRequest' event means requestId is defined. 
+ addDependentNetworkRequest(node, evt.args.data.requestId); + stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); + break; + } + } + + // Nodes starting before the root node cannot depend on it. + if (node.getNumberOfDependencies() === 0 && node.canDependOn(rootNode)) { + node.addDependency(rootNode); + } + } + + // Second pass to prune the graph of short tasks. + const minimumEvtDur = SIGNIFICANT_DUR_THRESHOLD_MS * 1000; + let foundFirstLayout = false; + let foundFirstPaint = false; + let foundFirstParse = false; + + for (const node of cpuNodes) { + // Don't prune if event is the first ParseHTML/Layout/Paint. + // See https://github.com/GoogleChrome/lighthouse/issues/9627#issuecomment-526699524 for more. + let isFirst = false; + if (!foundFirstLayout && node.childEvents.some(evt => evt.name === 'Layout')) { + isFirst = foundFirstLayout = true; + } + if (!foundFirstPaint && node.childEvents.some(evt => evt.name === 'Paint')) { + isFirst = foundFirstPaint = true; + } + if (!foundFirstParse && node.childEvents.some(evt => evt.name === 'ParseHTML')) { + isFirst = foundFirstParse = true; + } + + if (isFirst || node.duration >= minimumEvtDur) { + // Don't prune this node. The task is long / important so it will impact simulation. + continue; + } + + // Prune the node if it isn't highly connected to minimize graph size. Rewiring the graph + // here replaces O(M + N) edges with (M * N) edges, which is fine if either M or N is at + // most 1. + if (node.getNumberOfDependencies() === 1 || node.getNumberOfDependents() <= 1) { + PageDependencyGraph._pruneNode(node); + } + } + } + + /** + * Removes the given node from the graph, but retains all paths between its dependencies and + * dependents. 
+ */ + static _pruneNode(node: Node): void { + const dependencies = node.getDependencies(); + const dependents = node.getDependents(); + for (const dependency of dependencies) { + node.removeDependency(dependency); + for (const dependent of dependents) { + dependency.addDependent(dependent); + } + } + for (const dependent of dependents) { + node.removeDependent(dependent); + } + } + + /** + * TODO: remove when CDT backend in Lighthouse is gone. Until then, this is a useful debugging tool + * to find delta between using CDP or the trace to create the network requests. + * + * When a test fails using the trace backend, I enabled this debug method and copied the network + * requests when CDP was used, then when trace is used, and diff'd them. This method helped + * remove non-logical differences from the comparison (order of properties, slight rounding + * discrepancies, removing object cycles, etc). + * + * When using for a unit test, make sure to do `.only` so you are getting what you expect. 
+ */ + static _debugNormalizeRequests(lanternRequests: Lantern.NetworkRequest[]): void { + for (const request of lanternRequests) { + request.rendererStartTime = Math.round(request.rendererStartTime * 1000) / 1000; + request.networkRequestTime = Math.round(request.networkRequestTime * 1000) / 1000; + request.responseHeadersEndTime = Math.round(request.responseHeadersEndTime * 1000) / 1000; + request.networkEndTime = Math.round(request.networkEndTime * 1000) / 1000; + } + + for (const r of lanternRequests) { + delete r.rawRequest; + if (r.initiatorRequest) { + // @ts-expect-error + r.initiatorRequest = {id: r.initiatorRequest.requestId}; + } + if (r.redirectDestination) { + // @ts-expect-error + r.redirectDestination = {id: r.redirectDestination.requestId}; + } + if (r.redirectSource) { + // @ts-expect-error + r.redirectSource = {id: r.redirectSource.requestId}; + } + if (r.redirects) { + // @ts-expect-error + r.redirects = r.redirects.map(r2 => r2.requestId); + } + } + const requests: Lantern.NetworkRequest[] = lanternRequests + .map(r => ({ + requestId: r.requestId, + connectionId: r.connectionId, + connectionReused: r.connectionReused, + url: r.url, + protocol: r.protocol, + parsedURL: r.parsedURL, + documentURL: r.documentURL, + rendererStartTime: r.rendererStartTime, + networkRequestTime: r.networkRequestTime, + responseHeadersEndTime: r.responseHeadersEndTime, + networkEndTime: r.networkEndTime, + transferSize: r.transferSize, + resourceSize: r.resourceSize, + fromDiskCache: r.fromDiskCache, + fromMemoryCache: r.fromMemoryCache, + finished: r.finished, + statusCode: r.statusCode, + redirectSource: r.redirectSource, + redirectDestination: r.redirectDestination, + redirects: r.redirects, + failed: r.failed, + initiator: r.initiator, + timing: r.timing ? 
{ + requestTime: r.timing.requestTime, + proxyStart: r.timing.proxyStart, + proxyEnd: r.timing.proxyEnd, + dnsStart: r.timing.dnsStart, + dnsEnd: r.timing.dnsEnd, + connectStart: r.timing.connectStart, + connectEnd: r.timing.connectEnd, + sslStart: r.timing.sslStart, + sslEnd: r.timing.sslEnd, + workerStart: r.timing.workerStart, + workerReady: r.timing.workerReady, + workerFetchStart: r.timing.workerFetchStart, + workerRespondWithSettled: r.timing.workerRespondWithSettled, + sendStart: r.timing.sendStart, + sendEnd: r.timing.sendEnd, + pushStart: r.timing.pushStart, + pushEnd: r.timing.pushEnd, + receiveHeadersStart: r.timing.receiveHeadersStart, + receiveHeadersEnd: r.timing.receiveHeadersEnd, + } : + r.timing, + resourceType: r.resourceType, + mimeType: r.mimeType, + priority: r.priority, + initiatorRequest: r.initiatorRequest, + frameId: r.frameId, + fromWorker: r.fromWorker, + isLinkPreload: r.isLinkPreload, + serverResponseTime: r.serverResponseTime, + })) + .filter(r => !r.fromWorker); + // eslint-disable-next-line no-unused-vars + const debug = requests; + // Set breakpoint here. + // Copy `debug` and compare with https://www.diffchecker.com/text-compare/ + // eslint-disable-next-line no-console + console.log(debug); + } + + static createGraph( + mainThreadEvents: Lantern.TraceEvent[], networkRequests: Lantern.NetworkRequest[], + url: Lantern.Simulation.URL): Node { + // This is for debugging trace/devtoolslog network records. 
+ // const debug = PageDependencyGraph._debugNormalizeRequests(networkRequests); + const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); + const cpuNodes = PageDependencyGraph.getCPUNodes(mainThreadEvents); + const {requestedUrl, mainDocumentUrl} = url; + if (!requestedUrl) { + throw new Error('requestedUrl is required to get the root request'); + } + if (!mainDocumentUrl) { + throw new Error('mainDocumentUrl is required to get the main resource'); + } + + const rootRequest = NetworkAnalyzer.findResourceForUrl(networkRequests, requestedUrl); + if (!rootRequest) { + throw new Error('rootRequest not found'); + } + const rootNode = networkNodeOutput.idToNodeMap.get(rootRequest.requestId); + if (!rootNode) { + throw new Error('rootNode not found'); + } + const mainDocumentRequest = NetworkAnalyzer.findLastDocumentForUrl(networkRequests, mainDocumentUrl); + if (!mainDocumentRequest) { + throw new Error('mainDocumentRequest not found'); + } + const mainDocumentNode = networkNodeOutput.idToNodeMap.get(mainDocumentRequest.requestId); + if (!mainDocumentNode) { + throw new Error('mainDocumentNode not found'); + } + + PageDependencyGraph.linkNetworkNodes(rootNode, networkNodeOutput); + PageDependencyGraph.linkCPUNodes(rootNode, networkNodeOutput, cpuNodes); + mainDocumentNode.setIsMainDocument(true); + + if (NetworkNode.hasCycle(rootNode)) { + throw new Error('Invalid dependency graph created, cycle detected'); + } + + return rootNode; + } + + static printGraph(rootNode: Node, widthInCharacters = 100): void { + function padRight(str: string, target: number, padChar = ' '): string { + return str + padChar.repeat(Math.max(target - str.length, 0)); + } + + const nodes: Node[] = []; + rootNode.traverse(node => nodes.push(node)); + nodes.sort((a, b) => a.startTime - b.startTime); + + const min = nodes[0].startTime; + const max = nodes.reduce((max, node) => Math.max(max, node.endTime), 0); + + const totalTime = max - min; + const timePerCharacter = 
totalTime / widthInCharacters; + nodes.forEach(node => { + const offset = Math.round((node.startTime - min) / timePerCharacter); + const length = Math.ceil((node.endTime - node.startTime) / timePerCharacter); + const bar = padRight('', offset) + padRight('', length, '='); + + // @ts-expect-error -- disambiguate displayName from across possible Node types. + const displayName = node.request ? node.request.url : node.type; + // eslint-disable-next-line + console.log(padRight(bar, widthInCharacters), `| ${displayName.slice(0, 30)}`); + }); + } +} + +export {PageDependencyGraph}; diff --git a/front_end/models/trace/lantern/TBTUtils.test.ts b/front_end/models/trace/lantern/TBTUtils.test.ts new file mode 100644 index 00000000000..6415d970ccf --- /dev/null +++ b/front_end/models/trace/lantern/TBTUtils.test.ts @@ -0,0 +1,144 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Lantern from './lantern.js'; + +const {calculateSumOfBlockingTime} = Lantern.TBTUtils; + +describe('TotalBlockingTime utils', () => { + it('reports 0 when no task is longer than 50ms', () => { + const events = [ + {start: 1000, end: 1050, duration: 50}, + {start: 2000, end: 2010, duration: 10}, + ]; + + const fcpTimeMs = 500; + const interactiveTimeMs = 4000; + + expect( + calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs), + ) + .to.equal(0); + }); + + it('only looks at tasks within FCP and TTI', () => { + const events = [ + {start: 1000, end: 1060, duration: 60}, + {start: 2000, end: 2100, duration: 100}, + {start: 2300, end: 2450, duration: 150}, + {start: 2600, end: 2800, duration: 200}, + ]; + + const fcpTimeMs = 1500; + const interactiveTimeMs = 2500; + + expect( + calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs), + ) + .to.equal(150); + }); 
+ + it('clips before finding blocking regions', () => { + const fcpTimeMs = 150; + const interactiveTimeMs = 300; + + const events = [ + // The clipping is done first, so the task becomes [150, 200] after clipping and contributes + // 0ms of blocking time. This is in contrast to first calculating the blocking region ([100, + // 200]) and then clipping at FCP (150ms), which yields 50ms blocking time. + {start: 50, end: 200, duration: 150}, + // Similarly, the task is first clipped above to be [240, 300], and then contributes 10ms + // blocking time. + {start: 240, end: 460, duration: 120}, + ]; + + expect( + calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs), + ) + .to.equal(10); // 0ms + 10ms. + }); + + // TTI can happen in the middle of a task, for example, if TTI is at FMP which occurs as part + // of a larger task, or in the lantern case where we use estimate TTI using a different graph + // from the one used to estimate TBT. + it('clips properly if TTI falls in the middle of a task', () => { + const fcpTimeMs = 1000; + const interactiveTimeMs = 2000; + + expect( + calculateSumOfBlockingTime( + [{start: 1951, end: 2100, duration: 149}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(0); // Duration after clipping is 49, which is < 50. + expect( + calculateSumOfBlockingTime( + [{start: 1950, end: 2100, duration: 150}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(0); // Duration after clipping is 50, so time after 50ms is 0ms. + expect( + calculateSumOfBlockingTime( + [{start: 1949, end: 2100, duration: 151}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(1); // Duration after clipping is 51, so time after 50ms is 1ms. 
+ }); + + it('clips properly if FCP falls in the middle of a task', () => { + const fcpTimeMs = 1000; + const interactiveTimeMs = 2000; + + expect( + calculateSumOfBlockingTime( + [{start: 900, end: 1049, duration: 149}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(0); // Duration after clipping is 49, which is < 50. + expect( + calculateSumOfBlockingTime( + [{start: 900, end: 1050, duration: 150}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(0); // Duration after clipping is 50, so time after 50ms is 0ms. + expect( + calculateSumOfBlockingTime( + [{start: 900, end: 1051, duration: 151}], + fcpTimeMs, + interactiveTimeMs, + ), + ) + .to.equal(1); // Duration after clipping is 51, so time after 50ms is 1ms. + }); + + // This can happen in the lantern metric case, where we use the optimistic + // TTI and pessimistic FCP. + it('returns 0 if interactiveTime is earlier than FCP', () => { + const fcpTimeMs = 2050; + const interactiveTimeMs = 1050; + + const events = [{start: 500, end: 3000, duration: 2500}]; + + expect( + calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs), + ) + .to.equal(0); + }); +}); diff --git a/front_end/models/trace/lantern/TBTUtils.ts b/front_end/models/trace/lantern/TBTUtils.ts new file mode 100644 index 00000000000..ebae6630774 --- /dev/null +++ b/front_end/models/trace/lantern/TBTUtils.ts @@ -0,0 +1,88 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2021 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +const BLOCKING_TIME_THRESHOLD = 50; + +/** + * For TBT, we only want to consider tasks that fall in our time range + * - FCP and TTI for navigation mode + * - Trace start and trace end for timespan mode + * + * FCP is picked as `startTimeMs` because there is little risk of user input happening + * before FCP so Long Queuing Delay regions do not harm user experience. Developers should be + * optimizing to reach FCP as fast as possible without having to worry about task lengths. + * + * TTI is picked as `endTimeMs` because we want a well defined end point for page load. + * + * @param startTimeMs Should be FCP in navigation mode and the trace start time in timespan mode + * @param endTimeMs Should be TTI in navigation mode and the trace end time in timespan mode + * @param topLevelEvent Leave unset if `event` is top level. Has no effect if `event` has the same duration as `topLevelEvent`. + */ +function calculateTbtImpactForEvent( + event: {start: number, end: number, duration: number}, startTimeMs: number, endTimeMs: number, + topLevelEvent?: {start: number, end: number, duration: number}): number { + let threshold = BLOCKING_TIME_THRESHOLD; + + // If a task is not top level, it doesn't make sense to subtract the entire 50ms + // blocking threshold from the event. + // + // e.g. An 80ms top level task with two 40ms children should attribute some blocking + // time to the 40ms tasks even though they do not meet the 50ms threshold. + // + // The solution is to scale the threshold for child events to be considered blocking. + if (topLevelEvent) { + threshold *= (event.duration / topLevelEvent.duration); + } + + if (event.duration < threshold) { + return 0; + } + if (event.end < startTimeMs) { + return 0; + } + if (event.start > endTimeMs) { + return 0; + } + + // Perform the clipping and then calculate Blocking Region. 
So if we have a 150ms task + // [0, 150] and `startTimeMs` is at 50ms, we first clip the task to [50, 150], and then + // calculate the Blocking Region to be [100, 150]. The rationale here is that tasks before + // the start time are unimportant, so we care whether the main thread is busy more than + // 50ms at a time only after the start time. + const clippedStart = Math.max(event.start, startTimeMs); + const clippedEnd = Math.min(event.end, endTimeMs); + const clippedDuration = clippedEnd - clippedStart; + if (clippedDuration < threshold) { + return 0; + } + + return clippedDuration - threshold; +} + +function calculateSumOfBlockingTime( + topLevelEvents: Array<{start: number, end: number, duration: number}>, startTimeMs: number, + endTimeMs: number): number { + if (endTimeMs <= startTimeMs) { + return 0; + } + + let sumBlockingTime = 0; + for (const event of topLevelEvents) { + sumBlockingTime += calculateTbtImpactForEvent(event, startTimeMs, endTimeMs); + } + + return sumBlockingTime; +} + +export { + BLOCKING_TIME_THRESHOLD, + calculateSumOfBlockingTime, + calculateTbtImpactForEvent, +}; diff --git a/front_end/models/trace/lantern/lantern.ts b/front_end/models/trace/lantern/lantern.ts new file mode 100644 index 00000000000..3938ee13bc4 --- /dev/null +++ b/front_end/models/trace/lantern/lantern.ts @@ -0,0 +1,54 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +const NetworkRequestTypes = { + XHR: 'XHR', + Fetch: 'Fetch', + EventSource: 'EventSource', + Script: 'Script', + Stylesheet: 'Stylesheet', + Image: 'Image', + Media: 'Media', + Font: 'Font', + Document: 'Document', + TextTrack: 'TextTrack', + WebSocket: 'WebSocket', + Other: 'Other', + Manifest: 'Manifest', + SignedExchange: 'SignedExchange', + Ping: 'Ping', + Preflight: 'Preflight', + CSPViolationReport: 'CSPViolationReport', + Prefetch: 'Prefetch', +} as const; + +export {BaseNode} from './BaseNode.js'; +export type {Node} from './BaseNode.js'; +export {CPUNode} from './CpuNode.js'; +export {LanternError as Error} from './LanternError.js'; +export {Metric} from './Metric.js'; +export {NetworkNode} from './NetworkNode.js'; +export {PageDependencyGraph} from './PageDependencyGraph.js'; +export * as Metrics from './metrics/metrics.js'; +export * as Simulation from './simulation/simulation.js'; +export * as TBTUtils from './TBTUtils.js'; +export type { + NetworkRequest, + ParsedURL, + ResourcePriority, + ResourceTiming, + ResourceType, + Trace, + TraceEvent, +} from './types/lantern.js'; + +export { + NetworkRequestTypes, +}; diff --git a/front_end/models/trace/lantern/metrics/FirstContentfulPaint.test.ts b/front_end/models/trace/lantern/metrics/FirstContentfulPaint.test.ts new file mode 100644 index 00000000000..45875e86598 --- /dev/null +++ b/front_end/models/trace/lantern/metrics/FirstContentfulPaint.test.ts @@ -0,0 +1,67 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Lantern from '../lantern.js'; +import {getComputationDataFromFixture, loadTrace} from '../testing/MetricTestUtils.js'; + +const {FirstContentfulPaint} = Lantern.Metrics; + +describe('Metrics: Lantern FCP', () => { + let trace: Lantern.Trace; + before(async function() { + trace = await loadTrace(this, 'lantern/progressive-app/trace.json.gz'); + }); + + it('should compute predicted value', async () => { + const data = await getComputationDataFromFixture({trace}); + const result = await FirstContentfulPaint.compute(data); + + assert.deepStrictEqual( + { + timing: Math.round(result.timing), + optimistic: Math.round(result.optimisticEstimate.timeInMs), + pessimistic: Math.round(result.pessimisticEstimate.timeInMs), + optimisticNodeTimings: result.optimisticEstimate.nodeTimings.size, + pessimisticNodeTimings: result.pessimisticEstimate.nodeTimings.size, + }, + { + timing: 1107, + optimistic: 1107, + pessimistic: 1107, + optimisticNodeTimings: 4, + pessimisticNodeTimings: 4, + }); + assert.ok(result.optimisticGraph, 'should have created optimistic graph'); + assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); + }); + + it('should handle negative request networkEndTime', async () => { + const data = await getComputationDataFromFixture({trace}); + assert(data.graph.type === 'network'); + data.graph.request.networkEndTime = -1; + const result = await FirstContentfulPaint.compute(data); + + const optimisticNodes: Lantern.NetworkNode[] = []; + result.optimisticGraph.traverse(node => { + if (node.type === 'network') { + optimisticNodes.push(node); + } + }); + expect(optimisticNodes.map(node => node.request.url)).to.deep.equal(['https://squoosh.app/']); + + const pessimisticNodes: Lantern.NetworkNode[] = []; + result.pessimisticGraph.traverse(node => { + if (node.type === 'network') { + pessimisticNodes.push(node); + } + }); + 
expect(pessimisticNodes.map(node => node.request.url)).to.deep.equal(['https://squoosh.app/']); + }); +}); diff --git a/front_end/models/trace/lantern/metrics/FirstContentfulPaint.ts b/front_end/models/trace/lantern/metrics/FirstContentfulPaint.ts new file mode 100644 index 00000000000..1f10384b85a --- /dev/null +++ b/front_end/models/trace/lantern/metrics/FirstContentfulPaint.ts @@ -0,0 +1,194 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode, type Node} from '../BaseNode.js'; +import {type CPUNode} from '../CpuNode.js'; +import {Metric} from '../Metric.js'; +import {type NetworkNode} from '../NetworkNode.js'; +import type * as Lantern from '../types/lantern.js'; + +interface FirstPaintBasedGraphOpts { + /** + * The timestamp used to filter out tasks that occurred after our paint of interest. + * Typically this is First Contentful Paint or First Meaningful Paint. + */ + cutoffTimestamp: number; + /** + * The function that determines which resources should be considered *possibly* + * render-blocking. + */ + treatNodeAsRenderBlocking: (node: NetworkNode) => boolean; + /** + * The function that determines which CPU nodes should also be included in our + * blocking node IDs set, beyond what getRenderBlockingNodeData() already includes. + */ + additionalCpuNodesToTreatAsRenderBlocking?: (node: CPUNode) => boolean; +} + +class FirstContentfulPaint extends Metric { + static override get coefficients(): Lantern.Simulation.MetricCoefficients { + return { + intercept: 0, + optimistic: 0.5, + pessimistic: 0.5, + }; + } + + /** + * Computes the set of URLs that *appeared* to be render-blocking based on our filter, + * *but definitely were not* render-blocking based on the timing of their EvaluateScript task. 
+ * It also computes the set of corresponding CPU node ids that were needed for the paint at the + * given timestamp. + */ + static getRenderBlockingNodeData( + graph: Node, + {cutoffTimestamp, treatNodeAsRenderBlocking, additionalCpuNodesToTreatAsRenderBlocking}: + FirstPaintBasedGraphOpts, + ): {definitelyNotRenderBlockingScriptUrls: Set, renderBlockingCpuNodeIds: Set} { + /** A map of blocking script URLs to the earliest EvaluateScript task node that executed them. */ + const scriptUrlToNodeMap = new Map(); + + const cpuNodes: CPUNode[] = []; + graph.traverse(node => { + if (node.type === BaseNode.types.CPU) { + // A task is *possibly* render blocking if it *started* before cutoffTimestamp. + // We use startTime here because the paint event can be *inside* the task that was render blocking. + if (node.startTime <= cutoffTimestamp) { + cpuNodes.push(node); + } + + // Build our script URL map to find the earliest EvaluateScript task node. + const scriptUrls = node.getEvaluateScriptURLs(); + for (const url of scriptUrls) { + // Use the earliest CPU node we find. + const existing = scriptUrlToNodeMap.get(url) || node; + scriptUrlToNodeMap.set(url, node.startTime < existing.startTime ? node : existing); + } + } + }); + + cpuNodes.sort((a, b) => a.startTime - b.startTime); + + // A script is *possibly* render blocking if it finished loading before cutoffTimestamp. + const possiblyRenderBlockingScriptUrls = Metric.getScriptUrls(graph, node => { + // The optimistic LCP treatNodeAsRenderBlocking fn wants to exclude some images in the graph, + // but here it only receives scripts to evaluate. It's a no-op in this case, but it will + // matter below in the getFirstPaintBasedGraph clone operation. + return node.endTime <= cutoffTimestamp && treatNodeAsRenderBlocking(node); + }); + + // A script is *definitely not* render blocking if its EvaluateScript task started after cutoffTimestamp. 
+ const definitelyNotRenderBlockingScriptUrls = new Set(); + const renderBlockingCpuNodeIds = new Set(); + for (const url of possiblyRenderBlockingScriptUrls) { + // Lookup the CPU node that had the earliest EvaluateScript for this URL. + const cpuNodeForUrl = scriptUrlToNodeMap.get(url); + + // If we can't find it at all, we can't conclude anything, so just skip it. + if (!cpuNodeForUrl) { + continue; + } + + // If we found it and it was in our `cpuNodes` set that means it finished before cutoffTimestamp, so it really is render-blocking. + if (cpuNodes.includes(cpuNodeForUrl)) { + renderBlockingCpuNodeIds.add(cpuNodeForUrl.id); + continue; + } + + // We couldn't find the evaluate script in the set of CPU nodes that ran before our paint, so + // it must not have been necessary for the paint. + definitelyNotRenderBlockingScriptUrls.add(url); + } + + // The first layout, first paint, and first ParseHTML are almost always necessary for first paint, + // so we always include those CPU nodes. + const firstLayout = cpuNodes.find(node => node.didPerformLayout()); + if (firstLayout) { + renderBlockingCpuNodeIds.add(firstLayout.id); + } + const firstPaint = cpuNodes.find(node => node.childEvents.some(e => e.name === 'Paint')); + if (firstPaint) { + renderBlockingCpuNodeIds.add(firstPaint.id); + } + const firstParse = cpuNodes.find(node => node.childEvents.some(e => e.name === 'ParseHTML')); + if (firstParse) { + renderBlockingCpuNodeIds.add(firstParse.id); + } + + // If a CPU filter was passed in, we also want to include those extra nodes. + if (additionalCpuNodesToTreatAsRenderBlocking) { + cpuNodes.filter(additionalCpuNodesToTreatAsRenderBlocking).forEach(node => renderBlockingCpuNodeIds.add(node.id)); + } + + return { + definitelyNotRenderBlockingScriptUrls, + renderBlockingCpuNodeIds, + }; + } + + /** + * Computes the graph required for the first paint of interest. 
+ */ + static getFirstPaintBasedGraph( + dependencyGraph: Node, + {cutoffTimestamp, treatNodeAsRenderBlocking, additionalCpuNodesToTreatAsRenderBlocking}: + FirstPaintBasedGraphOpts, + ): Node { + const rbData = this.getRenderBlockingNodeData(dependencyGraph, { + cutoffTimestamp, + treatNodeAsRenderBlocking, + additionalCpuNodesToTreatAsRenderBlocking, + }); + const {definitelyNotRenderBlockingScriptUrls, renderBlockingCpuNodeIds} = rbData; + + return dependencyGraph.cloneWithRelationships(node => { + if (node.type === BaseNode.types.NETWORK) { + // Exclude all nodes that ended after cutoffTimestamp (except for the main document which we always consider necessary) + // endTime is negative if request does not finish, make sure startTime isn't after cutoffTimestamp in this case. + const endedAfterPaint = node.endTime > cutoffTimestamp || node.startTime > cutoffTimestamp; + if (endedAfterPaint && !node.isMainDocument()) { + return false; + } + + const url = node.request.url; + // If the URL definitely wasn't render-blocking then we filter it out. + if (definitelyNotRenderBlockingScriptUrls.has(url)) { + return false; + } + + // Lastly, build up the FCP graph of all nodes we consider render blocking + return treatNodeAsRenderBlocking(node); + } + // If it's a CPU node, just check if it was blocking. + return renderBlockingCpuNodeIds.has(node.id); + }); + } + + static override getOptimisticGraph( + dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node { + return this.getFirstPaintBasedGraph(dependencyGraph, { + cutoffTimestamp: processedNavigation.timestamps.firstContentfulPaint, + // In the optimistic graph we exclude resources that appeared to be render blocking but were + // initiated by a script. While they typically have a very high importance and tend to have a + // significant impact on the page's content, these resources don't technically block rendering. 
+ treatNodeAsRenderBlocking: node => node.hasRenderBlockingPriority() && node.initiatorType !== 'script', + }); + } + + static override getPessimisticGraph( + dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node { + return this.getFirstPaintBasedGraph(dependencyGraph, { + cutoffTimestamp: processedNavigation.timestamps.firstContentfulPaint, + treatNodeAsRenderBlocking: node => node.hasRenderBlockingPriority(), + }); + } +} + +export {FirstContentfulPaint}; diff --git a/front_end/models/trace/lantern/metrics/Interactive.test.ts b/front_end/models/trace/lantern/metrics/Interactive.test.ts new file mode 100644 index 00000000000..8868f3ae9ae --- /dev/null +++ b/front_end/models/trace/lantern/metrics/Interactive.test.ts @@ -0,0 +1,73 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Lantern from '../lantern.js'; +import {getComputationDataFromFixture, loadTrace} from '../testing/MetricTestUtils.js'; + +const {Interactive, FirstContentfulPaint, LargestContentfulPaint} = Lantern.Metrics; + +describe('Metrics: Lantern TTI', () => { + let trace: Lantern.Trace; + let iframeTrace: Lantern.Trace; + before(async function() { + trace = await loadTrace(this, 'lantern/progressive-app/trace.json.gz'); + iframeTrace = await loadTrace(this, 'lantern/iframe/trace.json.gz'); + }); + + it('should compute predicted value', async () => { + const data = await getComputationDataFromFixture({trace}); + const result = await Interactive.compute(data, { + lcpResult: await LargestContentfulPaint.compute(data, { + fcpResult: await FirstContentfulPaint.compute(data), + }), + }); + + assert.deepStrictEqual( + { + timing: Math.round(result.timing), + optimistic: Math.round(result.optimisticEstimate.timeInMs), + pessimistic: 
Math.round(result.pessimisticEstimate.timeInMs), + }, + { + optimistic: 1107, + pessimistic: 1134, + timing: 1122, + }); + assert.strictEqual(result.optimisticEstimate.nodeTimings.size, 14); + assert.strictEqual(result.pessimisticEstimate.nodeTimings.size, 31); + assert.ok(result.optimisticGraph, 'should have created optimistic graph'); + assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); + }); + + it('should compute predicted value on iframes with substantial layout', async () => { + const data = await getComputationDataFromFixture({ + trace: iframeTrace, + }); + const result = await Interactive.compute(data, { + lcpResult: await LargestContentfulPaint.compute(data, { + fcpResult: await FirstContentfulPaint.compute(data), + }), + }); + + assert.deepStrictEqual( + { + timing: Math.round(result.timing), + optimistic: Math.round(result.optimisticEstimate.timeInMs), + pessimistic: Math.round(result.pessimisticEstimate.timeInMs), + }, + { + optimistic: 2372, + pessimistic: 2386, + timing: 2379, + }); + assert.ok(result.optimisticGraph, 'should have created optimistic graph'); + assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); + }); +}); diff --git a/front_end/models/trace/lantern/metrics/Interactive.ts b/front_end/models/trace/lantern/metrics/Interactive.ts new file mode 100644 index 00000000000..1b672b90a01 --- /dev/null +++ b/front_end/models/trace/lantern/metrics/Interactive.ts @@ -0,0 +1,90 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode, type Node} from '../BaseNode.js'; +import {type Extras, Metric} from '../Metric.js'; +import type * as Lantern from '../types/lantern.js'; + +// Any CPU task of 20 ms or more will end up being a critical long task on mobile +const CRITICAL_LONG_TASK_THRESHOLD = 20; + +class Interactive extends Metric { + // eslint-disable-next-line @typescript-eslint/naming-convention + static override get coefficients(): Lantern.Simulation.MetricCoefficients { + return { + intercept: 0, + optimistic: 0.45, + pessimistic: 0.55, + }; + } + + static override getOptimisticGraph(dependencyGraph: Node): Node { + // Adjust the critical long task threshold for microseconds + const minimumCpuTaskDuration = CRITICAL_LONG_TASK_THRESHOLD * 1000; + + return dependencyGraph.cloneWithRelationships(node => { + // Include everything that might be a long task + if (node.type === BaseNode.types.CPU) { + return node.duration > minimumCpuTaskDuration; + } + + // Include all scripts and high priority requests, exclude all images + const isImage = node.request.resourceType === 'Image'; + const isScript = node.request.resourceType === 'Script'; + return (!isImage && (isScript || node.request.priority === 'High' || node.request.priority === 'VeryHigh')); + }); + } + + static override getPessimisticGraph(dependencyGraph: Node): Node { + return dependencyGraph; + } + + static override getEstimateFromSimulation(simulationResult: Lantern.Simulation.Result, extras: Extras): + Lantern.Simulation.Result { + if (!extras.lcpResult) { + throw new Error('missing lcpResult'); + } + + const lastTaskAt = Interactive.getLastLongTaskEndTime(simulationResult.nodeTimings); + const minimumTime = extras.optimistic ? 
extras.lcpResult.optimisticEstimate.timeInMs : + extras.lcpResult.pessimisticEstimate.timeInMs; + return { + timeInMs: Math.max(minimumTime, lastTaskAt), + nodeTimings: simulationResult.nodeTimings, + }; + } + + static override async compute( + data: Lantern.Simulation.MetricComputationDataInput, + extras?: Omit): Promise { + const lcpResult = extras?.lcpResult; + if (!lcpResult) { + throw new Error('LCP is required to calculate the Interactive metric'); + } + + const metricResult = await super.compute(data, extras); + metricResult.timing = Math.max(metricResult.timing, lcpResult.timing); + return metricResult; + } + + static getLastLongTaskEndTime(nodeTimings: Lantern.Simulation.Result['nodeTimings'], duration = 50): number { + return Array.from(nodeTimings.entries()) + .filter(([node, timing]) => { + if (node.type !== BaseNode.types.CPU) { + return false; + } + return timing.duration > duration; + }) + .map(([_, timing]) => timing.endTime) + .reduce((max, x) => Math.max(max || 0, x || 0), 0); + } +} + +export {Interactive}; diff --git a/front_end/models/trace/lantern/metrics/LargestContentfulPaint.test.ts b/front_end/models/trace/lantern/metrics/LargestContentfulPaint.test.ts new file mode 100644 index 00000000000..d3e12d7a4dc --- /dev/null +++ b/front_end/models/trace/lantern/metrics/LargestContentfulPaint.test.ts @@ -0,0 +1,46 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Lantern from '../lantern.js'; +import {getComputationDataFromFixture, loadTrace} from '../testing/MetricTestUtils.js'; + +const {FirstContentfulPaint, LargestContentfulPaint} = Lantern.Metrics; + +describe('Metrics: Lantern LCP', () => { + let trace: Lantern.Trace; + before(async function() { + trace = await loadTrace(this, 'lantern/paul/trace.json.gz'); + }); + + it('should compute predicted value', async () => { + const data = await getComputationDataFromFixture({trace}); + const result = await LargestContentfulPaint.compute(data, { + fcpResult: await FirstContentfulPaint.compute(data), + }); + + assert.deepStrictEqual( + { + timing: Math.round(result.timing), + optimistic: Math.round(result.optimisticEstimate.timeInMs), + pessimistic: Math.round(result.pessimisticEstimate.timeInMs), + optimisticNodeTimings: result.optimisticEstimate.nodeTimings.size, + pessimisticNodeTimings: result.pessimisticEstimate.nodeTimings.size, + }, + { + timing: 1536, + optimistic: 1457, + pessimistic: 1616, + optimisticNodeTimings: 8, + pessimisticNodeTimings: 9, + }); + assert.ok(result.optimisticGraph, 'should have created optimistic graph'); + assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); + }); +}); diff --git a/front_end/models/trace/lantern/metrics/LargestContentfulPaint.ts b/front_end/models/trace/lantern/metrics/LargestContentfulPaint.ts new file mode 100644 index 00000000000..43e0472b44c --- /dev/null +++ b/front_end/models/trace/lantern/metrics/LargestContentfulPaint.ts @@ -0,0 +1,94 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {type Node} from '../BaseNode.js';
import {LanternError} from '../LanternError.js';
import {type Extras, Metric} from '../Metric.js';
import type * as Lantern from '../types/lantern.js';

import {FirstContentfulPaint} from './FirstContentfulPaint.js';

/**
 * Lantern estimate of Largest Contentful Paint, built on FCP's
 * first-paint-based graph logic with LCP-specific cutoffs.
 */
class LargestContentfulPaint extends Metric {
  // eslint-disable-next-line @typescript-eslint/naming-convention
  static override get coefficients(): Lantern.Simulation.MetricCoefficients {
    return {
      intercept: 0,
      optimistic: 0.5,
      pessimistic: 0.5,
    };
  }

  /**
   * Low priority image nodes are usually offscreen and very unlikely to be the
   * resource that is required for LCP. Our LCP graphs include everything except for these images.
   */
  static isNotLowPriorityImageNode(node: Node): boolean {
    if (node.type !== 'network') {
      return true;
    }
    const isImage = node.request.resourceType === 'Image';
    const isLowPriority = node.request.priority === 'Low' || node.request.priority === 'VeryLow';
    return !isImage || !isLowPriority;
  }

  static override getOptimisticGraph(
      dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node {
    const lcp = processedNavigation.timestamps.largestContentfulPaint;
    if (!lcp) {
      throw new LanternError('NO_LCP');
    }

    return FirstContentfulPaint.getFirstPaintBasedGraph(dependencyGraph, {
      cutoffTimestamp: lcp,
      treatNodeAsRenderBlocking: LargestContentfulPaint.isNotLowPriorityImageNode,
    });
  }

  static override getPessimisticGraph(
      dependencyGraph: Node, processedNavigation: Lantern.Simulation.ProcessedNavigation): Node {
    const lcp = processedNavigation.timestamps.largestContentfulPaint;
    if (!lcp) {
      throw new LanternError('NO_LCP');
    }

    return FirstContentfulPaint.getFirstPaintBasedGraph(dependencyGraph, {
      cutoffTimestamp: lcp,
      treatNodeAsRenderBlocking: _ => true,
      // For pessimistic LCP we'll include *all* layout nodes
      additionalCpuNodesToTreatAsRenderBlocking: node => node.didPerformLayout(),
    });
  }

  static override getEstimateFromSimulation(simulationResult: Lantern.Simulation.Result): Lantern.Simulation.Result {
    // The estimate is the latest end time among nodes that could plausibly be the LCP
    // resource (i.e. everything except low-priority/offscreen images).
    const nodeTimesNotOffscreenImages = Array.from(simulationResult.nodeTimings.entries())
                                            .filter(entry => LargestContentfulPaint.isNotLowPriorityImageNode(entry[0]))
                                            .map(entry => entry[1].endTime);

    return {
      timeInMs: Math.max(...nodeTimesNotOffscreenImages),
      nodeTimings: simulationResult.nodeTimings,
    };
  }

  static override async compute(
      data: Lantern.Simulation.MetricComputationDataInput,
      extras?: Omit<Extras, 'lcpResult'>): Promise<Lantern.Metrics.Result> {
    const fcpResult = extras?.fcpResult;
    if (!fcpResult) {
      throw new Error('FCP is required to calculate the LCP metric');
    }

    const metricResult = await super.compute(data, extras);
    // LCP can never occur before FCP; clamp to at least the FCP estimate.
    metricResult.timing = Math.max(metricResult.timing, fcpResult.timing);
    return metricResult;
  }
}

export {LargestContentfulPaint};
diff --git a/front_end/models/trace/lantern/metrics/MaxPotentialFID.ts b/front_end/models/trace/lantern/metrics/MaxPotentialFID.ts
new file mode 100644
index 00000000000..4949c76a6bf
--- /dev/null
+++ b/front_end/models/trace/lantern/metrics/MaxPotentialFID.ts
@@ -0,0 +1,73 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {BaseNode, type Node} from '../BaseNode.js';
import {type Extras, Metric} from '../Metric.js';
import type * as Lantern from '../types/lantern.js';

/**
 * Lantern estimate of Max Potential First Input Delay: the duration of the
 * longest simulated CPU task that ends after FCP.
 */
class MaxPotentialFID extends Metric {
  // eslint-disable-next-line @typescript-eslint/naming-convention
  static override get coefficients(): Lantern.Simulation.MetricCoefficients {
    return {
      intercept: 0,
      optimistic: 0.5,
      pessimistic: 0.5,
    };
  }

  static override getOptimisticGraph(dependencyGraph: Node): Node {
    return dependencyGraph;
  }

  static override getPessimisticGraph(dependencyGraph: Node): Node {
    return dependencyGraph;
  }

  static override getEstimateFromSimulation(simulation: Lantern.Simulation.Result, extras: Extras):
      Lantern.Simulation.Result {
    if (!extras.fcpResult) {
      throw new Error('missing fcpResult');
    }

    // Intentionally use the opposite FCP estimate, a more pessimistic FCP means that more tasks
    // are excluded from the FID computation, so a higher FCP means lower FID for same work.
    const fcpTimeInMs = extras.optimistic ? extras.fcpResult.pessimisticEstimate.timeInMs :
                                            extras.fcpResult.optimisticEstimate.timeInMs;

    const timings = MaxPotentialFID.getTimingsAfterFCP(
        simulation.nodeTimings,
        fcpTimeInMs,
    );

    return {
      // Floor of 16 ms even with no qualifying task — presumably one frame; TODO confirm.
      timeInMs: Math.max(...timings.map(timing => timing.duration), 16),
      nodeTimings: simulation.nodeTimings,
    };
  }

  static override compute(data: Lantern.Simulation.MetricComputationDataInput, extras?: Omit<Extras, 'fidResult'>):
      Promise<Lantern.Metrics.Result> {
    const fcpResult = extras?.fcpResult;
    if (!fcpResult) {
      throw new Error('FCP is required to calculate the Max Potential FID metric');
    }

    return super.compute(data, extras);
  }

  /** CPU-node timings whose end falls after the given FCP time. */
  static getTimingsAfterFCP(nodeTimings: Lantern.Simulation.Result['nodeTimings'], fcpTimeInMs: number):
      Array<{duration: number}> {
    return Array.from(nodeTimings.entries())
        .filter(([node, timing]) => node.type === BaseNode.types.CPU && timing.endTime > fcpTimeInMs)
        .map(([_, timing]) => timing);
  }
}

export {MaxPotentialFID};
diff --git a/front_end/models/trace/lantern/metrics/SpeedIndex.test.ts b/front_end/models/trace/lantern/metrics/SpeedIndex.test.ts
new file mode 100644
index 00000000000..fbecf034117
--- /dev/null
+++ b/front_end/models/trace/lantern/metrics/SpeedIndex.test.ts
@@ -0,0 +1,92 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import * as Lantern from '../lantern.js';
import {getComputationDataFromFixture, loadTrace} from '../testing/MetricTestUtils.js';

const {SpeedIndex, FirstContentfulPaint} = Lantern.Metrics;

const defaultThrottling = Lantern.Simulation.Constants.throttling.mobileSlow4G;

describe('Metrics: Lantern Speed Index', () => {
  let trace: Lantern.Trace;
  before(async function() {
    trace = await loadTrace(this, 'lantern/progressive-app/trace.json.gz');
  });

  it('should compute predicted value', async () => {
    const data = await getComputationDataFromFixture({trace});
    // TODO: observedSpeedIndex is from the Speedline library, and is used for optimistic
    // mode. At the moment callers must pass the result into Lantern.
    const observedSpeedIndex = 379.04474997520487;
    const result = await SpeedIndex.compute(data, {
      fcpResult: await FirstContentfulPaint.compute(data),
      observedSpeedIndex,
    });

    assert.deepStrictEqual(
        {
          timing: Math.round(result.timing),
          optimistic: Math.round(result.optimisticEstimate.timeInMs),
          pessimistic: Math.round(result.pessimisticEstimate.timeInMs),
        },
        {
          timing: 1107,
          optimistic: 379,
          pessimistic: 1122,
        });
  });

  it('should compute predicted value for different settings', async () => {
    const settings: Lantern.Simulation.Settings = {
      throttlingMethod: 'simulate',
      throttling: {...defaultThrottling, rttMs: 300},
      // @ts-expect-error: not needed for test
      networkAnalysis: null,
    };
    const data = await getComputationDataFromFixture({trace, settings});
    const observedSpeedIndex = 379.04474997520487;
    const result = await SpeedIndex.compute(data, {
      fcpResult: await FirstContentfulPaint.compute(data),
      observedSpeedIndex,
    });

    assert.deepStrictEqual(
        {
          timing: Math.round(result.timing),
          optimistic: Math.round(result.optimisticEstimate.timeInMs),
          pessimistic: Math.round(result.pessimisticEstimate.timeInMs),
        },
        {
          timing: 2007,
          optimistic: 379,
          pessimistic: 2022,
        });
  });

  it('should not scale coefficients at default', async () => {
    const result = SpeedIndex.getScaledCoefficients(defaultThrottling.rttMs);
    expect(result).to.deep.equal(SpeedIndex.coefficients);
  });

  it('should scale coefficients back', async () => {
    const result = SpeedIndex.getScaledCoefficients(5);
    expect(result).to.deep.equal({intercept: 0, pessimistic: 0.5, optimistic: 0.5});
  });

  it('should scale coefficients forward', async () => {
    const result = SpeedIndex.getScaledCoefficients(300);
    assert.deepStrictEqual(result, {
      intercept: 0,
      optimistic: 2.525,
      pessimistic: 0.275,
    });
  });
});
diff --git a/front_end/models/trace/lantern/metrics/SpeedIndex.ts b/front_end/models/trace/lantern/metrics/SpeedIndex.ts
new file mode 100644
index 00000000000..14df9081cf9
--- /dev/null
+++ b/front_end/models/trace/lantern/metrics/SpeedIndex.ts
@@ -0,0 +1,130 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import {BaseNode, type Node} from '../BaseNode.js';
import {type Extras, Metric} from '../Metric.js';
import type * as Lantern from '../types/lantern.js';

// RTT of the default (mobileSlow4G) throttling preset; anchor for coefficient scaling.
const mobileSlow4GRtt = 150;

/** Lantern estimate of Speed Index. */
class SpeedIndex extends Metric {
  // eslint-disable-next-line @typescript-eslint/naming-convention
  static override get coefficients(): Lantern.Simulation.MetricCoefficients {
    return {
      // Note that the optimistic estimate is based on the real observed speed index rather than a
      // real lantern graph (and the final estimate will be Math.max(FCP, Speed Index)).
      intercept: 0,
      optimistic: 1.4,
      pessimistic: 0.4,
    };
  }

  static override getScaledCoefficients(rttMs: number):
      Lantern.Simulation.MetricCoefficients {  // eslint-disable-line no-unused-vars
    // We want to scale our default coefficients based on the speed of the connection.
    // We will linearly interpolate coefficients for the passed-in rttMs based on two pre-determined points:
    // 1. Baseline point of 30 ms RTT where Speed Index should be a ~50/50 blend of optimistic/pessimistic.
    //    30 ms was based on a typical home WiFi connection's actual RTT.
    //    Coefficients here follow from the fact that the optimistic estimate should be very close
    //    to reality at this connection speed and the pessimistic estimate compensates for minor
    //    connection speed differences.
    // 2. Default throttled point of 150 ms RTT where the default coefficients have been determined to be most accurate.
    //    Coefficients here were determined through thorough analysis and linear regression on the
    //    lantern test data set. See core/scripts/test-lantern.sh for more detail.
    // While the coefficients haven't been analyzed at the interpolated points, it's our current best effort.
    const defaultCoefficients = this.coefficients;
    const defaultRttExcess = mobileSlow4GRtt - 30;
    const multiplier = Math.max((rttMs - 30) / defaultRttExcess, 0);

    return {
      intercept: defaultCoefficients.intercept * multiplier,
      optimistic: 0.5 + (defaultCoefficients.optimistic - 0.5) * multiplier,
      pessimistic: 0.5 + (defaultCoefficients.pessimistic - 0.5) * multiplier,
    };
  }

  static override getOptimisticGraph(dependencyGraph: Node): Node {
    return dependencyGraph;
  }

  static override getPessimisticGraph(dependencyGraph: Node): Node {
    return dependencyGraph;
  }

  static override getEstimateFromSimulation(simulationResult: Lantern.Simulation.Result, extras: Extras):
      Lantern.Simulation.Result {
    if (!extras.fcpResult) {
      throw new Error('missing fcpResult');
    }
    if (extras.observedSpeedIndex === undefined) {
      throw new Error('missing observedSpeedIndex');
    }

    const fcpTimeInMs = extras.fcpResult.pessimisticEstimate.timeInMs;
    const estimate = extras.optimistic ?
        extras.observedSpeedIndex :
        SpeedIndex.computeLayoutBasedSpeedIndex(simulationResult.nodeTimings, fcpTimeInMs);
    return {
      timeInMs: estimate,
      nodeTimings: simulationResult.nodeTimings,
    };
  }

  static override async compute(
      data: Lantern.Simulation.MetricComputationDataInput,
      extras?: Omit<Extras, 'speedIndexResult'>): Promise<Lantern.Metrics.Result> {
    const fcpResult = extras?.fcpResult;
    if (!fcpResult) {
      throw new Error('FCP is required to calculate the SpeedIndex metric');
    }

    const metricResult = await super.compute(data, extras);
    // Speed Index can never be lower than FCP; clamp to at least the FCP estimate.
    metricResult.timing = Math.max(metricResult.timing, fcpResult.timing);
    return metricResult;
  }

  /**
   * Approximate speed index using layout events from the simulated node timings.
   * The layout-based speed index is the weighted average of the endTime of CPU nodes that contained
   * a 'Layout' task. log(duration) is used as the weight to stand for "significance" to the page.
   *
   * If no layout events can be found or the endTime of a CPU task is too early, FCP is used instead.
   *
   * This approach was determined after evaluating the accuracy/complexity tradeoff of many
   * different methods. Read more in the evaluation doc.
   *
   * @see https://docs.google.com/document/d/1qJWXwxoyVLVadezIp_Tgdk867G3tDNkkVRvUJSH3K1E/edit#
   */
  static computeLayoutBasedSpeedIndex(nodeTimings: Lantern.Simulation.Result['nodeTimings'], fcpTimeInMs: number):
      number {
    const layoutWeights: Array<{time: number, weight: number}> = [];
    for (const [node, timing] of nodeTimings.entries()) {
      if (node.type !== BaseNode.types.CPU) {
        continue;
      }

      if (node.childEvents.some(x => x.name === 'Layout')) {
        const timingWeight = Math.max(Math.log2(timing.endTime - timing.startTime), 0);
        layoutWeights.push({time: timing.endTime, weight: timingWeight});
      }
    }

    const totalWeightedTime =
        layoutWeights.map(evt => evt.weight * Math.max(evt.time, fcpTimeInMs)).reduce((a, b) => a + b, 0);
    const totalWeight = layoutWeights.map(evt => evt.weight).reduce((a, b) => a + b, 0);

    if (!totalWeight) {
      return fcpTimeInMs;
    }
    return totalWeightedTime / totalWeight;
  }
}

export {SpeedIndex};
diff --git a/front_end/models/trace/lantern/metrics/TotalBlockingTime.ts b/front_end/models/trace/lantern/metrics/TotalBlockingTime.ts
new file mode 100644
index 00000000000..dff32acf64c
--- /dev/null
+++ b/front_end/models/trace/lantern/metrics/TotalBlockingTime.ts
@@ -0,0 +1,114 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode, type Node} from '../BaseNode.js'; +import {type Extras, Metric} from '../Metric.js'; +import {BLOCKING_TIME_THRESHOLD, calculateSumOfBlockingTime} from '../TBTUtils.js'; +import type * as Lantern from '../types/lantern.js'; + +class TotalBlockingTime extends Metric { + // eslint-disable-next-line @typescript-eslint/naming-convention + static override get coefficients(): Lantern.Simulation.MetricCoefficients { + return { + intercept: 0, + optimistic: 0.5, + pessimistic: 0.5, + }; + } + + static override getOptimisticGraph(dependencyGraph: Node): Node { + return dependencyGraph; + } + + static override getPessimisticGraph(dependencyGraph: Node): Node { + return dependencyGraph; + } + + static override getEstimateFromSimulation(simulation: Lantern.Simulation.Result, extras: Extras): + Lantern.Simulation.Result { + if (!extras.fcpResult) { + throw new Error('missing fcpResult'); + } + if (!extras.interactiveResult) { + throw new Error('missing interactiveResult'); + } + + // Intentionally use the opposite FCP estimate. A pessimistic FCP is higher than equal to an + // optimistic FCP, which means potentially more tasks are excluded from the Total Blocking Time + // computation. So a more pessimistic FCP gives a more optimistic Total Blocking Time for the + // same work. + const fcpTimeInMs = extras.optimistic ? extras.fcpResult.pessimisticEstimate.timeInMs : + extras.fcpResult.optimisticEstimate.timeInMs; + + // Similarly, we always have pessimistic TTI >= optimistic TTI. Therefore, picking optimistic + // TTI means our window of interest is smaller and thus potentially more tasks are excluded from + // Total Blocking Time computation, yielding a lower (more optimistic) Total Blocking Time value + // for the same work. + const interactiveTimeMs = extras.optimistic ? 
extras.interactiveResult.optimisticEstimate.timeInMs : + extras.interactiveResult.pessimisticEstimate.timeInMs; + + const minDurationMs = BLOCKING_TIME_THRESHOLD; + + const events = TotalBlockingTime.getTopLevelEvents( + simulation.nodeTimings, + minDurationMs, + ); + + return { + timeInMs: calculateSumOfBlockingTime( + events, + fcpTimeInMs, + interactiveTimeMs, + ), + nodeTimings: simulation.nodeTimings, + }; + } + + static override async compute( + data: Lantern.Simulation.MetricComputationDataInput, + extras?: Omit): Promise { + const fcpResult = extras?.fcpResult; + if (!fcpResult) { + throw new Error('FCP is required to calculate the TBT metric'); + } + + const interactiveResult = extras?.fcpResult; + if (!interactiveResult) { + throw new Error('Interactive is required to calculate the TBT metric'); + } + + return super.compute(data, extras); + } + + static getTopLevelEvents(nodeTimings: Lantern.Simulation.Result['nodeTimings'], minDurationMs: number): + {start: number, end: number, duration: number}[] { + const events: Array<{start: number, end: number, duration: number}> = []; + + for (const [node, timing] of nodeTimings.entries()) { + if (node.type !== BaseNode.types.CPU) { + continue; + } + // Filtering out events below minimum duration. + if (timing.duration < minDurationMs) { + continue; + } + + events.push({ + start: timing.startTime, + end: timing.endTime, + duration: timing.duration, + }); + } + + return events; + } +} + +export {TotalBlockingTime}; diff --git a/front_end/models/trace/lantern/metrics/metrics.ts b/front_end/models/trace/lantern/metrics/metrics.ts new file mode 100644 index 00000000000..e56ba44230d --- /dev/null +++ b/front_end/models/trace/lantern/metrics/metrics.ts @@ -0,0 +1,19 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type * as Lantern from '../types/lantern.js';

// Barrel module: re-exports the individual Lantern metric estimators and the
// shared metric Result type.
export {FirstContentfulPaint} from './FirstContentfulPaint.js';
export {Interactive} from './Interactive.js';
export {LargestContentfulPaint} from './LargestContentfulPaint.js';
export {MaxPotentialFID} from './MaxPotentialFID.js';
export {SpeedIndex} from './SpeedIndex.js';
export {TotalBlockingTime} from './TotalBlockingTime.js';
export type Result = Lantern.Metrics.Result;
diff --git a/front_end/models/trace/lantern/simulation/ConnectionPool.test.ts b/front_end/models/trace/lantern/simulation/ConnectionPool.test.ts
new file mode 100644
index 00000000000..59da02b219a
--- /dev/null
+++ b/front_end/models/trace/lantern/simulation/ConnectionPool.test.ts
@@ -0,0 +1,193 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * @license
 * Copyright 2018 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

// @ts-nocheck TODO(crbug.com/348449529)

import * as Lantern from '../lantern.js';

const {ConnectionPool} = Lantern.Simulation;

describe('ConnectionPool', () => {
  const rtt = 100;
  const throughput = 10000 * 1024;
  let requestId: number;

  // Builds a minimal NetworkRequest; fields can be overridden via `data`.
  function request(data: Partial<Lantern.NetworkRequest> = {}): Lantern.NetworkRequest {
    const url = data.url || 'http://example.com';
    const origin = new URL(url).origin;
    const scheme = url.split(':')[0];

    return {
      requestId: String(requestId++),
      url,
      protocol: 'http/1.1',
      parsedURL: {scheme, securityOrigin: origin},
      ...data,
    };
  }

  function simulationOptions(options) {
    return Object.assign(
        {
          rtt: 150,
          throughput: 1024,
          additionalRttByOrigin: new Map(),
          serverResponseTimeByOrigin: new Map(),
        },
        options,
    );
  }

  beforeEach(() => {
    requestId = 1;
  });

  describe('#constructor', () => {
    it('should create the pool', () => {
      const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput}));
      // Make sure 6 connections are created for each origin
      assert.strictEqual(pool._connectionsByOrigin.get('http://example.com').length, 6);
      // Make sure it populates connectionWasReused
      assert.strictEqual(pool._connectionReusedByRequestId.get('1'), false);

      const connection = pool._connectionsByOrigin.get('http://example.com')[0];
      assert.strictEqual(connection._rtt, rtt);
      assert.strictEqual(connection._throughput, throughput);
      assert.strictEqual(connection._serverLatency, 30);  // sets to default value
    });

    it('should set TLS properly', () => {
      const recordA = request({url: 'https://example.com'});
      const pool = new ConnectionPool([recordA], simulationOptions({rtt, throughput}));
      const connection = pool._connectionsByOrigin.get('https://example.com')[0];
      assert.ok(connection._ssl, 'should have set connection TLS');
    });

    it('should set H2 properly', () => {
      const recordA = request({protocol: 'h2'});
      const pool = new ConnectionPool([recordA], simulationOptions({rtt, throughput}));
      const connection = pool._connectionsByOrigin.get('http://example.com')[0];
      assert.ok(connection.isH2(), 'should have set HTTP/2');
      assert.strictEqual(pool._connectionsByOrigin.get('http://example.com').length, 1);
    });

    it('should set origin-specific RTT properly', () => {
      const additionalRttByOrigin = new Map([['http://example.com', 63]]);
      const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput, additionalRttByOrigin}));
      const connection = pool._connectionsByOrigin.get('http://example.com')[0];
      // Bug fix: was `assert.ok(connection._rtt, rtt + 63)`, which passes for any
      // truthy _rtt (the second argument is just the failure message).
      assert.strictEqual(connection._rtt, rtt + 63);
    });

    it('should set origin-specific server latency properly', () => {
      const serverResponseTimeByOrigin = new Map([['http://example.com', 63]]);
      const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput, serverResponseTimeByOrigin}));
      const connection = pool._connectionsByOrigin.get('http://example.com')[0];
      // Bug fix: was `assert.ok(connection._serverLatency, 63)` — vacuously true.
      assert.strictEqual(connection._serverLatency, 63);
    });
  });

  describe('.acquire', () => {
    it('should remember the connection associated with each request', () => {
      const requestA = request();
      const requestB = request();
      const pool = new ConnectionPool([requestA, requestB], simulationOptions({rtt, throughput}));

      const connectionForA = pool.acquire(requestA);
      const connectionForB = pool.acquire(requestB);
      for (let i = 0; i < 10; i++) {
        assert.strictEqual(pool.acquireActiveConnectionFromRequest(requestA), connectionForA);
        assert.strictEqual(pool.acquireActiveConnectionFromRequest(requestB), connectionForB);
      }

      assert.deepStrictEqual(pool.connectionsInUse(), [connectionForA, connectionForB]);
    });

    it('should allocate at least 6 connections', () => {
      const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput}));
      for (let i = 0; i < 6; i++) {
        assert.ok(pool.acquire(request()), `did not find connection for ${i}th request`);
      }
    });

    it('should allocate all connections', () => {
      const records = new Array(7).fill(undefined, 0, 7).map(() => request());
      const pool = new ConnectionPool(records, simulationOptions({rtt, throughput}));
      const connections = records.map(request => pool.acquire(request));
      assert.ok(connections[0], 'did not find connection for 1st request');
      assert.ok(connections[5], 'did not find connection for 6th request');
      assert.ok(connections[6], 'did not find connection for 7th request');
    });

    it('should be oblivious to connection reuse', () => {
      const coldRecord = request();
      const warmRecord = request();
      const pool = new ConnectionPool([coldRecord, warmRecord], simulationOptions({rtt, throughput}));
      pool._connectionReusedByRequestId.set(warmRecord.requestId, true);

      assert.ok(pool.acquire(coldRecord), 'should have acquired connection');
      assert.ok(pool.acquire(warmRecord), 'should have acquired connection');
      pool.release(coldRecord);

      for (const connection of pool._connectionsByOrigin.get('http://example.com')) {
        connection.setWarmed(true);
      }

      assert.ok(pool.acquire(coldRecord), 'should have acquired connection');
      assert.ok(pool.acquireActiveConnectionFromRequest(warmRecord), 'should have acquired connection');
    });

    it('should acquire in order of warmness', () => {
      const recordA = request();
      const recordB = request();
      const recordC = request();
      const pool = new ConnectionPool([recordA, recordB, recordC], simulationOptions({rtt, throughput}));
      pool._connectionReusedByRequestId.set(recordA.requestId, true);
      pool._connectionReusedByRequestId.set(recordB.requestId, true);
      pool._connectionReusedByRequestId.set(recordC.requestId, true);

      const [connectionWarm, connectionWarmer, connectionWarmest] = pool._connectionsByOrigin.get('http://example.com');
      connectionWarm.setWarmed(true);
      connectionWarm.setCongestionWindow(10);
      connectionWarmer.setWarmed(true);
      connectionWarmer.setCongestionWindow(100);
      connectionWarmest.setWarmed(true);
      connectionWarmest.setCongestionWindow(1000);

      assert.strictEqual(pool.acquire(recordA), connectionWarmest);
      assert.strictEqual(pool.acquire(recordB), connectionWarmer);
      assert.strictEqual(pool.acquire(recordC), connectionWarm);
    });
  });

  describe('.release', () => {
    it('noop for request without connection', () => {
      const requestA = request();
      const pool = new ConnectionPool([requestA], simulationOptions({rtt, throughput}));
      assert.strictEqual(pool.release(requestA), undefined);
    });

    it('frees the connection for reissue', () => {
      const requests = new Array(6).fill(undefined, 0, 7).map(() => request());
      const pool = new ConnectionPool(requests, simulationOptions({rtt, throughput}));
      requests.push(request());

      requests.forEach(request => pool.acquire(request));

      assert.strictEqual(pool.connectionsInUse().length, 6);
      assert.ok(!pool.acquire(requests[6]), 'had connection that is in use');

      pool.release(requests[0]);
      assert.strictEqual(pool.connectionsInUse().length, 5);

      assert.ok(pool.acquire(requests[6]), 'could not reissue released connection');
      assert.ok(!pool.acquire(requests[0]), 'had connection that is in use');
    });
  });
});
diff --git a/front_end/models/trace/lantern/simulation/ConnectionPool.ts b/front_end/models/trace/lantern/simulation/ConnectionPool.ts
new file mode 100644
index 00000000000..7b9d880c32c
--- /dev/null
+++ b/front_end/models/trace/lantern/simulation/ConnectionPool.ts
@@ -0,0 +1,156 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * @license
 * Copyright 2018 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import type * as Lantern from '../types/lantern.js';

import {NetworkAnalyzer} from './NetworkAnalyzer.js';
import {TcpConnection} from './TcpConnection.js';

const DEFAULT_SERVER_RESPONSE_TIME = 30;
const TLS_SCHEMES = ['https', 'wss'];

// Each origin can have 6 simultaneous connections open
// https://cs.chromium.org/chromium/src/net/socket/client_socket_pool_manager.cc?type=cs&q="int+g_max_sockets_per_group"
const CONNECTIONS_PER_ORIGIN = 6;

/** Manages simulated TCP connections per origin for the Lantern simulator. */
export class ConnectionPool {
  _options: Required<Lantern.Simulation.Options>;
  _records: Lantern.NetworkRequest[];
  _connectionsByOrigin: Map<string, TcpConnection[]>;
  _connectionsByRequest: Map<Lantern.NetworkRequest, TcpConnection>;
  _connectionsInUse: Set<TcpConnection>;
  _connectionReusedByRequestId: Map<string, boolean>;

  constructor(records: Lantern.NetworkRequest[], options: Required<Lantern.Simulation.Options>) {
    this._options = options;

    this._records = records;
    this._connectionsByOrigin = new Map();
    this._connectionsByRequest = new Map();
    this._connectionsInUse = new Set();
    this._connectionReusedByRequestId = NetworkAnalyzer.estimateIfConnectionWasReused(records, {
      forceCoarseEstimates: true,
    });

    this._initializeConnections();
  }

  connectionsInUse(): TcpConnection[] {
    return Array.from(this._connectionsInUse);
  }

  _initializeConnections(): void {
    const connectionReused = this._connectionReusedByRequestId;
    const additionalRttByOrigin = this._options.additionalRttByOrigin;
    const serverResponseTimeByOrigin = this._options.serverResponseTimeByOrigin;

    const recordsByOrigin = NetworkAnalyzer.groupByOrigin(this._records);
    for (const [origin, requests] of recordsByOrigin.entries()) {
      const connections = [];
      const additionalRtt = additionalRttByOrigin.get(origin) || 0;
      const responseTime = serverResponseTimeByOrigin.get(origin) || DEFAULT_SERVER_RESPONSE_TIME;

      for (const request of requests) {
        // Reused connections don't need a new TcpConnection of their own.
        if (connectionReused.get(request.requestId)) {
          continue;
        }

        const isTLS = TLS_SCHEMES.includes(request.parsedURL.scheme);
        const isH2 = request.protocol === 'h2';
        const connection = new TcpConnection(
            this._options.rtt + additionalRtt,
            this._options.throughput,
            responseTime,
            isTLS,
            isH2,
        );

        connections.push(connection);
      }

      if (!connections.length) {
        throw new Error(`Could not find a connection for origin: ${origin}`);
      }

      // Make sure each origin has minimum number of connections available for max throughput.
      // But only if it's not over H2 which maximizes throughput already.
      const minConnections = connections[0].isH2() ? 1 : CONNECTIONS_PER_ORIGIN;
      while (connections.length < minConnections) {
        connections.push(connections[0].clone());
      }

      this._connectionsByOrigin.set(origin, connections);
    }
  }

  _findAvailableConnectionWithLargestCongestionWindow(connections: TcpConnection[]): TcpConnection|null {
    let maxConnection: TcpConnection|null = null;
    for (let i = 0; i < connections.length; i++) {
      const connection = connections[i];

      // Connections that are in use are never available.
      if (this._connectionsInUse.has(connection)) {
        continue;
      }

      // This connection is a match and is available! Update our max if it has a larger congestionWindow
      const currentMax = (maxConnection?.congestionWindow) || -Infinity;
      if (connection.congestionWindow > currentMax) {
        maxConnection = connection;
      }
    }

    return maxConnection;
  }

  /**
   * This method finds an available connection to the origin specified by the network request or null
   * if no connection was available. If returned, connection will not be available for other network
   * records until release is called.
   */
  acquire(request: Lantern.NetworkRequest): TcpConnection|null {
    if (this._connectionsByRequest.has(request)) {
      throw new Error('Record already has a connection');
    }

    const origin = request.parsedURL.securityOrigin;
    const connections = this._connectionsByOrigin.get(origin) || [];
    const connectionToUse = this._findAvailableConnectionWithLargestCongestionWindow(connections);

    if (!connectionToUse) {
      return null;
    }

    this._connectionsInUse.add(connectionToUse);
    this._connectionsByRequest.set(request, connectionToUse);
    return connectionToUse;
  }

  /**
   * Return the connection currently being used to fetch a request. If no connection
   * currently being used for this request, an error will be thrown.
   */
  acquireActiveConnectionFromRequest(request: Lantern.NetworkRequest): TcpConnection {
    const activeConnection = this._connectionsByRequest.get(request);
    if (!activeConnection) {
      throw new Error('Could not find an active connection for request');
    }

    return activeConnection;
  }

  release(request: Lantern.NetworkRequest): void {
    const connection = this._connectionsByRequest.get(request);
    this._connectionsByRequest.delete(request);
    if (connection) {
      this._connectionsInUse.delete(connection);
    }
  }
}
diff --git a/front_end/models/trace/lantern/simulation/Constants.ts b/front_end/models/trace/lantern/simulation/Constants.ts
new file mode 100644
index 00000000000..8d8cba03f3e
--- /dev/null
+++ b/front_end/models/trace/lantern/simulation/Constants.ts
@@ -0,0 +1,52 @@
// Copyright 2024 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * @license
 * Copyright 2024 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

// Adjustment factors folded into the request-latency/throughput values below.
const DEVTOOLS_RTT_ADJUSTMENT_FACTOR = 3.75;
const DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR = 0.9;

// Throttling presets used by the Lantern simulation.
const throttling = {
  DEVTOOLS_RTT_ADJUSTMENT_FACTOR,
  DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR,
  // These values align with WebPageTest's definition of "Fast 3G"
  // But offer similar characteristics to roughly the 75th percentile of 4G connections.
  mobileSlow4G: {
    rttMs: 150,
    throughputKbps: 1.6 * 1024,
    requestLatencyMs: 150 * DEVTOOLS_RTT_ADJUSTMENT_FACTOR,
    downloadThroughputKbps: 1.6 * 1024 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR,
    uploadThroughputKbps: 750 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR,
    cpuSlowdownMultiplier: 4,
  },
  // These values partially align with WebPageTest's definition of "Regular 3G".
  // These values are meant to roughly align with Chrome UX report's 3G definition which are based
  // on HTTP RTT of 300-1400ms and downlink throughput of <700kbps.
  mobileRegular3G: {
    rttMs: 300,
    throughputKbps: 700,
    requestLatencyMs: 300 * DEVTOOLS_RTT_ADJUSTMENT_FACTOR,
    downloadThroughputKbps: 700 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR,
    uploadThroughputKbps: 700 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR,
    cpuSlowdownMultiplier: 4,
  },
  // Using a "broadband" connection type
  // Corresponds to "Dense 4G 25th percentile" in https://docs.google.com/document/d/1Ft1Bnq9-t4jK5egLSOc28IL4TvR-Tt0se_1faTA4KTY/edit#heading=h.bb7nfy2x9e5v
  desktopDense4G: {
    rttMs: 40,
    throughputKbps: 10 * 1024,
    cpuSlowdownMultiplier: 1,
    requestLatencyMs: 0,  // 0 means unset
    downloadThroughputKbps: 0,
    uploadThroughputKbps: 0,
  },
};

const Constants = {throttling};

export {Constants};
diff --git a/front_end/models/trace/lantern/simulation/DNSCache.test.ts b/front_end/models/trace/lantern/simulation/DNSCache.test.ts
new file mode 100644
index 00000000000..1cc81bd0383
--- /dev/null
+++
b/front_end/models/trace/lantern/simulation/DNSCache.test.ts @@ -0,0 +1,84 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2018 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-nocheck - TODO(crbug.com/348449529) requests need to be whole Lantern.NetworkRequest objects + +import * as Lantern from '../lantern.js'; + +const {DNSCache} = Lantern.Simulation; + +const MULTIPLIER = DNSCache.rttMultiplier; + +describe('DNSCache', () => { + let dns: Lantern.Simulation.DNSCache; + let request: Lantern.NetworkRequest; + + beforeEach(() => { + dns = new DNSCache({rtt: 100}); + request = { + parsedURL: { + host: 'example.com', + scheme: 'https', + securityOrigin: '', + }, + } as Lantern.NetworkRequest; + }); + + describe('.getTimeUntilResolution', () => { + it('should return the RTT multiplied', () => { + const resolutionTime = dns.getTimeUntilResolution(request); + expect(resolutionTime).to.equal(100 * MULTIPLIER); + }); + + it('should return time with requestedAt', () => { + const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1500}); + expect(resolutionTime).to.equal(100 * MULTIPLIER); + }); + + it('should not cache by default', () => { + dns.getTimeUntilResolution(request, {requestedAt: 0}); + const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1000}); + expect(resolutionTime).to.equal(100 * MULTIPLIER); + }); + + it('should cache when told', () => { + dns.getTimeUntilResolution(request, {requestedAt: 0, shouldUpdateCache: true}); + const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1000}); + expect(resolutionTime).to.equal(0); + }); + + it('should cache by domain', () => { + dns.getTimeUntilResolution(request, {requestedAt: 0, shouldUpdateCache: true}); + const otherRequest = {parsedURL: {host: 'other-example.com'}}; + const resolutionTime 
= dns.getTimeUntilResolution(otherRequest, {requestedAt: 1000}); + expect(resolutionTime).to.equal(100 * MULTIPLIER); + }); + + it('should not update cache with later times', () => { + dns.getTimeUntilResolution(request, {requestedAt: 1000, shouldUpdateCache: true}); + dns.getTimeUntilResolution(request, {requestedAt: 1500, shouldUpdateCache: true}); + dns.getTimeUntilResolution(request, {requestedAt: 500, shouldUpdateCache: true}); + dns.getTimeUntilResolution(request, {requestedAt: 5000, shouldUpdateCache: true}); + + expect(dns.getTimeUntilResolution(request, {requestedAt: 0})).to.equal(100 * MULTIPLIER); + expect(dns.getTimeUntilResolution(request, {requestedAt: 550})).to.equal(100 * MULTIPLIER - 50); + expect(dns.getTimeUntilResolution(request, {requestedAt: 1000})).to.equal(0); + expect(dns.getTimeUntilResolution(request, {requestedAt: 2000})).to.equal(0); + }); + }); + + describe('.setResolvedAt', () => { + it('should set the DNS resolution time for a request', () => { + dns.setResolvedAt(request.parsedURL.host, 123); + const resolutionTime = dns.getTimeUntilResolution(request); + expect(resolutionTime).to.equal(123); + }); + }); +}); diff --git a/front_end/models/trace/lantern/simulation/DNSCache.ts b/front_end/models/trace/lantern/simulation/DNSCache.ts new file mode 100644 index 00000000000..f5f75964723 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/DNSCache.ts @@ -0,0 +1,67 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2018 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type * as Lantern from '../types/lantern.js'; + +// A DNS lookup will usually take ~1-2 roundtrips of connection latency plus the extra DNS routing time. 
+// Example: https://www.webpagetest.org/result/180703_3A_e33ec79747c002ed4d7bcbfc81462203/1/details/#waterfall_view_step1 +// Example: https://www.webpagetest.org/result/180707_1M_89673eb633b5d98386de95dfcf9b33d5/1/details/#waterfall_view_step1 +// DNS is highly variable though, many times it's a little more than 1, but can easily be 4-5x RTT. +// We'll use 2 since it seems to give the most accurate results on average, but this can be tweaked. +const DNS_RESOLUTION_RTT_MULTIPLIER = 2; + +class DNSCache { + static rttMultiplier = DNS_RESOLUTION_RTT_MULTIPLIER; + + _rtt: number; + _resolvedDomainNames: Map; + + constructor({rtt}: {rtt: number}) { + this._rtt = rtt; + this._resolvedDomainNames = new Map(); + } + + getTimeUntilResolution(request: Lantern.NetworkRequest, options?: {requestedAt: number, shouldUpdateCache: boolean}): + number { + const {requestedAt = 0, shouldUpdateCache = false} = options || {}; + + const domain = request.parsedURL.host; + const cacheEntry = this._resolvedDomainNames.get(domain); + let timeUntilResolved = this._rtt * DNSCache.rttMultiplier; + if (cacheEntry) { + const timeUntilCachedIsResolved = Math.max(cacheEntry.resolvedAt - requestedAt, 0); + timeUntilResolved = Math.min(timeUntilCachedIsResolved, timeUntilResolved); + } + + const resolvedAt = requestedAt + timeUntilResolved; + if (shouldUpdateCache) { + this._updateCacheResolvedAtIfNeeded(request, resolvedAt); + } + + return timeUntilResolved; + } + + _updateCacheResolvedAtIfNeeded(request: Lantern.NetworkRequest, resolvedAt: number): void { + const domain = request.parsedURL.host; + const cacheEntry = this._resolvedDomainNames.get(domain) || {resolvedAt}; + cacheEntry.resolvedAt = Math.min(cacheEntry.resolvedAt, resolvedAt); + this._resolvedDomainNames.set(domain, cacheEntry); + } + + /** + * Forcefully sets the DNS resolution time for a request. + * Useful for testing and alternate execution simulations. 
+ */ + setResolvedAt(domain: string, resolvedAt: number): void { + this._resolvedDomainNames.set(domain, {resolvedAt}); + } +} + +export {DNSCache}; diff --git a/front_end/models/trace/lantern/simulation/NetworkAnalyzer.test.ts b/front_end/models/trace/lantern/simulation/NetworkAnalyzer.test.ts new file mode 100644 index 00000000000..1258ec70eac --- /dev/null +++ b/front_end/models/trace/lantern/simulation/NetworkAnalyzer.test.ts @@ -0,0 +1,503 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2018 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-nocheck TODO(crbug.com/348449529) + +import * as TraceModel from '../../trace.js'; +import * as Lantern from '../lantern.js'; +import {loadTrace, runTraceEngine} from '../testing/MetricTestUtils.js'; + +const {NetworkAnalyzer} = Lantern.Simulation; + +async function createRequests(trace: Lantern.Trace) { + const traceEngineData = await runTraceEngine(trace); + return TraceModel.LanternComputationData.createNetworkRequests(trace, traceEngineData); +} + +describe('NetworkAnalyzer', () => { + let trace: Lantern.Trace; + let traceWithRedirect: Lantern.Trace; + before(async function() { + trace = await loadTrace(this, 'lantern/paul/trace.json.gz'); + traceWithRedirect = await loadTrace(this, 'lantern/redirect/trace.json.gz'); + }); + + let recordId; + + function createRecord(opts) { + const url = opts.url || 'https://example.com'; + if (opts.networkRequestTime) { + opts.networkRequestTime *= 1000; + } + if (opts.networkEndTime) { + opts.networkEndTime *= 1000; + } + return Object.assign( + { + url, + requestId: recordId++, + connectionId: 0, + connectionReused: false, + networkRequestTime: 10, + networkEndTime: 10, + transferSize: 0, + protocol: opts.protocol || 'http/1.1', + parsedURL: {scheme: url.match(/https?/)[0], securityOrigin: 
url.match(/.*\.com/)[0]}, + timing: opts.timing || null, + }, + opts, + ); + } + + beforeEach(() => { + recordId = 1; + }); + + function assertCloseEnough(valueA, valueB, threshold = 1) { + const message = `${valueA} was not close enough to ${valueB}`; + assert.ok(Math.abs(valueA - valueB) < threshold, message); + } + + describe('#estimateIfConnectionWasReused', () => { + it('should use built-in value when trustworthy', () => { + const records = [ + {requestId: 1, connectionId: 1, connectionReused: false}, + {requestId: 2, connectionId: 1, connectionReused: true}, + {requestId: 3, connectionId: 2, connectionReused: false}, + {requestId: 4, connectionId: 3, connectionReused: false}, + {requestId: 5, connectionId: 2, connectionReused: true}, + ]; + + const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); + const expected = new Map([[1, false], [2, true], [3, false], [4, false], [5, true]]); + assert.deepStrictEqual(result, expected); + }); + + it('should estimate values when not trustworthy (duplicate IDs)', () => { + const records = [ + createRecord({requestId: 1, networkRequestTime: 0, networkEndTime: 15}), + createRecord({requestId: 2, networkRequestTime: 10, networkEndTime: 25}), + createRecord({requestId: 3, networkRequestTime: 20, networkEndTime: 40}), + createRecord({requestId: 4, networkRequestTime: 30, networkEndTime: 40}), + ]; + + const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); + const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); + assert.deepStrictEqual(result, expected); + }); + + it('should estimate values when not trustworthy (connectionReused nonsense)', () => { + const records = [ + createRecord({ + requestId: 1, + connectionId: 1, + connectionReused: true, + networkRequestTime: 0, + networkEndTime: 15, + }), + createRecord({ + requestId: 2, + connectionId: 1, + connectionReused: true, + networkRequestTime: 10, + networkEndTime: 25, + }), + createRecord({ + requestId: 3, + connectionId: 1, 
+ connectionReused: true, + networkRequestTime: 20, + networkEndTime: 40, + }), + createRecord({ + requestId: 4, + connectionId: 2, + connectionReused: false, + networkRequestTime: 30, + networkEndTime: 40, + }), + ]; + + const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); + const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); + assert.deepStrictEqual(result, expected); + }); + + it('should estimate with earliest allowed reuse', () => { + const records = [ + createRecord({requestId: 1, networkRequestTime: 0, networkEndTime: 40}), + createRecord({requestId: 2, networkRequestTime: 10, networkEndTime: 15}), + createRecord({requestId: 3, networkRequestTime: 20, networkEndTime: 30}), + createRecord({requestId: 4, networkRequestTime: 35, networkEndTime: 40}), + ]; + + const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); + const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); + assert.deepStrictEqual(result, expected); + }); + + it('should work on a real trace', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.estimateIfConnectionWasReused(requests); + const distinctConnections = Array.from(result.values()).filter(item => !item).length; + assert.strictEqual(result.size, 25); + assert.strictEqual(distinctConnections, 9); + }); + }); + + describe('#estimateRTTByOrigin', () => { + it('should infer from tcp timing when available', () => { + const timing = {connectStart: 0, connectEnd: 99}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request]); + const expected = {min: 99, max: 99, avg: 99, median: 99}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer only one estimate if tcp and ssl start times are equal', () => { + const timing = {connectStart: 0, connectEnd: 99, sslStart: 0, sslEnd: 99}; + const request = 
createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request]); + const expected = {min: 99, max: 99, avg: 99, median: 99}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer from tcp and ssl timing when available', () => { + const timing = {connectStart: 0, connectEnd: 99, sslStart: 50, sslEnd: 99}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request]); + const expected = {min: 49, max: 50, avg: 49.5, median: 49.5}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer from connection timing when available for h3 (one estimate)', () => { + const timing = {connectStart: 0, connectEnd: 99, sslStart: 1, sslEnd: 99}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing, protocol: 'h3'}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request]); + const expected = {min: 99, max: 99, avg: 99, median: 99}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer from sendStart when available', () => { + const timing = {sendStart: 150}; + // this request took 150ms before Chrome could send the request + // i.e. DNS (maybe) + queuing (maybe) + TCP handshake took ~100ms + // 150ms / 3 round trips ~= 50ms RTT + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request], {coarseEstimateMultiplier: 1}); + const expected = {min: 50, max: 50, avg: 50, median: 50}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer from download timing when available', () => { + const timing = {receiveHeadersEnd: 100}; + // this request took 1000ms after the first byte was received to download the payload + // i.e. 
it took at least one full additional roundtrip after first byte to download the rest + // 1000ms / 1 round trip ~= 1000ms RTT + const request = createRecord({networkRequestTime: 0, networkEndTime: 1.1, transferSize: 28 * 1024, timing}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request], { + coarseEstimateMultiplier: 1, + useHeadersEndEstimates: false, + }); + const expected = {min: 1000, max: 1000, avg: 1000, median: 1000}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should infer from TTFB when available', () => { + const timing = {receiveHeadersEnd: 1000}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing, resourceType: 'Other'}); + const result = NetworkAnalyzer.estimateRTTByOrigin([request], { + coarseEstimateMultiplier: 1, + }); + + // this request's TTFB was 1000ms, it used SSL and was a fresh connection requiring a handshake + // which needs ~4 RTs. We don't know its resource type so it'll be assumed that 40% of it was + // server response time. 
+ // 600 ms / 4 = 150ms + const expected = {min: 150, max: 150, avg: 150, median: 150}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should use coarse estimates on a per-origin basis', () => { + const records = [ + createRecord({url: 'https://example.com', timing: {connectStart: 1, connectEnd: 100, sendStart: 150}}), + createRecord({url: 'https://example2.com', timing: {sendStart: 150}}), + ]; + const result = NetworkAnalyzer.estimateRTTByOrigin(records); + assert.deepStrictEqual(result.get('https://example.com'), {min: 99, max: 99, avg: 99, median: 99}); + assert.deepStrictEqual(result.get('https://example2.com'), {min: 15, max: 15, avg: 15, median: 15}); + }); + + it('should handle untrustworthy connection information', () => { + const timing = {sendStart: 150}; + const recordA = createRecord({networkRequestTime: 0, networkEndTime: 1, timing, connectionReused: true}); + const recordB = createRecord({ + networkRequestTime: 0, + networkEndTime: 1, + timing, + connectionId: 2, + connectionReused: true, + }); + const result = NetworkAnalyzer.estimateRTTByOrigin([recordA, recordB], { + coarseEstimateMultiplier: 1, + }); + const expected = {min: 50, max: 50, avg: 50, median: 50}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should work on a real trace', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.estimateRTTByOrigin(requests); + assertCloseEnough(result.get('https://www.paulirish.com').min, 10); + assertCloseEnough(result.get('https://www.googletagmanager.com').min, 17); + assertCloseEnough(result.get('https://www.google-analytics.com').min, 10); + }); + + it('should approximate well with either method', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.estimateRTTByOrigin(requests).get(NetworkAnalyzer.summary); + const resultApprox = NetworkAnalyzer + .estimateRTTByOrigin(requests, { + 
forceCoarseEstimates: true, + }) + .get(NetworkAnalyzer.summary); + assertCloseEnough(result.min, resultApprox.min, 20); + assertCloseEnough(result.avg, resultApprox.avg, 30); + assertCloseEnough(result.median, resultApprox.median, 30); + }); + }); + + describe('#estimateServerResponseTimeByOrigin', () => { + it('should estimate server response time using ttfb times', () => { + const timing = {sendEnd: 100, receiveHeadersEnd: 200}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const rttByOrigin = new Map([[NetworkAnalyzer.summary, 0]]); + const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request], {rttByOrigin}); + const expected = {min: 100, max: 100, avg: 100, median: 100}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should subtract out rtt', () => { + const timing = {sendEnd: 100, receiveHeadersEnd: 200}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const rttByOrigin = new Map([[NetworkAnalyzer.summary, 50]]); + const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request], {rttByOrigin}); + const expected = {min: 50, max: 50, avg: 50, median: 50}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should compute rtts when not provided', () => { + const timing = {connectStart: 5, connectEnd: 55, sendEnd: 100, receiveHeadersEnd: 200}; + const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); + const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request]); + const expected = {min: 50, max: 50, avg: 50, median: 50}; + assert.deepStrictEqual(result.get('https://example.com'), expected); + }); + + it('should work on a real trace', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin(requests); + assertCloseEnough(result.get('https://www.paulirish.com').avg, 35); + 
assertCloseEnough(result.get('https://www.googletagmanager.com').avg, 8); + assertCloseEnough(result.get('https://www.google-analytics.com').avg, 8); + }); + + it('should approximate well with either method', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin(requests).get( + NetworkAnalyzer.summary, + ); + const resultApprox = NetworkAnalyzer + .estimateServerResponseTimeByOrigin(requests, { + forceCoarseEstimates: true, + }) + .get(NetworkAnalyzer.summary); + assertCloseEnough(result.min, resultApprox.min, 20); + assertCloseEnough(result.avg, resultApprox.avg, 30); + assertCloseEnough(result.median, resultApprox.median, 30); + }); + }); + + describe('#estimateThroughput', () => { + const estimateThroughput = NetworkAnalyzer.estimateThroughput; + + function createThroughputRecord(responseHeadersEndTimeInS, networkEndTimeInS, extras) { + return Object.assign( + { + responseHeadersEndTime: responseHeadersEndTimeInS * 1000, + networkEndTime: networkEndTimeInS * 1000, + transferSize: 1000, + finished: true, + failed: false, + statusCode: 200, + url: 'https://google.com/logo.png', + parsedURL: {scheme: 'https'}, + }, + extras, + ); + } + + it('should return Infinity for no/missing records', () => { + assert.strictEqual(estimateThroughput([]), Infinity); + assert.strictEqual(estimateThroughput([createThroughputRecord(0, 0, {finished: false})]), Infinity); + }); + + it('should compute correctly for a basic waterfall', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 1), + createThroughputRecord(1, 2), + createThroughputRecord(2, 6), + ]); + + assert.strictEqual(result, 500 * 8); + }); + + it('should compute correctly for concurrent requests', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 1), + createThroughputRecord(0.5, 1), + ]); + + assert.strictEqual(result, 2000 * 8); + }); + + it('should compute correctly for gaps', () => { + const 
result = estimateThroughput([ + createThroughputRecord(0, 1), + createThroughputRecord(3, 4), + ]); + + assert.strictEqual(result, 1000 * 8); + }); + + it('should compute correctly for partially overlapping requests', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 1), + createThroughputRecord(0.5, 1.5), + createThroughputRecord(1.25, 3), + createThroughputRecord(1.4, 4), + createThroughputRecord(5, 9), + ]); + + assert.strictEqual(result, 625 * 8); + }); + + it('should exclude failed records', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 2), + createThroughputRecord(3, 4, {failed: true}), + ]); + assert.strictEqual(result, 500 * 8); + }); + + it('should exclude cached records', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 2), + createThroughputRecord(3, 4, {statusCode: 304}), + ]); + assert.strictEqual(result, 500 * 8); + }); + + it('should exclude unfinished records', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 2), + createThroughputRecord(3, 4, {finished: false}), + ]); + assert.strictEqual(result, 500 * 8); + }); + + it('should exclude data URIs', () => { + const result = estimateThroughput([ + createThroughputRecord(0, 2), + createThroughputRecord(3, 4, {parsedURL: {scheme: 'data'}}), + ]); + assert.strictEqual(result, 500 * 8); + }); + }); + + describe('#computeRTTAndServerResponseTime', () => { + it('should work', async () => { + const requests = await createRequests(trace); + const result = NetworkAnalyzer.computeRTTAndServerResponseTime(requests); + + expect(result.rtt).to.be.closeTo(0.082, 0.001); + assert.deepStrictEqual([...result.additionalRttByOrigin.entries()], [ + [ + 'https://www.paulirish.com', + 9.788999999999994, + ], + [ + 'https://www.googletagmanager.com', + 17.21999999999999, + ], + [ + 'https://fonts.googleapis.com', + 16.816000000000003, + ], + [ + 'https://fonts.gstatic.com', + 1.6889999999999998, + ], + [ + 
'https://www.google-analytics.com', + 9.924999999999997, + ], + [ + 'https://paulirish.disqus.com', + 9.000999999999998, + ], + [ + 'https://firebaseinstallations.googleapis.com', + 0, + ], + [ + 'https://firebaseremoteconfig.googleapis.com', + 0.1823, + ], + [ + '__SUMMARY__', + 0, + ], + ]); + }); + }); + + describe('#findMainDocument', () => { + it('should find the main document', async () => { + const requests = await createRequests(trace); + const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/'); + assert.strictEqual(mainDocument.url, 'https://www.paulirish.com/'); + }); + + it('should find the main document if the URL includes a fragment', async () => { + const requests = await createRequests(trace); + const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/#info'); + assert.strictEqual(mainDocument.url, 'https://www.paulirish.com/'); + }); + }); + + describe('#resolveRedirects', () => { + it('should resolve to the same document when no redirect', async () => { + const requests = await createRequests(trace); + const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/'); + const finalDocument = NetworkAnalyzer.resolveRedirects(mainDocument); + assert.strictEqual(mainDocument.url, finalDocument.url); + assert.strictEqual(finalDocument.url, 'https://www.paulirish.com/'); + }); + + it('should resolve to the final document with redirects', async () => { + const requests = await createRequests(traceWithRedirect); + const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'http://www.vkontakte.ru/'); + const finalDocument = NetworkAnalyzer.resolveRedirects(mainDocument); + assert.notEqual(mainDocument.url, finalDocument.url); + assert.strictEqual(finalDocument.url, 'https://m.vk.com/'); + }); + }); +}); diff --git a/front_end/models/trace/lantern/simulation/NetworkAnalyzer.ts b/front_end/models/trace/lantern/simulation/NetworkAnalyzer.ts new 
file mode 100644 index 00000000000..41a355c8810 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/NetworkAnalyzer.ts @@ -0,0 +1,620 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2018 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type * as Lantern from '../types/lantern.js'; + +class UrlUtils { + /** + * There is fancy URL rewriting logic for the chrome://settings page that we need to work around. + * Why? Special handling was added by Chrome team to allow a pushState transition between chrome:// pages. + * As a result, the network URL (chrome://chrome/settings/) doesn't match the final document URL (chrome://settings/). + */ + static rewriteChromeInternalUrl(url: string): string { + if (!url || !url.startsWith('chrome://')) { + return url; + } + // Chrome adds a trailing slash to `chrome://` URLs, but the spec does not. + // https://github.com/GoogleChrome/lighthouse/pull/3941#discussion_r154026009 + if (url.endsWith('/')) { + url = url.replace(/\/$/, ''); + } + return url.replace(/^chrome:\/\/chrome\//, 'chrome://'); + } + + /** + * Determine if url1 equals url2, ignoring URL fragments. + */ + static equalWithExcludedFragments(url1: string, url2: string): boolean { + [url1, url2] = [url1, url2].map(this.rewriteChromeInternalUrl); + try { + const urla = new URL(url1); + urla.hash = ''; + + const urlb = new URL(url2); + urlb.hash = ''; + + return urla.href === urlb.href; + } catch (e) { + return false; + } + } +} + +interface Summary { + min: number; + max: number; + avg: number; + median: number; +} + +interface RTTEstimateOptions { + /** + * TCP connection handshake information will be used when available, but in + * some circumstances this data can be unreliable. This flag exposes an + * option to ignore the handshake data and use the coarse download/TTFB timing data. 
+ */ + forceCoarseEstimates?: boolean; + /** + * Coarse estimates include lots of extra time and noise multiply by some factor + * to deflate the estimates a bit. + */ + coarseEstimateMultiplier?: number; + /** Useful for testing to isolate the different methods of estimation. */ + useDownloadEstimates?: boolean; + /** Useful for testing to isolate the different methods of estimation. */ + useSendStartEstimates?: boolean; + /** Useful for testing to isolate the different methods of estimation. */ + useHeadersEndEstimates?: boolean; +} + +type RequestInfo = { + request: Lantern.NetworkRequest, + timing: Lantern.ResourceTiming, + connectionReused?: boolean, +}; + +const INITIAL_CWD = 14 * 1024; + +// Assume that 40% of TTFB was server response time by default for static assets +const DEFAULT_SERVER_RESPONSE_PERCENTAGE = 0.4; + +/** + * For certain resource types, server response time takes up a greater percentage of TTFB (dynamic + * assets like HTML documents, XHR/API calls, etc) + */ +const SERVER_RESPONSE_PERCENTAGE_OF_TTFB: Partial> = { + Document: 0.9, + XHR: 0.9, + Fetch: 0.9, +}; + +class NetworkAnalyzer { + static get summary(): string { + return '__SUMMARY__'; + } + + static groupByOrigin(records: Lantern.NetworkRequest[]): Map { + const grouped = new Map(); + records.forEach(item => { + const key = item.parsedURL.securityOrigin; + const group = grouped.get(key) || []; + group.push(item); + grouped.set(key, group); + }); + return grouped; + } + + static getSummary(values: number[]): Summary { + values.sort((a, b) => a - b); + + let median; + if (values.length === 0) { + median = values[0]; + } else if (values.length % 2 === 0) { + const a = values[Math.floor((values.length - 1) / 2)]; + const b = values[Math.floor((values.length - 1) / 2) + 1]; + median = (a + b) / 2; + } else { + median = values[Math.floor((values.length - 1) / 2)]; + } + + return { + min: values[0], + max: values[values.length - 1], + avg: values.reduce((a, b) => a + b, 0) / values.length, 
+ median, + }; + } + + static summarize(values: Map): Map { + const summaryByKey = new Map(); + const allEstimates = []; + for (const [key, estimates] of values) { + summaryByKey.set(key, NetworkAnalyzer.getSummary(estimates)); + allEstimates.push(...estimates); + } + + summaryByKey.set(NetworkAnalyzer.summary, NetworkAnalyzer.getSummary(allEstimates)); + return summaryByKey; + } + + static _estimateValueByOrigin( + requests: Lantern.NetworkRequest[], + iteratee: (e: RequestInfo) => number | number[] | undefined): Map { + const connectionWasReused = NetworkAnalyzer.estimateIfConnectionWasReused(requests); + const groupedByOrigin = NetworkAnalyzer.groupByOrigin(requests); + + const estimates = new Map(); + for (const [origin, originRequests] of groupedByOrigin.entries()) { + let originEstimates: number[] = []; + + for (const request of originRequests) { + const timing = request.timing; + if (!timing) { + continue; + } + + const value = iteratee({ + request, + timing, + connectionReused: connectionWasReused.get(request.requestId), + }); + if (typeof value !== 'undefined') { + originEstimates = originEstimates.concat(value); + } + } + + if (!originEstimates.length) { + continue; + } + estimates.set(origin, originEstimates); + } + + return estimates; + } + + /** + * Estimates the observed RTT to each origin based on how long the connection setup. + * For h1 and h2, this could includes two estimates - one for the TCP handshake, another for + * SSL negotiation. + * For h3, we get only one estimate since QUIC establishes a secure connection in a + * single handshake. + * This is the most accurate and preferred method of measurement when the data is available. 
+ */ + static _estimateRTTViaConnectionTiming(info: RequestInfo): number[]|number|undefined { + const {timing, connectionReused, request} = info; + if (connectionReused) { + return; + } + + const {connectStart, sslStart, sslEnd, connectEnd} = timing; + if (connectEnd >= 0 && connectStart >= 0 && request.protocol.startsWith('h3')) { + // These values are equal to sslStart and sslEnd for h3. + return connectEnd - connectStart; + } + if (sslStart >= 0 && sslEnd >= 0 && sslStart !== connectStart) { + // SSL can also be more than 1 RT but assume False Start was used. + return [connectEnd - sslStart, sslStart - connectStart]; + } + if (connectStart >= 0 && connectEnd >= 0) { + return connectEnd - connectStart; + } + + return; + } + + /** + * Estimates the observed RTT to each origin based on how long a download took on a fresh connection. + * NOTE: this will tend to overestimate the actual RTT quite significantly as the download can be + * slow for other reasons as well such as bandwidth constraints. 
+ */ + static _estimateRTTViaDownloadTiming(info: RequestInfo): number|undefined { + const {timing, connectionReused, request} = info; + if (connectionReused) { + return; + } + + // Only look at downloads that went past the initial congestion window + if (request.transferSize <= INITIAL_CWD) { + return; + } + if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) { + return; + } + + // Compute the amount of time downloading everything after the first congestion window took + const totalTime = request.networkEndTime - request.networkRequestTime; + const downloadTimeAfterFirstByte = totalTime - timing.receiveHeadersEnd; + const numberOfRoundTrips = Math.log2(request.transferSize / INITIAL_CWD); + + // Ignore requests that required a high number of round trips since bandwidth starts to play + // a larger role than latency + if (numberOfRoundTrips > 5) { + return; + } + + return downloadTimeAfterFirstByte / numberOfRoundTrips; + } + + /** + * Estimates the observed RTT to each origin based on how long it took until Chrome could + * start sending the actual request when a new connection was required. + * NOTE: this will tend to overestimate the actual RTT as the request can be delayed for other + * reasons as well such as more SSL handshakes if TLS False Start is not enabled. + */ + static _estimateRTTViaSendStartTiming(info: RequestInfo): number|undefined { + const {timing, connectionReused, request} = info; + if (connectionReused) { + return; + } + + if (!Number.isFinite(timing.sendStart) || timing.sendStart < 0) { + return; + } + + // Assume everything before sendStart was just DNS + (SSL)? 
+ TCP handshake + // 1 RT for DNS, 1 RT (maybe) for SSL, 1 RT for TCP + let roundTrips = 1; + // TCP + if (!request.protocol.startsWith('h3')) { + roundTrips += 1; + } + if (request.parsedURL.scheme === 'https') { + roundTrips += 1; + } + return timing.sendStart / roundTrips; + } + + /** + * Estimates the observed RTT to each origin based on how long it took until Chrome received the + * headers of the response (~TTFB). + * NOTE: this is the most inaccurate way to estimate the RTT, but in some environments it's all + * we have access to :( + */ + static _estimateRTTViaHeadersEndTiming(info: RequestInfo): number|undefined { + const {timing, connectionReused, request} = info; + if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) { + return; + } + if (!request.resourceType) { + return; + } + + const serverResponseTimePercentage = + SERVER_RESPONSE_PERCENTAGE_OF_TTFB[request.resourceType] || DEFAULT_SERVER_RESPONSE_PERCENTAGE; + const estimatedServerResponseTime = timing.receiveHeadersEnd * serverResponseTimePercentage; + + // When connection was reused... + // TTFB = 1 RT for request + server response time + let roundTrips = 1; + + // When connection was fresh... + // TTFB = DNS + (SSL)? + TCP handshake + 1 RT for request + server response time + if (!connectionReused) { + roundTrips += 1; // DNS + if (!request.protocol.startsWith('h3')) { + roundTrips += 1; // TCP + } + if (request.parsedURL.scheme === 'https') { + roundTrips += 1; // SSL + } + } + + // subtract out our estimated server response time + return Math.max((timing.receiveHeadersEnd - estimatedServerResponseTime) / roundTrips, 3); + } + + /** + * Given the RTT to each origin, estimates the observed server response times. 
+ */ + static _estimateResponseTimeByOrigin(records: Lantern.NetworkRequest[], rttByOrigin: Map): + Map { + return NetworkAnalyzer._estimateValueByOrigin(records, ({request, timing}) => { + if (request.serverResponseTime !== undefined) { + return request.serverResponseTime; + } + + if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) { + return; + } + if (!Number.isFinite(timing.sendEnd) || timing.sendEnd < 0) { + return; + } + + const ttfb = timing.receiveHeadersEnd - timing.sendEnd; + const origin = request.parsedURL.securityOrigin; + const rtt = rttByOrigin.get(origin) || rttByOrigin.get(NetworkAnalyzer.summary) || 0; + return Math.max(ttfb - rtt, 0); + }); + } + + static canTrustConnectionInformation(requests: Lantern.NetworkRequest[]): boolean { + const connectionIdWasStarted = new Map(); + for (const request of requests) { + const started = connectionIdWasStarted.get(request.connectionId) || !request.connectionReused; + connectionIdWasStarted.set(request.connectionId, started); + } + + // We probably can't trust the network information if all the connection IDs were the same + if (connectionIdWasStarted.size <= 1) { + return false; + } + // Or if there were connections that were always reused (a connection had to have started at some point) + return Array.from(connectionIdWasStarted.values()).every(started => started); + } + + /** + * Returns a map of requestId -> connectionReused, estimating the information if the information + * available in the records themselves appears untrustworthy. 
+ */ + static estimateIfConnectionWasReused(records: Lantern.NetworkRequest[], options?: {forceCoarseEstimates: boolean}): + Map { + const {forceCoarseEstimates = false} = options || {}; + + // Check if we can trust the connection information coming from the protocol + if (!forceCoarseEstimates && NetworkAnalyzer.canTrustConnectionInformation(records)) { + return new Map(records.map(request => [request.requestId, Boolean(request.connectionReused)])); + } + + // Otherwise we're on our own, a request may not have needed a fresh connection if... + // - It was not the first request to the domain + // - It was H2 + // - It was after the first request to the domain ended + const connectionWasReused = new Map(); + const groupedByOrigin = NetworkAnalyzer.groupByOrigin(records); + for (const originRecords of groupedByOrigin.values()) { + const earliestReusePossible = + originRecords.map(request => request.networkEndTime).reduce((a, b) => Math.min(a, b), Infinity); + + for (const request of originRecords) { + connectionWasReused.set( + request.requestId, + request.networkRequestTime >= earliestReusePossible || request.protocol === 'h2', + ); + } + + const firstRecord = originRecords.reduce((a, b) => { + return a.networkRequestTime > b.networkRequestTime ? b : a; + }); + connectionWasReused.set(firstRecord.requestId, false); + } + + return connectionWasReused; + } + + /** + * Estimates the RTT to each origin by examining observed network timing information. + * Attempts to use the most accurate information first and falls back to coarser estimates when it + * is unavailable. + */ + static estimateRTTByOrigin(records: Lantern.NetworkRequest[], options?: RTTEstimateOptions): Map { + const { + forceCoarseEstimates = false, + // coarse estimates include lots of extra time and noise + // multiply by some factor to deflate the estimates a bit. 
+ coarseEstimateMultiplier = 0.3, + useDownloadEstimates = true, + useSendStartEstimates = true, + useHeadersEndEstimates = true, + } = options || {}; + + const connectionWasReused = NetworkAnalyzer.estimateIfConnectionWasReused(records); + const groupedByOrigin = NetworkAnalyzer.groupByOrigin(records); + + const estimatesByOrigin = new Map(); + for (const [origin, originRequests] of groupedByOrigin.entries()) { + const originEstimates: number[] = []; + + // eslint-disable-next-line no-inner-declarations + function collectEstimates(estimator: (e: RequestInfo) => number[] | number | undefined, multiplier = 1): void { + for (const request of originRequests) { + const timing = request.timing; + if (!timing) { + continue; + } + + const estimates = estimator({ + request, + timing, + connectionReused: connectionWasReused.get(request.requestId), + }); + if (estimates === undefined) { + continue; + } + + if (!Array.isArray(estimates)) { + originEstimates.push(estimates * multiplier); + } else { + originEstimates.push(...estimates.map(e => e * multiplier)); + } + } + } + + if (!forceCoarseEstimates) { + collectEstimates(this._estimateRTTViaConnectionTiming); + } + + // Connection timing can be missing for a few reasons: + // - Origin was preconnected, which we don't have instrumentation for. + // - Trace began recording after a connection has already been established (for example, in timespan mode) + // - Perhaps Chrome established a connection already in the background (service worker? Just guessing here) + // - Not provided in LR netstack. 
+ if (!originEstimates.length) { + if (useDownloadEstimates) { + collectEstimates(this._estimateRTTViaDownloadTiming, coarseEstimateMultiplier); + } + if (useSendStartEstimates) { + collectEstimates(this._estimateRTTViaSendStartTiming, coarseEstimateMultiplier); + } + if (useHeadersEndEstimates) { + collectEstimates(this._estimateRTTViaHeadersEndTiming, coarseEstimateMultiplier); + } + } + + if (originEstimates.length) { + estimatesByOrigin.set(origin, originEstimates); + } + } + + if (!estimatesByOrigin.size) { + throw new Error('No timing information available'); + } + return NetworkAnalyzer.summarize(estimatesByOrigin); + } + + /** + * Estimates the server response time of each origin. RTT times can be passed in or will be + * estimated automatically if not provided. + */ + static estimateServerResponseTimeByOrigin(records: Lantern.NetworkRequest[], options?: RTTEstimateOptions&{ + rttByOrigin?: Map, + }): Map { + let rttByOrigin = (options || {}).rttByOrigin; + if (!rttByOrigin) { + rttByOrigin = new Map(); + + const rttSummaryByOrigin = NetworkAnalyzer.estimateRTTByOrigin(records, options); + for (const [origin, summary] of rttSummaryByOrigin.entries()) { + rttByOrigin.set(origin, summary.min); + } + } + + const estimatesByOrigin = NetworkAnalyzer._estimateResponseTimeByOrigin(records, rttByOrigin); + return NetworkAnalyzer.summarize(estimatesByOrigin); + } + + /** + * Computes the average throughput for the given requests in bits/second. + * Excludes data URI, failed or otherwise incomplete, and cached requests. + * Returns Infinity if there were no analyzable network requests. + */ + static estimateThroughput(records: Lantern.NetworkRequest[]): number { + let totalBytes = 0; + + // We will measure throughput by summing the total bytes downloaded by the total time spent + // downloading those bytes. We slice up all the network requests into start/end boundaries, so + // it's easier to deal with the gaps in downloading. 
+ const timeBoundaries = records + .reduce( + (boundaries, request) => { + const scheme = request.parsedURL?.scheme; + // Requests whose bodies didn't come over the network or didn't completely finish will mess + // with the computation, just skip over them. + if (scheme === 'data' || request.failed || !request.finished || + request.statusCode > 300 || !request.transferSize) { + return boundaries; + } + + // If we've made it this far, all the times we need should be valid (i.e. not undefined/-1). + totalBytes += request.transferSize; + boundaries.push({time: request.responseHeadersEndTime / 1000, isStart: true}); + boundaries.push({time: request.networkEndTime / 1000, isStart: false}); + return boundaries; + }, + [] as Array<{time: number, isStart: boolean}>) + .sort((a, b) => a.time - b.time); + + if (!timeBoundaries.length) { + return Infinity; + } + + let inflight = 0; + let currentStart = 0; + let totalDuration = 0; + + timeBoundaries.forEach(boundary => { + if (boundary.isStart) { + if (inflight === 0) { + // We just ended a quiet period, keep track of when the download period started + currentStart = boundary.time; + } + inflight++; + } else { + inflight--; + if (inflight === 0) { + // We just entered a quiet period, update our duration with the time we spent downloading + totalDuration += boundary.time - currentStart; + } + } + }); + + return totalBytes * 8 / totalDuration; + } + + static computeRTTAndServerResponseTime(records: Lantern.NetworkRequest[]): + {rtt: number, additionalRttByOrigin: Map, serverResponseTimeByOrigin: Map} { + // First pass compute the estimated observed RTT to each origin's servers. 
+ const rttByOrigin = new Map(); + for (const [origin, summary] of NetworkAnalyzer.estimateRTTByOrigin(records).entries()) { + rttByOrigin.set(origin, summary.min); + } + + // We'll use the minimum RTT as the assumed connection latency since we care about how much addt'l + // latency each origin introduces as Lantern will be simulating with its own connection latency. + const minimumRtt = Math.min(...Array.from(rttByOrigin.values())); + // We'll use the observed RTT information to help estimate the server response time + const responseTimeSummaries = NetworkAnalyzer.estimateServerResponseTimeByOrigin(records, { + rttByOrigin, + }); + + const additionalRttByOrigin = new Map(); + const serverResponseTimeByOrigin = new Map(); + for (const [origin, summary] of responseTimeSummaries.entries()) { + // Not all origins have usable timing data, we'll default to using no additional latency. + const rttForOrigin = rttByOrigin.get(origin) || minimumRtt; + additionalRttByOrigin.set(origin, rttForOrigin - minimumRtt); + serverResponseTimeByOrigin.set(origin, summary.median); + } + + return { + rtt: minimumRtt, + additionalRttByOrigin, + serverResponseTimeByOrigin, + }; + } + + static analyze(records: Lantern.NetworkRequest[]): Lantern.Simulation.Settings['networkAnalysis'] { + const throughput = NetworkAnalyzer.estimateThroughput(records); + return { + throughput, + ...NetworkAnalyzer.computeRTTAndServerResponseTime(records), + }; + } + + static findResourceForUrl(records: Array, resourceUrl: string): T|undefined { + // equalWithExcludedFragments is expensive, so check that the resourceUrl starts with the request url first + return records.find( + request => resourceUrl.startsWith(request.url) && UrlUtils.equalWithExcludedFragments(request.url, resourceUrl), + ); + } + + static findLastDocumentForUrl(records: Array, resourceUrl: string): T|undefined { + // equalWithExcludedFragments is expensive, so check that the resourceUrl starts with the request url first + const 
matchingRequests = records.filter( + request => request.resourceType === 'Document' && + // Note: `request.url` should never have a fragment, else this optimization gives wrong results. + resourceUrl.startsWith(request.url) && UrlUtils.equalWithExcludedFragments(request.url, resourceUrl), + ); + return matchingRequests[matchingRequests.length - 1]; + } + + /** + * Resolves redirect chain given a main document. + * See: {@link NetworkAnalyzer.findLastDocumentForUrl} for how to retrieve main document. + */ + static resolveRedirects(request: T): T { + while (request.redirectDestination) { + request = request.redirectDestination as T; + } + return request; + } +} + +export {NetworkAnalyzer}; diff --git a/front_end/models/trace/lantern/simulation/SimulationTimingMap.ts b/front_end/models/trace/lantern/simulation/SimulationTimingMap.ts new file mode 100644 index 00000000000..219bc23b2e2 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/SimulationTimingMap.ts @@ -0,0 +1,202 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @fileoverview + * + * This class encapsulates the type-related validation logic for moving timing information for nodes + * through the different simulation phases. Methods here ensure that the invariants of simulation hold + * as nodes are queued, partially simulated, and completed. 
+ */ + +import {BaseNode, type Node} from '../BaseNode.js'; +import {type CPUNode} from '../CpuNode.js'; +import {type NetworkNode} from '../NetworkNode.js'; + +interface NodeTimingComplete { + startTime: number; + endTime: number; + queuedTime: number; + estimatedTimeElapsed: number; + timeElapsed: number; + timeElapsedOvershoot: number; + bytesDownloaded: number; +} + +type NodeTimingQueued = Pick; + +type CpuNodeTimingStarted = NodeTimingQueued&Pick; +type NetworkNodeTimingStarted = CpuNodeTimingStarted&Pick; + +type CpuNodeTimingInProgress = CpuNodeTimingStarted&Pick; +type NetworkNodeTimingInProgress = NetworkNodeTimingStarted&Pick; + +export type CpuNodeTimingComplete = CpuNodeTimingInProgress&Pick; +export type NetworkNodeTimingComplete = + NetworkNodeTimingInProgress&Pick&{connectionTiming: ConnectionTiming}; +export type CompleteNodeTiming = CpuNodeTimingComplete|NetworkNodeTimingComplete; + +type NodeTimingData = NodeTimingQueued|CpuNodeTimingStarted|NetworkNodeTimingStarted|CpuNodeTimingInProgress| + NetworkNodeTimingInProgress|CpuNodeTimingComplete|NetworkNodeTimingComplete; + +export interface ConnectionTiming { + dnsResolutionTime?: number; + connectionTime?: number; + sslTime?: number; + timeToFirstByte: number; +} + +class SimulatorTimingMap { + _nodeTimings: Map; + + constructor() { + this._nodeTimings = new Map(); + } + + getNodes(): Node[] { + return Array.from(this._nodeTimings.keys()); + } + + setReadyToStart(node: Node, values: {queuedTime: number}): void { + this._nodeTimings.set(node, values); + } + + setInProgress(node: Node, values: {startTime: number}): void { + const nodeTiming = { + ...this.getQueued(node), + startTime: values.startTime, + timeElapsed: 0, + }; + + this._nodeTimings.set( + node, + node.type === BaseNode.types.NETWORK ? 
{...nodeTiming, timeElapsedOvershoot: 0, bytesDownloaded: 0} : + nodeTiming, + ); + } + + setCompleted(node: Node, values: {endTime: number, connectionTiming?: ConnectionTiming}): void { + const nodeTiming = { + ...this.getInProgress(node), + endTime: values.endTime, + connectionTiming: values.connectionTiming, + }; + + this._nodeTimings.set(node, nodeTiming); + } + + setCpu(node: CPUNode, values: {timeElapsed: number}): void { + const nodeTiming = { + ...this.getCpuStarted(node), + timeElapsed: values.timeElapsed, + }; + + this._nodeTimings.set(node, nodeTiming); + } + + setCpuEstimated(node: CPUNode, values: {estimatedTimeElapsed: number}): void { + const nodeTiming = { + ...this.getCpuStarted(node), + estimatedTimeElapsed: values.estimatedTimeElapsed, + }; + + this._nodeTimings.set(node, nodeTiming); + } + + setNetwork(node: NetworkNode, values: {timeElapsed: number, timeElapsedOvershoot: number, bytesDownloaded: number}): + void { + const nodeTiming = { + ...this.getNetworkStarted(node), + timeElapsed: values.timeElapsed, + timeElapsedOvershoot: values.timeElapsedOvershoot, + bytesDownloaded: values.bytesDownloaded, + }; + + this._nodeTimings.set(node, nodeTiming); + } + + setNetworkEstimated(node: NetworkNode, values: {estimatedTimeElapsed: number}): void { + const nodeTiming = { + ...this.getNetworkStarted(node), + estimatedTimeElapsed: values.estimatedTimeElapsed, + }; + + this._nodeTimings.set(node, nodeTiming); + } + + getQueued(node: Node): NodeTimingData { + const timing = this._nodeTimings.get(node); + if (!timing) { + throw new Error(`Node ${node.id} not yet queued`); + } + return timing; + } + + getCpuStarted(node: CPUNode): CpuNodeTimingStarted { + const timing = this._nodeTimings.get(node); + if (!timing) { + throw new Error(`Node ${node.id} not yet queued`); + } + if (!('startTime' in timing)) { + throw new Error(`Node ${node.id} not yet started`); + } + if ('bytesDownloaded' in timing) { + throw new Error(`Node ${node.id} timing not valid`); + } + 
return timing; + } + + getNetworkStarted(node: NetworkNode): NetworkNodeTimingStarted { + const timing = this._nodeTimings.get(node); + if (!timing) { + throw new Error(`Node ${node.id} not yet queued`); + } + if (!('startTime' in timing)) { + throw new Error(`Node ${node.id} not yet started`); + } + if (!('bytesDownloaded' in timing)) { + throw new Error(`Node ${node.id} timing not valid`); + } + return timing; + } + + getInProgress(node: Node): CpuNodeTimingInProgress|NetworkNodeTimingInProgress { + const timing = this._nodeTimings.get(node); + if (!timing) { + throw new Error(`Node ${node.id} not yet queued`); + } + if (!('startTime' in timing)) { + throw new Error(`Node ${node.id} not yet started`); + } + if (!('estimatedTimeElapsed' in timing)) { + throw new Error(`Node ${node.id} not yet in progress`); + } + return timing; + } + + getCompleted(node: Node): CpuNodeTimingComplete|NetworkNodeTimingComplete { + const timing = this._nodeTimings.get(node); + if (!timing) { + throw new Error(`Node ${node.id} not yet queued`); + } + if (!('startTime' in timing)) { + throw new Error(`Node ${node.id} not yet started`); + } + if (!('estimatedTimeElapsed' in timing)) { + throw new Error(`Node ${node.id} not yet in progress`); + } + if (!('endTime' in timing)) { + throw new Error(`Node ${node.id} not yet completed`); + } + return timing; + } +} + +export {SimulatorTimingMap}; diff --git a/front_end/models/trace/lantern/simulation/Simulator.test.ts b/front_end/models/trace/lantern/simulation/Simulator.test.ts new file mode 100644 index 00000000000..ebb3b2a741b --- /dev/null +++ b/front_end/models/trace/lantern/simulation/Simulator.test.ts @@ -0,0 +1,426 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-nocheck TODO(crbug.com/348449529) + +import * as TraceModel from '../../trace.js'; +import * as Lantern from '../lantern.js'; +import {loadTrace, runTraceEngine} from '../testing/MetricTestUtils.js'; + +const {NetworkNode, CPUNode} = Lantern; +const {Simulator, DNSCache} = Lantern.Simulation; + +let nextRequestId = 1; +let nextTid = 1; + +async function createGraph(trace: Lantern.Trace) { + const traceEngineData = await runTraceEngine(trace); + const requests = TraceModel.LanternComputationData.createNetworkRequests(trace, traceEngineData); + return TraceModel.LanternComputationData.createGraph(requests, trace, traceEngineData); +} + +function request(opts) { + const scheme = opts.scheme || 'http'; + const url = `${scheme}://example.com`; + const rendererStartTime = opts.startTime; + const networkEndTime = opts.endTime; + delete opts.startTime; + delete opts.endTime; + + return Object.assign( + { + requestId: opts.requestId || nextRequestId++, + url, + transferSize: opts.transferSize || 1000, + protocol: scheme, + parsedURL: {scheme, host: 'example.com', securityOrigin: url}, + timing: opts.timing, + rendererStartTime, + networkEndTime, + }, + opts); +} + +function cpuTask({tid, ts, duration}) { + tid = tid || nextTid++; + ts = ts || 0; + const dur = ((duration || 0) * 1000) / 5; + return {tid, ts, dur}; +} + +describe('DependencyGraph/Simulator', () => { + // Insulate the simulator tests from DNS multiplier changes + let originalDNSMultiplier; + let trace: Lantern.Trace; + + before(async function() { + trace = await loadTrace(this, 'lantern/progressive-app/trace.json.gz'); + originalDNSMultiplier = DNSCache.rttMultiplier; + DNSCache.rttMultiplier = 1; + }); + + after(() => { + DNSCache.rttMultiplier = originalDNSMultiplier; + }); + + describe('.simulate', () => { + const serverResponseTimeByOrigin = new Map([['http://example.com', 500]]); + + function 
assertNodeTiming(result, node, assertions) { + const timing = result.nodeTimings.get(node); + assert.ok(timing, 'missing node timing information'); + Object.keys(assertions).forEach(key => { + assert.strictEqual(timing[key], assertions[key]); + }); + } + + it('should simulate basic network graphs', () => { + const rootNode = new NetworkNode(request({})); + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(rootNode); + // should be 3 RTTs and 500ms for the server response time + assert.strictEqual(result.timeInMs, 450 + 500); + assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); + }); + + it('should simulate basic mixed graphs', () => { + const rootNode = new NetworkNode(request({})); + const cpuNode = new CPUNode(cpuTask({duration: 200})); + cpuNode.addDependency(rootNode); + + const simulator = new Simulator({ + serverResponseTimeByOrigin, + cpuSlowdownMultiplier: 5, + }); + const result = simulator.simulate(rootNode); + // should be 3 RTTs and 500ms for the server response time + 200 CPU + assert.strictEqual(result.timeInMs, 450 + 500 + 200); + assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); + assertNodeTiming(result, cpuNode, {startTime: 950, endTime: 1150}); + }); + + it('should simulate basic network waterfall graphs', () => { + const nodeA = new NetworkNode(request({startTime: 0, endTime: 1})); + const nodeB = new NetworkNode(request({startTime: 0, endTime: 3})); + const nodeC = new NetworkNode(request({startTime: 0, endTime: 5})); + const nodeD = new NetworkNode(request({startTime: 0, endTime: 7})); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeC.addDependent(nodeD); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(nodeA); + // should be 950ms for A, 650ms each for B, C, D (no DNS and one-way connection) + assert.strictEqual(result.timeInMs, 2900); + assertNodeTiming(result, nodeA, {startTime: 0, endTime: 
950}); + assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1600}); + assertNodeTiming(result, nodeC, {startTime: 1600, endTime: 2250}); + assertNodeTiming(result, nodeD, {startTime: 2250, endTime: 2900}); + }); + + it('should simulate cached network graphs', () => { + const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, fromDiskCache: true})); + const nodeB = new NetworkNode(request({startTime: 0, endTime: 3, fromDiskCache: true})); + nodeA.addDependent(nodeB); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(nodeA); + // should be ~8ms each for A, B + assert.strictEqual(result.timeInMs, 16); + assertNodeTiming(result, nodeA, {startTime: 0, endTime: 8}); + assertNodeTiming(result, nodeB, {startTime: 8, endTime: 16}); + }); + + it('should simulate data URL network graphs', () => { + const url = 'data:image/jpeg;base64,foobar'; + const protocol = 'data'; + const parsedURL = {scheme: 'data', host: '', securityOrigin: 'null'}; + const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, url, parsedURL, protocol})); + const nodeB = + new NetworkNode(request({startTime: 0, endTime: 3, url, parsedURL, protocol, resourceSize: 1024 * 1024})); + nodeA.addDependent(nodeB); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(nodeA); + + // should be ~2ms for A (resourceSize 0), ~12ms for B (resourceSize 1MB) + assert.strictEqual(result.timeInMs, 14); + assertNodeTiming(result, nodeA, {startTime: 0, endTime: 2}); + assertNodeTiming(result, nodeB, {startTime: 2, endTime: 14}); + }); + + it('should simulate basic CPU queue graphs', () => { + const nodeA = new NetworkNode(request({})); + const nodeB = new CPUNode(cpuTask({duration: 100})); + const nodeC = new CPUNode(cpuTask({duration: 600})); + const nodeD = new CPUNode(cpuTask({duration: 300})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const 
simulator = new Simulator({ + serverResponseTimeByOrigin, + cpuSlowdownMultiplier: 5, + }); + const result = simulator.simulate(nodeA); + // should be 800ms A, then 1000 ms total for B, C, D in serial + assert.strictEqual(result.timeInMs, 1950); + assertNodeTiming(result, nodeA, {startTime: 0, endTime: 950}); + assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1050}); + assertNodeTiming(result, nodeC, {startTime: 1050, endTime: 1650}); + assertNodeTiming(result, nodeD, {startTime: 1650, endTime: 1950}); + }); + + it('should simulate basic network waterfall graphs with CPU', () => { + const nodeA = new NetworkNode(request({})); + const nodeB = new NetworkNode(request({})); + const nodeC = new NetworkNode(request({})); + const nodeD = new NetworkNode(request({})); + const nodeE = new CPUNode(cpuTask({duration: 1000})); + const nodeF = new CPUNode(cpuTask({duration: 1000})); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeB.addDependent(nodeE); // finishes 350 ms after C + nodeC.addDependent(nodeD); + nodeC.addDependent(nodeF); // finishes 700 ms after D + + const simulator = new Simulator({ + serverResponseTimeByOrigin, + cpuSlowdownMultiplier: 5, + }); + const result = simulator.simulate(nodeA); + // should be 950ms for A, 650ms each for B, C, D, with F finishing 700 ms after D + assert.strictEqual(result.timeInMs, 3600); + }); + + it('should simulate basic parallel requests', () => { + const nodeA = new NetworkNode(request({})); + const nodeB = new NetworkNode(request({})); + const nodeC = new NetworkNode(request({transferSize: 15000})); + const nodeD = new NetworkNode(request({})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(nodeA); + // should be 950ms for A and 950ms for C (2 round trips of downloading, but no DNS) + assert.strictEqual(result.timeInMs, 950 + 950); + }); + + it('should make 
connections in parallel', () => { + const nodeA = new NetworkNode(request({startTime: 0, networkRequestTime: 0, endTime: 1})); + const nodeB = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 3})); + const nodeC = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 5})); + const nodeD = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 7})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(nodeA); + // should be 950ms for A, 650ms for B reusing connection, 800ms for C and D in parallel. + assert.strictEqual(result.timeInMs, 950 + 800); + assertNodeTiming(result, nodeA, {startTime: 0, endTime: 950}); + assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1600}); + assertNodeTiming(result, nodeC, {startTime: 950, endTime: 1750}); + assertNodeTiming(result, nodeD, {startTime: 950, endTime: 1750}); + }); + + it('should adjust throughput based on number of requests', () => { + const nodeA = new NetworkNode(request({})); + const nodeB = new NetworkNode(request({})); + const nodeC = new NetworkNode(request({transferSize: 14000})); + const nodeD = new NetworkNode(request({})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + // 80 kbps while all 3 download at 150ms/RT = ~1460 bytes/RT + // 240 kbps while the last one finishes at 150ms/RT = ~4380 bytes/RT + // ~14000 bytes = 5 RTs + // 1 RT 80 kbps b/c its shared + // 1 RT 80 kbps b/c it needs to grow congestion window from being shared + // 1 RT 160 kbps b/c TCP + // 2 RT 240 kbps b/c throughput cap + const simulator = new Simulator({serverResponseTimeByOrigin, throughput: 240000}); + const result = simulator.simulate(nodeA); + // should be 950ms for A and 1400ms for C (5 round trips of downloading) + assert.strictEqual(result.timeInMs, 950 + (150 + 750 + 500)); + }); + + 
it('should start network requests in startTime order', () => { + const rootNode = new NetworkNode(request({startTime: 0, endTime: 0.05, connectionId: 1})); + const imageNodes = [ + new NetworkNode(request({startTime: 5})), + new NetworkNode(request({startTime: 4})), + new NetworkNode(request({startTime: 3})), + new NetworkNode(request({startTime: 2})), + new NetworkNode(request({startTime: 1})), + ]; + + for (const imageNode of imageNodes) { + imageNode.request.connectionReused = true; + imageNode.request.connectionId = 1; + rootNode.addDependent(imageNode); + } + + const simulator = new Simulator({serverResponseTimeByOrigin, maximumConcurrentRequests: 1}); + const result = simulator.simulate(rootNode); + + // should be 3 RTs + SRT for rootNode (950ms) + // should be 1 RT + SRT for image nodes in observed order (650ms) + assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); + assertNodeTiming(result, imageNodes[4], {startTime: 950, endTime: 1600}); + assertNodeTiming(result, imageNodes[3], {startTime: 1600, endTime: 2250}); + assertNodeTiming(result, imageNodes[2], {startTime: 2250, endTime: 2900}); + assertNodeTiming(result, imageNodes[1], {startTime: 2900, endTime: 3550}); + assertNodeTiming(result, imageNodes[0], {startTime: 3550, endTime: 4200}); + }); + + it('should start network requests in priority order to break startTime ties', () => { + const rootNode = new NetworkNode(request({startTime: 0, endTime: 0.05, connectionId: 1})); + const imageNodes = [ + new NetworkNode(request({startTime: 0.1, priority: 'VeryLow'})), + new NetworkNode(request({startTime: 0.2, priority: 'Low'})), + new NetworkNode(request({startTime: 0.3, priority: 'Medium'})), + new NetworkNode(request({startTime: 0.4, priority: 'High'})), + new NetworkNode(request({startTime: 0.5, priority: 'VeryHigh'})), + ]; + + for (const imageNode of imageNodes) { + imageNode.request.connectionReused = true; + imageNode.request.connectionId = 1; + rootNode.addDependent(imageNode); + } + + 
const simulator = new Simulator({serverResponseTimeByOrigin, maximumConcurrentRequests: 1}); + const result = simulator.simulate(rootNode); + + // should be 3 RTs + SRT for rootNode (950ms) + // should be 1 RT + SRT for image nodes in priority order (650ms) + assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); + assertNodeTiming(result, imageNodes[4], {startTime: 950, endTime: 1600}); + assertNodeTiming(result, imageNodes[3], {startTime: 1600, endTime: 2250}); + assertNodeTiming(result, imageNodes[2], {startTime: 2250, endTime: 2900}); + assertNodeTiming(result, imageNodes[1], {startTime: 2900, endTime: 3550}); + assertNodeTiming(result, imageNodes[0], {startTime: 3550, endTime: 4200}); + }); + + it('should simulate two graphs in a row', () => { + const simulator = new Simulator({serverResponseTimeByOrigin}); + + const nodeA = new NetworkNode(request({})); + const nodeB = new NetworkNode(request({})); + const nodeC = new NetworkNode(request({transferSize: 15000})); + const nodeD = new NetworkNode(request({})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const resultA = simulator.simulate(nodeA); + // should be 950ms for A and 950ms for C (2 round trips of downloading, no DNS) + assert.strictEqual(resultA.timeInMs, 950 + 950); + + const nodeE = new NetworkNode(request({})); + const nodeF = new NetworkNode(request({})); + const nodeG = new NetworkNode(request({})); + + nodeE.addDependent(nodeF); + nodeE.addDependent(nodeG); + + const resultB = simulator.simulate(nodeE); + // should be 950ms for E and 800ms for F/G + assert.strictEqual(resultB.timeInMs, 950 + 800); + }); + + it('should maximize throughput with H2', () => { + const simulator = new Simulator({serverResponseTimeByOrigin}); + const connectionDefaults = {protocol: 'h2', connectionId: 1}; + const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, ...connectionDefaults})); + const nodeB = new NetworkNode(request({startTime: 1, endTime: 2, 
...connectionDefaults})); + const nodeC = new NetworkNode(request({startTime: 2, endTime: 3, ...connectionDefaults})); + const nodeD = new NetworkNode(request({startTime: 3, endTime: 4, ...connectionDefaults})); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeB.addDependent(nodeD); + + // Run two simulations: + // - The first with C & D in parallel. + // - The second with C & D in series. + // Under HTTP/2 simulation these should be equivalent, but definitely parallel + // shouldn't be slower. + const resultA = simulator.simulate(nodeA); + nodeC.addDependent(nodeD); + const resultB = simulator.simulate(nodeA); + expect(resultA.timeInMs).to.be.lessThanOrEqual(resultB.timeInMs); + }); + + it('should throw (not hang) on graphs with cycles', () => { + const rootNode = new NetworkNode(request({})); + const depNode = new NetworkNode(request({})); + rootNode.addDependency(depNode); + depNode.addDependency(rootNode); + + const simulator = new Simulator({serverResponseTimeByOrigin}); + assert.throws(() => simulator.simulate(rootNode), /cycle/); + }); + + describe('on a real trace', () => { + it('should compute a timeInMs', async () => { + const graph = await createGraph(trace); + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(graph); + expect(result.timeInMs).to.be.greaterThan(100); + }); + + it('should sort the task event times', async () => { + const graph = await createGraph(trace); + const simulator = new Simulator({serverResponseTimeByOrigin}); + const result = simulator.simulate(graph); + const nodeTimings = Array.from(result.nodeTimings.entries()); + + for (let i = 1; i < nodeTimings.length; i++) { + const startTime = nodeTimings[i][1].startTime; + const previousStartTime = nodeTimings[i - 1][1].startTime; + expect(startTime).to.be.greaterThanOrEqual(previousStartTime); + } + }); + }); + }); + + describe('.simulateTimespan', () => { + it('calculates savings using throughput', () => { + const 
simulator = new Simulator({throughput: 1000, observedThroughput: 2000}); + const wastedMs = simulator.computeWastedMsFromWastedBytes(500); + expect(wastedMs).to.be.closeTo(4000, 0.1); + }); + + it('falls back to observed throughput if throughput is 0', () => { + const simulator = new Simulator({throughput: 0, observedThroughput: 2000}); + const wastedMs = simulator.computeWastedMsFromWastedBytes(500); + expect(wastedMs).to.be.closeTo(2000, 0.1); + }); + + it('returns 0 if throughput and observed throughput are 0', () => { + const simulator = new Simulator({throughput: 0, observedThroughput: 0}); + const wastedMs = simulator.computeWastedMsFromWastedBytes(500); + expect(wastedMs).to.equal(0); + }); + }); +}); diff --git a/front_end/models/trace/lantern/simulation/Simulator.ts b/front_end/models/trace/lantern/simulation/Simulator.ts new file mode 100644 index 00000000000..44bdcbb4701 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/Simulator.ts @@ -0,0 +1,555 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {BaseNode, type Node} from '../BaseNode.js'; +import {type CPUNode} from '../CpuNode.js'; +import {type NetworkNode} from '../NetworkNode.js'; +import type * as Lantern from '../types/lantern.js'; + +import {ConnectionPool} from './ConnectionPool.js'; +import {Constants} from './Constants.js'; +import {DNSCache} from './DNSCache.js'; +import {type CompleteNodeTiming, type ConnectionTiming, SimulatorTimingMap} from './SimulationTimingMap.js'; +import {TcpConnection} from './TcpConnection.js'; + +const defaultThrottling = Constants.throttling.mobileSlow4G; + +// see https://cs.chromium.org/search/?q=kDefaultMaxNumDelayableRequestsPerClient&sq=package:chromium&type=cs +const DEFAULT_MAXIMUM_CONCURRENT_REQUESTS = 10; +// layout tasks tend to be less CPU-bound and do not experience the same increase in duration +const DEFAULT_LAYOUT_TASK_MULTIPLIER = 0.5; +// if a task takes more than 10 seconds it's usually a sign it isn't actually CPU bound and we're overestimating +const DEFAULT_MAXIMUM_CPU_TASK_DURATION = 10000; + +const NodeState = { + NotReadyToStart: 0, + ReadyToStart: 1, + InProgress: 2, + Complete: 3, +}; + +const PriorityStartTimePenalty: Record = { + VeryHigh: 0, + High: 0.25, + Medium: 0.5, + Low: 1, + VeryLow: 2, +}; + +const ALL_SIMULATION_NODE_TIMINGS = new Map>(); + +class Simulator { + static createSimulator(settings: Lantern.Simulation.Settings): Simulator { + const {throttlingMethod, throttling, precomputedLanternData, networkAnalysis} = settings; + + const options: Lantern.Simulation.Options = { + additionalRttByOrigin: networkAnalysis.additionalRttByOrigin, + serverResponseTimeByOrigin: networkAnalysis.serverResponseTimeByOrigin, + observedThroughput: networkAnalysis.throughput, + }; + + // If we have precomputed lantern data, overwrite our observed estimates and use precomputed instead + // for increased stability. 
+ if (precomputedLanternData) { + options.additionalRttByOrigin = new Map(Object.entries(precomputedLanternData.additionalRttByOrigin)); + options.serverResponseTimeByOrigin = new Map(Object.entries(precomputedLanternData.serverResponseTimeByOrigin)); + } + + switch (throttlingMethod) { + case 'provided': + options.rtt = networkAnalysis.rtt; + options.throughput = networkAnalysis.throughput; + options.cpuSlowdownMultiplier = 1; + options.layoutTaskMultiplier = 1; + break; + case 'devtools': + if (throttling) { + options.rtt = throttling.requestLatencyMs / Constants.throttling.DEVTOOLS_RTT_ADJUSTMENT_FACTOR; + options.throughput = + throttling.downloadThroughputKbps * 1024 / Constants.throttling.DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR; + } + + options.cpuSlowdownMultiplier = 1; + options.layoutTaskMultiplier = 1; + break; + case 'simulate': + if (throttling) { + options.rtt = throttling.rttMs; + options.throughput = throttling.throughputKbps * 1024; + options.cpuSlowdownMultiplier = throttling.cpuSlowdownMultiplier; + } + break; + default: + // intentionally fallback to simulator defaults + break; + } + + return new Simulator(options); + } + + _options: Required; + _rtt: number; + _throughput: number; + _maximumConcurrentRequests: number; + _cpuSlowdownMultiplier: number; + _layoutTaskMultiplier: number; + _cachedNodeListByStartPosition: Node[]; + _nodeTimings: SimulatorTimingMap; + _numberInProgressByType: Map; + _nodes: Record>; + _dns: DNSCache; + _connectionPool: ConnectionPool; + + constructor(options?: Lantern.Simulation.Options) { + this._options = Object.assign( + { + rtt: defaultThrottling.rttMs, + throughput: defaultThrottling.throughputKbps * 1024, + maximumConcurrentRequests: DEFAULT_MAXIMUM_CONCURRENT_REQUESTS, + cpuSlowdownMultiplier: defaultThrottling.cpuSlowdownMultiplier, + layoutTaskMultiplier: DEFAULT_LAYOUT_TASK_MULTIPLIER, + additionalRttByOrigin: new Map(), + serverResponseTimeByOrigin: new Map(), + }, + options, + ); + + this._rtt = 
this._options.rtt; + this._throughput = this._options.throughput; + this._maximumConcurrentRequests = Math.max( + Math.min( + TcpConnection.maximumSaturatedConnections(this._rtt, this._throughput), + this._options.maximumConcurrentRequests, + ), + 1); + this._cpuSlowdownMultiplier = this._options.cpuSlowdownMultiplier; + this._layoutTaskMultiplier = this._cpuSlowdownMultiplier * this._options.layoutTaskMultiplier; + this._cachedNodeListByStartPosition = []; + + // Properties reset on every `.simulate` call but duplicated here for type checking + this._nodeTimings = new SimulatorTimingMap(); + this._numberInProgressByType = new Map(); + this._nodes = {}; + this._dns = new DNSCache({rtt: this._rtt}); + // @ts-expect-error + this._connectionPool = null; + + if (!Number.isFinite(this._rtt)) { + throw new Error(`Invalid rtt ${this._rtt}`); + } + if (!Number.isFinite(this._throughput)) { + throw new Error(`Invalid rtt ${this._throughput}`); + } + } + + get rtt(): number { + return this._rtt; + } + + _initializeConnectionPool(graph: Node): void { + const records: Lantern.NetworkRequest[] = []; + graph.getRootNode().traverse(node => { + if (node.type === BaseNode.types.NETWORK) { + records.push(node.request); + } + }); + + this._connectionPool = new ConnectionPool(records, this._options); + } + + /** + * Initializes the various state data structures such _nodeTimings and the _node Sets by state. + */ + _initializeAuxiliaryData(): void { + this._nodeTimings = new SimulatorTimingMap(); + this._numberInProgressByType = new Map(); + + this._nodes = {}; + this._cachedNodeListByStartPosition = []; + // NOTE: We don't actually need *all* of these sets, but the clarity that each node progresses + // through the system is quite nice. 
+ for (const state of Object.values(NodeState)) { + this._nodes[state] = new Set(); + } + } + + _numberInProgress(type: string): number { + return this._numberInProgressByType.get(type) || 0; + } + + _markNodeAsReadyToStart(node: Node, queuedTime: number): void { + const nodeStartPosition = Simulator._computeNodeStartPosition(node); + const firstNodeIndexWithGreaterStartPosition = this._cachedNodeListByStartPosition.findIndex( + candidate => Simulator._computeNodeStartPosition(candidate) > nodeStartPosition); + const insertionIndex = firstNodeIndexWithGreaterStartPosition === -1 ? this._cachedNodeListByStartPosition.length : + firstNodeIndexWithGreaterStartPosition; + this._cachedNodeListByStartPosition.splice(insertionIndex, 0, node); + + this._nodes[NodeState.ReadyToStart].add(node); + this._nodes[NodeState.NotReadyToStart].delete(node); + this._nodeTimings.setReadyToStart(node, {queuedTime}); + } + + _markNodeAsInProgress(node: Node, startTime: number): void { + const indexOfNodeToStart = this._cachedNodeListByStartPosition.indexOf(node); + this._cachedNodeListByStartPosition.splice(indexOfNodeToStart, 1); + + this._nodes[NodeState.InProgress].add(node); + this._nodes[NodeState.ReadyToStart].delete(node); + this._numberInProgressByType.set(node.type, this._numberInProgress(node.type) + 1); + this._nodeTimings.setInProgress(node, {startTime}); + } + + _markNodeAsComplete(node: Node, endTime: number, connectionTiming?: ConnectionTiming): void { + this._nodes[NodeState.Complete].add(node); + this._nodes[NodeState.InProgress].delete(node); + this._numberInProgressByType.set(node.type, this._numberInProgress(node.type) - 1); + this._nodeTimings.setCompleted(node, {endTime, connectionTiming}); + + // Try to add all its dependents to the queue + for (const dependent of node.getDependents()) { + // Skip dependent node if one of its dependencies hasn't finished yet + const dependencies = dependent.getDependencies(); + if (dependencies.some(dep => 
!this._nodes[NodeState.Complete].has(dep))) { + continue; + } + + // Otherwise add it to the queue + this._markNodeAsReadyToStart(dependent, endTime); + } + } + + _acquireConnection(request: Lantern.NetworkRequest): TcpConnection|null { + return this._connectionPool.acquire(request); + } + + _getNodesSortedByStartPosition(): Node[] { + // Make a copy so we don't skip nodes due to concurrent modification + return Array.from(this._cachedNodeListByStartPosition); + } + + _startNodeIfPossible(node: Node, totalElapsedTime: number): void { + if (node.type === BaseNode.types.CPU) { + // Start a CPU task if there's no other CPU task in process + if (this._numberInProgress(node.type) === 0) { + this._markNodeAsInProgress(node, totalElapsedTime); + } + + return; + } + + if (node.type !== BaseNode.types.NETWORK) { + throw new Error('Unsupported'); + } + + // If a network request is connectionless, we can always start it, so skip the connection checks + if (!node.isConnectionless) { + // Start a network request if we're not at max requests and a connection is available + const numberOfActiveRequests = this._numberInProgress(node.type); + if (numberOfActiveRequests >= this._maximumConcurrentRequests) { + return; + } + const connection = this._acquireConnection(node.request); + if (!connection) { + return; + } + } + + this._markNodeAsInProgress(node, totalElapsedTime); + } + + /** + * Updates each connection in use with the available throughput based on the number of network requests + * currently in flight. + */ + _updateNetworkCapacity(): void { + const inFlight = this._numberInProgress(BaseNode.types.NETWORK); + if (inFlight === 0) { + return; + } + + for (const connection of this._connectionPool.connectionsInUse()) { + connection.setThroughput(this._throughput / inFlight); + } + } + + /** + * Estimates the number of milliseconds remaining given current conditions before the node is complete. 
+ */ + _estimateTimeRemaining(node: Node): number { + if (node.type === BaseNode.types.CPU) { + return this._estimateCPUTimeRemaining(node); + } + if (node.type === BaseNode.types.NETWORK) { + return this._estimateNetworkTimeRemaining(node); + } + throw new Error('Unsupported'); + } + + _estimateCPUTimeRemaining(cpuNode: CPUNode): number { + const timingData = this._nodeTimings.getCpuStarted(cpuNode); + const multiplier = cpuNode.didPerformLayout() ? this._layoutTaskMultiplier : this._cpuSlowdownMultiplier; + const totalDuration = Math.min( + Math.round(cpuNode.duration / 1000 * multiplier), + DEFAULT_MAXIMUM_CPU_TASK_DURATION, + ); + const estimatedTimeElapsed = totalDuration - timingData.timeElapsed; + this._nodeTimings.setCpuEstimated(cpuNode, {estimatedTimeElapsed}); + return estimatedTimeElapsed; + } + + _estimateNetworkTimeRemaining(networkNode: NetworkNode): number { + const request = networkNode.request; + const timingData = this._nodeTimings.getNetworkStarted(networkNode); + + let timeElapsed = 0; + if (networkNode.fromDiskCache) { + // Rough access time for seeking to location on disk and reading sequentially. + // 8ms per seek + 20ms/MB + // @see http://norvig.com/21-days.html#answers + const sizeInMb = (request.resourceSize || 0) / 1024 / 1024; + timeElapsed = 8 + 20 * sizeInMb - timingData.timeElapsed; + } else if (networkNode.isNonNetworkProtocol) { + // Estimates for the overhead of a data URL in Chromium and the decoding time for base64-encoded data. 
+ // 2ms per request + 10ms/MB + // @see traces on https://dopiaza.org/tools/datauri/examples/index.php + const sizeInMb = (request.resourceSize || 0) / 1024 / 1024; + timeElapsed = 2 + 10 * sizeInMb - timingData.timeElapsed; + } else { + const connection = this._connectionPool.acquireActiveConnectionFromRequest(request); + const dnsResolutionTime = this._dns.getTimeUntilResolution(request, { + requestedAt: timingData.startTime, + shouldUpdateCache: true, + }); + const timeAlreadyElapsed = timingData.timeElapsed; + const calculation = connection.simulateDownloadUntil( + request.transferSize - timingData.bytesDownloaded, + {timeAlreadyElapsed, dnsResolutionTime, maximumTimeToElapse: Infinity}, + ); + + timeElapsed = calculation.timeElapsed; + } + + const estimatedTimeElapsed = timeElapsed + timingData.timeElapsedOvershoot; + this._nodeTimings.setNetworkEstimated(networkNode, {estimatedTimeElapsed}); + return estimatedTimeElapsed; + } + + /** + * Computes and returns the minimum estimated completion time of the nodes currently in progress. + */ + _findNextNodeCompletionTime(): number { + let minimumTime = Infinity; + for (const node of this._nodes[NodeState.InProgress]) { + minimumTime = Math.min(minimumTime, this._estimateTimeRemaining(node)); + } + + return minimumTime; + } + + /** + * Given a time period, computes the progress toward completion that the node made during that time. 
+ */ + _updateProgressMadeInTimePeriod(node: Node, timePeriodLength: number, totalElapsedTime: number): void { + const timingData = this._nodeTimings.getInProgress(node); + const isFinished = timingData.estimatedTimeElapsed === timePeriodLength; + + if (node.type === BaseNode.types.CPU || node.isConnectionless) { + if (isFinished) { + this._markNodeAsComplete(node, totalElapsedTime); + } else { + timingData.timeElapsed += timePeriodLength; + } + return; + } + + if (node.type !== BaseNode.types.NETWORK) { + throw new Error('Unsupported'); + } + if (!('bytesDownloaded' in timingData)) { + throw new Error('Invalid timing data'); + } + + const request = node.request; + const connection = this._connectionPool.acquireActiveConnectionFromRequest(request); + const dnsResolutionTime = this._dns.getTimeUntilResolution(request, { + requestedAt: timingData.startTime, + shouldUpdateCache: true, + }); + const calculation = connection.simulateDownloadUntil( + request.transferSize - timingData.bytesDownloaded, + { + dnsResolutionTime, + timeAlreadyElapsed: timingData.timeElapsed, + maximumTimeToElapse: timePeriodLength - timingData.timeElapsedOvershoot, + }, + ); + + connection.setCongestionWindow(calculation.congestionWindow); + connection.setH2OverflowBytesDownloaded(calculation.extraBytesDownloaded); + + if (isFinished) { + connection.setWarmed(true); + this._connectionPool.release(request); + this._markNodeAsComplete(node, totalElapsedTime, calculation.connectionTiming); + } else { + timingData.timeElapsed += calculation.timeElapsed; + timingData.timeElapsedOvershoot += calculation.timeElapsed - timePeriodLength; + timingData.bytesDownloaded += calculation.bytesDownloaded; + } + } + + _computeFinalNodeTimings(): + {nodeTimings: Map, completeNodeTimings: Map} { + const completeNodeTimingEntries: Array<[Node, CompleteNodeTiming]> = this._nodeTimings.getNodes().map(node => { + return [node, this._nodeTimings.getCompleted(node)]; + }); + + // Most consumers will want the entries 
sorted by startTime, so insert them in that order + completeNodeTimingEntries.sort((a, b) => a[1].startTime - b[1].startTime); + + // Trimmed version of type `Lantern.Simulation.NodeTiming`. + const nodeTimingEntries: Array<[Node, Lantern.Simulation.NodeTiming]> = + completeNodeTimingEntries.map(([node, timing]) => { + return [ + node, + { + startTime: timing.startTime, + endTime: timing.endTime, + duration: timing.endTime - timing.startTime, + }, + ]; + }); + + return { + nodeTimings: new Map(nodeTimingEntries), + completeNodeTimings: new Map(completeNodeTimingEntries), + }; + } + + getOptions(): Required { + return this._options; + } + + /** + * Estimates the time taken to process all of the graph's nodes, returns the overall time along with + * each node annotated by start/end times. + * + * Simulator/connection pool are allowed to deviate from what was + * observed in the trace/devtoolsLog and start requests as soon as they are queued (i.e. do not + * wait around for a warm connection to be available if the original request was fetched on a warm + * connection). 
+ */ + simulate(graph: Node, options?: {label?: string}): Lantern.Simulation.Result { + if (BaseNode.hasCycle(graph)) { + throw new Error('Cannot simulate graph with cycle'); + } + + options = Object.assign( + { + label: undefined, + }, + options); + + // initialize the necessary data containers + this._dns = new DNSCache({rtt: this._rtt}); + this._initializeConnectionPool(graph); + this._initializeAuxiliaryData(); + + const nodesNotReadyToStart = this._nodes[NodeState.NotReadyToStart]; + const nodesReadyToStart = this._nodes[NodeState.ReadyToStart]; + const nodesInProgress = this._nodes[NodeState.InProgress]; + + const rootNode = graph.getRootNode(); + rootNode.traverse(node => nodesNotReadyToStart.add(node)); + let totalElapsedTime = 0; + let iteration = 0; + + // root node is always ready to start + this._markNodeAsReadyToStart(rootNode, totalElapsedTime); + + // loop as long as we have nodes in the queue or currently in progress + while (nodesReadyToStart.size || nodesInProgress.size) { + // move all possible queued nodes to in progress + for (const node of this._getNodesSortedByStartPosition()) { + this._startNodeIfPossible(node, totalElapsedTime); + } + + if (!nodesInProgress.size) { + // Interplay between fromDiskCache and connectionReused can be incorrect, + // have to give up. 
+ throw new Error('Failed to start a node'); + } + + // set the available throughput for all connections based on # inflight + this._updateNetworkCapacity(); + + // find the time that the next node will finish + const minimumTime = this._findNextNodeCompletionTime(); + totalElapsedTime += minimumTime; + + // While this is no longer strictly necessary, it's always better than hanging + if (!Number.isFinite(minimumTime) || iteration > 100000) { + throw new Error('Simulation failed, depth exceeded'); + } + + iteration++; + // update how far each node will progress until that point + for (const node of nodesInProgress) { + this._updateProgressMadeInTimePeriod(node, minimumTime, totalElapsedTime); + } + } + + // `nodeTimings` are used for simulator consumers, `completeNodeTimings` kept for debugging. + const {nodeTimings, completeNodeTimings} = this._computeFinalNodeTimings(); + ALL_SIMULATION_NODE_TIMINGS.set(options.label || 'unlabeled', completeNodeTimings); + + return { + timeInMs: totalElapsedTime, + nodeTimings, + }; + } + + computeWastedMsFromWastedBytes(wastedBytes: number): number { + const {throughput, observedThroughput} = this._options; + + // https://github.com/GoogleChrome/lighthouse/pull/13323#issuecomment-962031709 + // 0 throughput means no (additional) throttling is expected. + // This is common for desktop + devtools throttling where throttling is additive and we don't want any additional. + const bitsPerSecond = throughput === 0 ? observedThroughput : throughput; + if (bitsPerSecond === 0) { + return 0; + } + + const wastedBits = wastedBytes * 8; + const wastedMs = wastedBits / bitsPerSecond * 1000; + + // This is an estimate of wasted time, so we won't be more precise than 10ms. + return Math.round(wastedMs / 10) * 10; + } + + static get allNodeTimings(): Map<string, Map<Node, CompleteNodeTiming>> { + return ALL_SIMULATION_NODE_TIMINGS; + } + + /** + * We attempt to start nodes by their observed start time using the request priority as a tie breaker. 
+ * When simulating, just because a low priority image started 5ms before a high priority image doesn't mean + * it would have happened like that when the network was slower. + */ + static _computeNodeStartPosition(node: Node): number { + if (node.type === 'cpu') { + return node.startTime; + } + return node.startTime + (PriorityStartTimePenalty[node.request.priority] * 1000 * 1000 || 0); + } +} + +export {Simulator}; diff --git a/front_end/models/trace/lantern/simulation/TCPConnection.test.ts b/front_end/models/trace/lantern/simulation/TCPConnection.test.ts new file mode 100644 index 00000000000..b80acf45827 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/TCPConnection.test.ts @@ -0,0 +1,374 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as Lantern from '../lantern.js'; + +const {TcpConnection} = Lantern.Simulation; + +describe('TCPConnection', () => { + describe('#constructor', () => { + it('should create the connection', () => { + const rtt = 150; + const throughput = 1600 * 1024; + const connection = new TcpConnection(rtt, throughput); + assert.ok(connection); + assert.strictEqual(connection._rtt, rtt); + }); + }); + + describe('#maximumSaturatedConnections', () => { + it('should compute number of supported simulated requests', () => { + const availableThroughput = 1460 * 8 * 10; // 10 TCP segments/second + assert.strictEqual(TcpConnection.maximumSaturatedConnections(100, availableThroughput), 1); + assert.strictEqual(TcpConnection.maximumSaturatedConnections(300, availableThroughput), 3); + assert.strictEqual(TcpConnection.maximumSaturatedConnections(1000, availableThroughput), 10); + }); + }); + + describe('.setWarmed', () => { + it('adjusts the time to download appropriately', () => { + const connection = new 
TcpConnection(100, Infinity); + assert.strictEqual(connection.simulateDownloadUntil(0).timeElapsed, 300); + connection.setWarmed(true); + assert.strictEqual(connection.simulateDownloadUntil(0).timeElapsed, 100); + }); + }); + + describe('.setCongestionWindow', () => { + it('adjusts the time to download appropriately', () => { + const connection = new TcpConnection(100, Infinity); + assert.deepEqual(connection.simulateDownloadUntil(50000), { + bytesDownloaded: 50000, + extraBytesDownloaded: 0, + congestionWindow: 40, + roundTrips: 5, + timeElapsed: 500, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + connection.setCongestionWindow(40); // will download all in one round trip + assert.deepEqual(connection.simulateDownloadUntil(50000), { + bytesDownloaded: 50000, + extraBytesDownloaded: 0, + congestionWindow: 40, + roundTrips: 3, + timeElapsed: 300, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + }); + }); + + describe('.setH2OverflowBytesDownloaded', () => { + it('adjusts the time to download appropriately for H2 connections', () => { + const connection = new TcpConnection(100, Infinity, 0, true, true); + connection.setWarmed(true); + assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 200); + connection.setH2OverflowBytesDownloaded(20000); + assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 100); + connection.setH2OverflowBytesDownloaded(50000); + assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 0); + }); + + it('does not adjust the time to download for non-H2 connections', () => { + const connection = new TcpConnection(100, Infinity, 0, true, false); + connection.setWarmed(true); + assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 200); + connection.setH2OverflowBytesDownloaded(20000); + 
assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 200); + connection.setH2OverflowBytesDownloaded(50000); + assert.strictEqual(connection.simulateDownloadUntil(30000).timeElapsed, 200); + }); + }); + + describe('.simulateDownloadUntil', () => { + describe('when maximumTime is not set', () => { + it('should provide the correct values small payload non-SSL', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + connectionTiming: { + connectionTime: 150, + dnsResolutionTime: 0, + sslTime: undefined, // non-SSL + timeToFirstByte: 200, + }, + }); + }); + + it('should provide the correct values small payload SSL', () => { + const connection = new TcpConnection(100, Infinity, 0, true); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 300, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + }); + + it('should provide the correct values small payload H2', () => { + const connection = new TcpConnection(100, Infinity, 0, true, true); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + extraBytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 300, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + }); + + it('should provide the correct values response time', () => { + const responseTime = 78; + const connection = new TcpConnection(100, Infinity, responseTime, true); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 300 + 
responseTime, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 378, + }, + }); + }); + + it('should provide the correct values large payload', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + assert.deepEqual(connection.simulateDownloadUntil(bytesToDownload), { + bytesDownloaded: bytesToDownload, + extraBytesDownloaded: 0, + congestionWindow: 68, + roundTrips: 105, + timeElapsed: 10500, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + }); + + it('should provide the correct values resumed small payload', () => { + const connection = new TcpConnection(100, Infinity, 0, true); + assert.deepEqual(connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 250}), { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 50, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }); + }); + + it('should provide the correct values resumed small payload H2', () => { + const connection = new TcpConnection(100, Infinity, 0, true, true); + connection.setWarmed(true); + connection.setH2OverflowBytesDownloaded(10000); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 0, + extraBytesDownloaded: 2700, // 10000 - 7300 + congestionWindow: 10, + roundTrips: 0, + timeElapsed: 0, + connectionTiming: { + timeToFirstByte: 0, + }, + }); + }); + + it('should provide the correct values resumed large payload', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 5 * 1000 * 1000; // 5 mb + connection.setCongestionWindow(68); + assert.deepEqual( + connection.simulateDownloadUntil(bytesToDownload, {timeAlreadyElapsed: 5234}), + { + bytesDownloaded: bytesToDownload, + extraBytesDownloaded: 0, + congestionWindow: 
68, + roundTrips: 51, // 5 mb / (1460 * 68) + timeElapsed: 5100, + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }, + ); + }); + }); + + describe('when maximumTime is set', () => { + it('should provide the correct values less than TTFB', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual( + connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 0, maximumTimeToElapse: 68}), + { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + connectionTiming: { + connectionTime: 150, + dnsResolutionTime: 0, + sslTime: undefined, // non-SSL + timeToFirstByte: 200, + }, + }, + ); + }); + + it('should provide the correct values just over TTFB', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual( + connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 0, maximumTimeToElapse: 250}), + { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + connectionTiming: { + connectionTime: 150, + dnsResolutionTime: 0, + sslTime: undefined, // non-SSL + timeToFirstByte: 200, + }, + }, + ); + }); + + it('should provide the correct values with already elapsed', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual( + connection.simulateDownloadUntil(7300, { + timeAlreadyElapsed: 75, + maximumTimeToElapse: 250, + }), + { + bytesDownloaded: 7300, + extraBytesDownloaded: 0, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 125, + connectionTiming: { + connectionTime: 150, + dnsResolutionTime: 0, + sslTime: undefined, // non-SSL + timeToFirstByte: 200, + }, + }, + ); + }); + + it('should provide the correct values large payloads', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + assert.deepEqual( + 
connection.simulateDownloadUntil(bytesToDownload, { + timeAlreadyElapsed: 500, + maximumTimeToElapse: 740, + }), + { + bytesDownloaded: 683280, // should be less than 68 * 1460 * 8 + extraBytesDownloaded: 0, + congestionWindow: 68, + roundTrips: 8, + timeElapsed: 800, // skips the handshake because time already elapsed + connectionTiming: { + connectionTime: 250, + dnsResolutionTime: 0, + sslTime: 100, + timeToFirstByte: 300, + }, + }, + ); + }); + + it('should all add up', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + const firstStoppingPoint = 5234; + const secondStoppingPoint = 315; + const thirdStoppingPoint = 10500 - firstStoppingPoint - secondStoppingPoint; + + const firstSegment = connection.simulateDownloadUntil(bytesToDownload, { + timeAlreadyElapsed: 0, + maximumTimeToElapse: firstStoppingPoint, + }); + const firstOvershoot = firstSegment.timeElapsed - firstStoppingPoint; + + connection.setCongestionWindow(firstSegment.congestionWindow); + const secondSegment = connection.simulateDownloadUntil( + bytesToDownload - firstSegment.bytesDownloaded, + { + timeAlreadyElapsed: firstSegment.timeElapsed, + maximumTimeToElapse: secondStoppingPoint - firstOvershoot, + }, + ); + const secondOvershoot = firstOvershoot + secondSegment.timeElapsed - secondStoppingPoint; + + connection.setCongestionWindow(secondSegment.congestionWindow); + const thirdSegment = connection.simulateDownloadUntil( + bytesToDownload - firstSegment.bytesDownloaded - secondSegment.bytesDownloaded, + {timeAlreadyElapsed: firstSegment.timeElapsed + secondSegment.timeElapsed}, + ); + const thirdOvershoot = secondOvershoot + thirdSegment.timeElapsed - thirdStoppingPoint; + + assert.strictEqual(thirdOvershoot, 0); + assert.strictEqual( + firstSegment.bytesDownloaded + secondSegment.bytesDownloaded + thirdSegment.bytesDownloaded, + bytesToDownload, + ); + assert.strictEqual( + firstSegment.timeElapsed + 
secondSegment.timeElapsed + thirdSegment.timeElapsed, + 10500, + ); + }); + }); + }); +}); diff --git a/front_end/models/trace/lantern/simulation/TcpConnection.ts b/front_end/models/trace/lantern/simulation/TcpConnection.ts new file mode 100644 index 00000000000..2ca421596ad --- /dev/null +++ b/front_end/models/trace/lantern/simulation/TcpConnection.ts @@ -0,0 +1,202 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2017 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {type ConnectionTiming} from './SimulationTimingMap.js'; + +interface DownloadOptions { + dnsResolutionTime?: number; + timeAlreadyElapsed?: number; + maximumTimeToElapse?: number; +} + +interface DownloadResults { + roundTrips: number; + timeElapsed: number; + bytesDownloaded: number; + extraBytesDownloaded: number; + congestionWindow: number; + connectionTiming: ConnectionTiming; +} + +const INITIAL_CONGESTION_WINDOW = 10; +const TCP_SEGMENT_SIZE = 1460; + +class TcpConnection { + _warmed: boolean; + _ssl: boolean; + _h2: boolean; + _rtt: number; + _throughput: number; + _serverLatency: number; + _congestionWindow: number; + _h2OverflowBytesDownloaded: number; + + constructor(rtt: number, throughput: number, serverLatency = 0, ssl = true, h2 = false) { + this._warmed = false; + this._ssl = ssl; + this._h2 = h2; + this._rtt = rtt; + this._throughput = throughput; + this._serverLatency = serverLatency; + this._congestionWindow = INITIAL_CONGESTION_WINDOW; + this._h2OverflowBytesDownloaded = 0; + } + + static maximumSaturatedConnections(rtt: number, availableThroughput: number): number { + const roundTripsPerSecond = 1000 / rtt; + const bytesPerRoundTrip = TCP_SEGMENT_SIZE; + const bytesPerSecond = roundTripsPerSecond * bytesPerRoundTrip; + const minimumThroughputRequiredPerRequest = bytesPerSecond * 8; + return 
Math.floor(availableThroughput / minimumThroughputRequiredPerRequest); + } + + _computeMaximumCongestionWindowInSegments(): number { + const bytesPerSecond = this._throughput / 8; + const secondsPerRoundTrip = this._rtt / 1000; + const bytesPerRoundTrip = bytesPerSecond * secondsPerRoundTrip; + return Math.floor(bytesPerRoundTrip / TCP_SEGMENT_SIZE); + } + + setThroughput(throughput: number): void { + this._throughput = throughput; + } + + setCongestionWindow(congestion: number): void { + this._congestionWindow = congestion; + } + + setWarmed(warmed: boolean): void { + this._warmed = warmed; + } + + isWarm(): boolean { + return this._warmed; + } + + isH2(): boolean { + return this._h2; + } + + get congestionWindow(): number { + return this._congestionWindow; + } + + /** + * Sets the number of excess bytes that are available to this connection on future downloads, only + * applies to H2 connections. + */ + setH2OverflowBytesDownloaded(bytes: number): void { + if (!this._h2) { + return; + } + this._h2OverflowBytesDownloaded = bytes; + } + + clone(): TcpConnection { + return Object.assign(new TcpConnection(this._rtt, this._throughput), this); + } + + /** + * Simulates a network download of a particular number of bytes over an optional maximum amount of time + * and returns information about the ending state. + * + * See https://hpbn.co/building-blocks-of-tcp/#three-way-handshake and + * https://hpbn.co/transport-layer-security-tls/#tls-handshake for details. 
+ */ + simulateDownloadUntil(bytesToDownload: number, options?: DownloadOptions): DownloadResults { + const {timeAlreadyElapsed = 0, maximumTimeToElapse = Infinity, dnsResolutionTime = 0} = options || {}; + + if (this._warmed && this._h2) { + bytesToDownload -= this._h2OverflowBytesDownloaded; + } + const twoWayLatency = this._rtt; + const oneWayLatency = twoWayLatency / 2; + const maximumCongestionWindow = this._computeMaximumCongestionWindowInSegments(); + + let handshakeAndRequest = oneWayLatency; + if (!this._warmed) { + handshakeAndRequest = + // DNS lookup + dnsResolutionTime + + // SYN + oneWayLatency + + // SYN ACK + oneWayLatency + + // ACK + initial request + oneWayLatency + + // ClientHello/ServerHello assuming TLS False Start is enabled (https://istlsfastyet.com/#server-performance). + (this._ssl ? twoWayLatency : 0); + } + + let roundTrips = Math.ceil(handshakeAndRequest / twoWayLatency); + let timeToFirstByte = handshakeAndRequest + this._serverLatency + oneWayLatency; + if (this._warmed && this._h2) { + timeToFirstByte = 0; + } + + const timeElapsedForTTFB = Math.max(timeToFirstByte - timeAlreadyElapsed, 0); + const maximumDownloadTimeToElapse = maximumTimeToElapse - timeElapsedForTTFB; + + let congestionWindow = Math.min(this._congestionWindow, maximumCongestionWindow); + let totalBytesDownloaded = 0; + if (timeElapsedForTTFB > 0) { + totalBytesDownloaded = congestionWindow * TCP_SEGMENT_SIZE; + } else { + roundTrips = 0; + } + + let downloadTimeElapsed = 0; + let bytesRemaining = bytesToDownload - totalBytesDownloaded; + while (bytesRemaining > 0 && downloadTimeElapsed <= maximumDownloadTimeToElapse) { + roundTrips++; + downloadTimeElapsed += twoWayLatency; + congestionWindow = Math.max(Math.min(maximumCongestionWindow, congestionWindow * 2), 1); + + const bytesDownloadedInWindow = congestionWindow * TCP_SEGMENT_SIZE; + totalBytesDownloaded += bytesDownloadedInWindow; + bytesRemaining -= bytesDownloadedInWindow; + } + + const timeElapsed = 
timeElapsedForTTFB + downloadTimeElapsed; + const extraBytesDownloaded = this._h2 ? Math.max(totalBytesDownloaded - bytesToDownload, 0) : 0; + const bytesDownloaded = Math.max(Math.min(totalBytesDownloaded, bytesToDownload), 0); + + let connectionTiming: ConnectionTiming; + if (!this._warmed) { + connectionTiming = { + dnsResolutionTime, + connectionTime: handshakeAndRequest - dnsResolutionTime, + sslTime: this._ssl ? twoWayLatency : undefined, + timeToFirstByte, + }; + } else if (this._h2) { + // TODO: timing information currently difficult to model for warm h2 connections. + connectionTiming = { + timeToFirstByte, + }; + } else { + connectionTiming = { + connectionTime: handshakeAndRequest, + timeToFirstByte, + }; + } + + return { + roundTrips, + timeElapsed, + bytesDownloaded, + extraBytesDownloaded, + congestionWindow, + connectionTiming, + }; + } +} + +export {TcpConnection}; diff --git a/front_end/models/trace/lantern/simulation/simulation.ts b/front_end/models/trace/lantern/simulation/simulation.ts new file mode 100644 index 00000000000..774f08371d5 --- /dev/null +++ b/front_end/models/trace/lantern/simulation/simulation.ts @@ -0,0 +1,25 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {type Simulation} from '../types/lantern.js'; + +export {ConnectionPool} from './ConnectionPool.js'; +export {Constants} from './Constants.js'; +export {DNSCache} from './DNSCache.js'; +export {NetworkAnalyzer} from './NetworkAnalyzer.js'; +export {SimulatorTimingMap} from './SimulationTimingMap.js'; +export {Simulator} from './Simulator.js'; +export {TcpConnection} from './TcpConnection.js'; + +export type MetricComputationDataInput = Simulation.MetricComputationDataInput; +export type Options = Simulation.Options; +export type ProcessedNavigation = Simulation.ProcessedNavigation; +export type Settings = Simulation.Settings; +export type URL = Simulation.URL; diff --git a/front_end/models/trace/lantern/testing/MetricTestUtils.ts b/front_end/models/trace/lantern/testing/MetricTestUtils.ts new file mode 100644 index 00000000000..db8b91c3200 --- /dev/null +++ b/front_end/models/trace/lantern/testing/MetricTestUtils.ts @@ -0,0 +1,55 @@ +// Copyright 2024 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Why can other tests import this directly but we get yelled at here? 
+// eslint-disable-next-line rulesdir/es_modules_import +import {TraceLoader} from '../../../../testing/TraceLoader.js'; +import * as TraceModel from '../../trace.js'; +import * as Lantern from '../lantern.js'; + +async function loadTrace(context: Mocha.Context|Mocha.Suite|null, name: string): Promise<Lantern.Trace> { + const traceEvents = await TraceLoader.rawEvents(context, name); + return { + traceEvents: traceEvents as unknown as Lantern.TraceEvent[], + }; +} + +async function runTraceEngine(trace: Lantern.Trace) { + const processor = TraceModel.Processor.TraceProcessor.createWithAllHandlers(); + await processor.parse(trace.traceEvents as TraceModel.Types.TraceEvents.TraceEventData[]); + if (!processor.traceParsedData) { + throw new Error('No data'); + } + return processor.traceParsedData; +} + +async function getComputationDataFromFixture( + {trace, settings, url}: + {trace: Lantern.Trace, settings?: Lantern.Simulation.Settings, url?: Lantern.Simulation.URL}) { + settings = settings ?? {} as Lantern.Simulation.Settings; + if (!settings.throttlingMethod) { + settings.throttlingMethod = 'simulate'; + } + const traceEngineData = await runTraceEngine(trace); + const requests = TraceModel.LanternComputationData.createNetworkRequests(trace, traceEngineData); + const networkAnalysis = Lantern.Simulation.NetworkAnalyzer.analyze(requests); + + return { + simulator: Lantern.Simulation.Simulator.createSimulator({...settings, networkAnalysis}), + graph: TraceModel.LanternComputationData.createGraph(requests, trace, traceEngineData, url), + processedNavigation: TraceModel.LanternComputationData.createProcessedNavigation(traceEngineData), + }; +} + +export { + loadTrace, + runTraceEngine, + getComputationDataFromFixture, +}; diff --git a/front_end/models/trace/lantern/types/lantern.ts b/front_end/models/trace/lantern/types/lantern.ts new file mode 100644 index 00000000000..89f7ea187c8 --- /dev/null +++ b/front_end/models/trace/lantern/types/lantern.ts @@ -0,0 +1,236 @@ +// Copyright 2024
The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * @license + * Copyright 2024 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type * as Protocol from '../../../../generated/protocol.js'; +import {type Node} from '../BaseNode.js'; +import {type Simulator} from '../simulation/Simulator.js'; + +export type TraceEvent = { + name: string, + args: { + name?: string, + data?: { + frame?: string, + readyState?: number, + stackTrace?: { + url: string, + }[], + url?: string, + }, + }, + pid: number, + tid: number, + /** Timestamp of the event in microseconds. */ + ts: number, + dur: number, +}; +export type Trace = { + traceEvents: TraceEvent[], +}; +export type ResourcePriority = ('VeryLow'|'Low'|'Medium'|'High'|'VeryHigh'); +export type ResourceType = + ('Document'|'Stylesheet'|'Image'|'Media'|'Font'|'Script'|'TextTrack'|'XHR'|'Fetch'|'Prefetch'|'EventSource'| + 'WebSocket'|'Manifest'|'SignedExchange'|'Ping'|'CSPViolationReport'|'Preflight'|'Other'); +type InitiatorType = ('parser'|'script'|'preload'|'SignedExchange'|'preflight'|'other'); +export type ResourceTiming = Protocol.Network.ResourceTiming; +type CallStack = { + callFrames: Array<{ + scriptId: string, + url: string, + lineNumber: number, + columnNumber: number, + functionName: string, + }>, + parent?: CallStack, +}; + +export type ParsedURL = { + /** + * Equivalent to a `new URL(url).protocol` BUT w/o the trailing colon (:) + */ + scheme: string, + /** + * Equivalent to a `new URL(url).hostname` + */ + host: string, + securityOrigin: string, +}; + +// When Lantern NetworkRequests are constructed, the source-of-truth of the network record is given as `rawRequest`. +// Internally Lantern doesn't care about the type of this field, so a default type is given to simplify internal code +// by avoiding unnecessary typescript overhead. 
+// If callers want to access the underlying network record, they are expected to make use of this generic on top-level +// interfaces like Simulator. +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type AnyNetworkObject = any; + +export type NetworkRequest<T = AnyNetworkObject> = { + requestId: string, + connectionId: number, + connectionReused: boolean, + url: string, + protocol: string, + parsedURL: ParsedURL, + documentURL: string, + /** When the renderer process initially discovers a network request, in milliseconds. */ + rendererStartTime: number, + /** + * When the network service is about to handle a request, ie. just before going to the + * HTTP cache or going to the network for DNS/connection setup, in milliseconds. + */ + networkRequestTime: number, + /** + * When the last byte of the response headers is received, in milliseconds. + * Equal to networkRequestTime if no data is received over the + * network (ex: cached requests or data urls). + */ + responseHeadersEndTime: number, + /** When the last byte of the response body is received, in milliseconds. */ + networkEndTime: number, + transferSize: number, + resourceSize: number, + fromDiskCache: boolean, + fromMemoryCache: boolean, + isLinkPreload: boolean, + finished: boolean, + failed: boolean, + statusCode: number, + /** The network request that redirected to this one */ + redirectSource: NetworkRequest<T>|undefined, + /** The network request that this one redirected to */ + redirectDestination: NetworkRequest<T>|undefined, + // TODO: can't use Protocol.Network.Initiator because of type mismatch in Lighthouse initiator.
+ initiator: { + type: InitiatorType, + url?: string, + stack?: CallStack, + }, + initiatorRequest: NetworkRequest<T>|undefined, + /** The chain of network requests that redirected to this one */ + redirects: NetworkRequest[]|undefined, + timing: Protocol.Network.ResourceTiming|undefined, + resourceType: ResourceType|undefined, + mimeType: string, + priority: ResourcePriority, + frameId: string|undefined, + fromWorker: boolean, + /** + * Optional value for how long the server took to respond to this request. + * When not provided, the server response time is derived from the timing object. + */ + serverResponseTime?: number, + /** + * Implementation-specific canonical data structure that this Lantern NetworkRequest + * was derived from. + * Users of Lantern create a NetworkRequest matching this interface, + * but can store the source-of-truth for their network model in this property. + * This is then accessible as a read-only property on NetworkNode. + */ + rawRequest?: T, +}; + +export namespace Simulation { + export interface URL { + /** URL of the initially requested URL */ + requestedUrl?: string; + /** URL of the last document request */ + mainDocumentUrl?: string; + } + + /** Simulation settings that control the amount of network & cpu throttling in the run. */ + export interface ThrottlingSettings { + /** The round trip time in milliseconds. */ + rttMs?: number; + /** The network throughput in kilobits per second. */ + throughputKbps?: number; + // devtools settings + /** The network request latency in milliseconds. */ + requestLatencyMs?: number; + /** The network download throughput in kilobits per second. */ + downloadThroughputKbps?: number; + /** The network upload throughput in kilobits per second. */ + uploadThroughputKbps?: number; + // used by both + /** The amount of slowdown applied to the cpu (1/cpuSlowdownMultiplier).
*/ + cpuSlowdownMultiplier?: number, + } + + export interface PrecomputedLanternData { + additionalRttByOrigin: {[origin: string]: number}; + serverResponseTimeByOrigin: {[origin: string]: number}; + } + + export interface Settings { + networkAnalysis: { + rtt: number, + additionalRttByOrigin: Map<string, number>, + serverResponseTimeByOrigin: Map<string, number>, + throughput: number, + }; + /** The method used to throttle the network. */ + throttlingMethod: 'devtools'|'simulate'|'provided'; + /** The throttling config settings. */ + throttling: Required<ThrottlingSettings>; + /** Precomputed lantern estimates to use instead of observed analysis. */ + precomputedLanternData?: PrecomputedLanternData|null; + } + + export interface Options { + rtt?: number; + throughput?: number; + observedThroughput: number; + maximumConcurrentRequests?: number; + cpuSlowdownMultiplier?: number; + layoutTaskMultiplier?: number; + additionalRttByOrigin?: Map<string, number>; + serverResponseTimeByOrigin?: Map<string, number>; + } + + export interface ProcessedNavigation { + timestamps: { + firstContentfulPaint: number, + largestContentfulPaint?: number, + }; + } + + export interface MetricComputationDataInput { + simulator: Simulator; + graph: Node; + processedNavigation: ProcessedNavigation; + } + + export interface MetricCoefficients { + intercept: number; + optimistic: number; + pessimistic: number; + } + + export interface NodeTiming { + startTime: number; + endTime: number; + duration: number; + } + + export interface Result<T = AnyNetworkObject> { + timeInMs: number; + nodeTimings: Map<Node<T>, NodeTiming>; + } +} + +export namespace Metrics { + export interface Result { + timing: number; + timestamp?: never; + optimisticEstimate: Simulation.Result; + pessimisticEstimate: Simulation.Result; + optimisticGraph: Node; + pessimisticGraph: Node; + } +} diff --git a/front_end/models/trace/trace.ts b/front_end/models/trace/trace.ts index 234c3979d11..5c407abac5d 100644 --- a/front_end/models/trace/trace.ts +++ b/front_end/models/trace/trace.ts @@ -7,6 +7,8 @@ import * as Extras from
'./extras/extras.js'; import * as Handlers from './handlers/handlers.js'; import * as Helpers from './helpers/helpers.js'; import * as Insights from './insights/insights.js'; +import * as Lantern from './lantern/lantern.js'; +import * as LanternComputationData from './LanternComputationData.js'; import * as TraceModel from './ModelImpl.js'; import * as Processor from './Processor.js'; import * as RootCauses from './root-causes/root-causes.js'; @@ -19,6 +21,8 @@ export { Handlers, Helpers, Insights, + Lantern, + LanternComputationData, Processor, RootCauses, TraceModel, diff --git a/front_end/panels/timeline/fixtures/traces/BUILD.gn b/front_end/panels/timeline/fixtures/traces/BUILD.gn index 0a745dae1c2..29c989f703a 100644 --- a/front_end/panels/timeline/fixtures/traces/BUILD.gn +++ b/front_end/panels/timeline/fixtures/traces/BUILD.gn @@ -29,6 +29,10 @@ copy_to_gen("traces") { "interactive-time.json.gz", "invalid-animation-events.json.gz", "invalidate-style-class-name-change.json.gz", + "lantern/iframe/trace.json.gz", + "lantern/paul/trace.json.gz", + "lantern/progressive-app/trace.json.gz", + "lantern/redirect/trace.json.gz", "large-layout-small-recalc.json.gz", "large-profile.cpuprofile.gz", "large-recalc-style.json.gz", diff --git a/front_end/panels/timeline/fixtures/traces/lantern/README.md b/front_end/panels/timeline/fixtures/traces/lantern/README.md new file mode 100644 index 00000000000..d17c462f5bc --- /dev/null +++ b/front_end/panels/timeline/fixtures/traces/lantern/README.md @@ -0,0 +1,4 @@ +The traces in this folder are originally from the Lighthouse repo. They can be quickly updated by using +Lighthouse's `regenerate.js` scripts. 
+ +See https://github.com/GoogleChrome/lighthouse/blob/main/core/test/fixtures/artifacts/progressive-app/regenerate.js diff --git a/front_end/panels/timeline/fixtures/traces/lantern/iframe/trace.json.gz b/front_end/panels/timeline/fixtures/traces/lantern/iframe/trace.json.gz new file mode 100644 index 00000000000..91e6ccd75b2 Binary files /dev/null and b/front_end/panels/timeline/fixtures/traces/lantern/iframe/trace.json.gz differ diff --git a/front_end/panels/timeline/fixtures/traces/lantern/paul/trace.json.gz b/front_end/panels/timeline/fixtures/traces/lantern/paul/trace.json.gz new file mode 100644 index 00000000000..f81b8974fab Binary files /dev/null and b/front_end/panels/timeline/fixtures/traces/lantern/paul/trace.json.gz differ diff --git a/front_end/panels/timeline/fixtures/traces/lantern/progressive-app/trace.json.gz b/front_end/panels/timeline/fixtures/traces/lantern/progressive-app/trace.json.gz new file mode 100644 index 00000000000..bbe07f24bbb Binary files /dev/null and b/front_end/panels/timeline/fixtures/traces/lantern/progressive-app/trace.json.gz differ diff --git a/front_end/panels/timeline/fixtures/traces/lantern/redirect/trace.json.gz b/front_end/panels/timeline/fixtures/traces/lantern/redirect/trace.json.gz new file mode 100644 index 00000000000..8b5a7417a20 Binary files /dev/null and b/front_end/panels/timeline/fixtures/traces/lantern/redirect/trace.json.gz differ