Introduce computedArtifacts #583

Merged: 21 commits, Aug 20, 2016
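This PR replaces eagerly gathered artifacts (CriticalRequestChains, Speedline, ScreenshotFilmstrip) with computed artifacts that audits request on demand from base artifacts such as networkRecords or a trace. The sketch below illustrates the general shape implied by the requestCriticalRequestChains / requestSpeedline / requestScreenshots calls in the diffs that follow: a lazily computed, promise-returning, memoized derivation. The class and member names (ComputedArtifact, compute_, the Map cache) are illustrative assumptions, not necessarily what this PR actually ships.

// Minimal sketch of a memoized computed artifact (illustrative, not this PR's code).
'use strict';

class ComputedArtifact {
  constructor() {
    // Cache keyed on the input artifact so repeated requests don't recompute.
    this._cache = new Map();
  }

  // Subclasses override this with the real derivation.
  compute_(input) {
    throw new Error('compute_() must be implemented by the subclass');
  }

  // Always returns a promise and computes at most once per input object.
  request(input) {
    if (!this._cache.has(input)) {
      this._cache.set(input, Promise.resolve().then(() => this.compute_(input)));
    }
    return this._cache.get(input);
  }
}

// Example derivation from the base networkRecords artifact.
class CriticalRequestChainsSketch extends ComputedArtifact {
  compute_(networkRecords) {
    // Build the id-keyed chain tree from the records (elided in this sketch).
    return {};
  }
}

module.exports = {ComputedArtifact, CriticalRequestChainsSketch};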
46 changes: 24 additions & 22 deletions lighthouse-core/audits/critical-request-chains.js
@@ -30,7 +30,7 @@ class CriticalRequestChains extends Audit {
name: 'critical-request-chains',
description: 'Critical Request Chains',
optimalValue: 0,
requiredArtifacts: ['CriticalRequestChains']
requiredArtifacts: ['networkRecords']
};
}

@@ -40,31 +40,33 @@ class CriticalRequestChains extends Audit {
* @return {!AuditResult} The score from the audit, ranging from 0-100.
*/
static audit(artifacts) {
let chainCount = 0;
function walk(node, depth) {
const children = Object.keys(node);
return artifacts.requestCriticalRequestChains(artifacts.networkRecords).then(chains => {
let chainCount = 0;
function walk(node, depth) {
const children = Object.keys(node);

// Since a leaf node indicates the end of a chain, we can inspect the number
// of child nodes, and, if the count is zero, increment the count.
if (children.length === 0) {
chainCount++;
}
// Since a leaf node indicates the end of a chain, we can inspect the number
// of child nodes, and, if the count is zero, increment the count.
if (children.length === 0) {
chainCount++;
}

children.forEach(id => {
const child = node[id];
walk(child.children, depth + 1);
}, '');
}
children.forEach(id => {
const child = node[id];
walk(child.children, depth + 1);
}, '');
}

walk(artifacts.CriticalRequestChains, 0);
walk(chains, 0);

return CriticalRequestChains.generateAuditResult({
rawValue: chainCount,
optimalValue: this.meta.optimalValue,
extendedInfo: {
formatter: Formatter.SUPPORTED_FORMATS.CRITICAL_REQUEST_CHAINS,
value: artifacts.CriticalRequestChains
}
return CriticalRequestChains.generateAuditResult({
rawValue: chainCount,
optimalValue: this.meta.optimalValue,
extendedInfo: {
formatter: Formatter.SUPPORTED_FORMATS.CRITICAL_REQUEST_CHAINS,
value: chains
}
});
});
}
}
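For reference, the walk() above expects the computed chains to be a tree keyed by request id, where each node carries its own children map. A hypothetical example of that shape (not taken from this PR):

// Hypothetical chains object of the shape walk() traverses.
const exampleChains = {
  '1': {
    request: {url: 'https://example.com/'},
    children: {
      '2': {request: {url: 'https://example.com/app.css'}, children: {}}
    }
  }
};
// walk(exampleChains, 0) counts one chain: node '2' is the only leaf.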
82 changes: 43 additions & 39 deletions lighthouse-core/audits/estimated-input-latency.js
@@ -35,56 +35,60 @@ class EstimatedInputLatency extends Audit {
name: 'estimated-input-latency',
description: 'Estimated Input Latency',
optimalValue: SCORING_POINT_OF_DIMINISHING_RETURNS.toLocaleString() + 'ms',
requiredArtifacts: ['traceContents', 'Speedline']
requiredArtifacts: ['traceContents']
};
}

static calculate(speedline, trace) {
// Use speedline's first paint as start of range for input latency check.
const startTime = speedline.first;

const tracingProcessor = new TracingProcessor();
const model = tracingProcessor.init(trace);
const latencyPercentiles = TracingProcessor.getRiskToResponsiveness(model, trace, startTime);

const ninetieth = latencyPercentiles.find(result => result.percentile === 0.9);
const rawValue = parseFloat(ninetieth.time.toFixed(1));

// Use the CDF of a log-normal distribution for scoring.
// 10th Percentile ≈ 58ms
// 25th Percentile ≈ 75ms
// Median = 100ms
// 75th Percentile ≈ 133ms
// 95th Percentile ≈ 199ms
const distribution = TracingProcessor.getLogNormalDistribution(SCORING_MEDIAN,
SCORING_POINT_OF_DIMINISHING_RETURNS);
let score = 100 * distribution.computeComplementaryPercentile(ninetieth.time);

return EstimatedInputLatency.generateAuditResult({
score: Math.round(score),
optimalValue: this.meta.optimalValue,
rawValue,
displayValue: `${rawValue}ms`,
extendedInfo: {
value: latencyPercentiles,
formatter: Formatter.SUPPORTED_FORMATS.ESTIMATED_INPUT_LATENCY
}
});
}

/**
* Audits the page to estimate input latency.
* @see https://github.com/GoogleChrome/lighthouse/issues/28
* @param {!Artifacts} artifacts The artifacts from the gather phase.
* @return {!AuditResult} The score from the audit, ranging from 0-100.
* @return {!Promise<!AuditResult>} The score from the audit, ranging from 0-100.
*/
static audit(artifacts) {
try {
// Use speedline's first paint as start of range for input latency check.
const startTime = artifacts.Speedline.first;

const trace = artifacts.traces[this.DEFAULT_TRACE] &&
artifacts.traces[this.DEFAULT_TRACE].traceEvents;
const tracingProcessor = new TracingProcessor();
const model = tracingProcessor.init(trace);
const latencyPercentiles = TracingProcessor.getRiskToResponsiveness(model, trace, startTime);

const ninetieth = latencyPercentiles.find(result => result.percentile === 0.9);
const rawValue = parseFloat(ninetieth.time.toFixed(1));
const trace = artifacts.traces[this.DEFAULT_TRACE];

// Use the CDF of a log-normal distribution for scoring.
// 10th Percentile ≈ 58ms
// 25th Percentile ≈ 75ms
// Median = 100ms
// 75th Percentile ≈ 133ms
// 95th Percentile ≈ 199ms
const distribution = TracingProcessor.getLogNormalDistribution(SCORING_MEDIAN,
SCORING_POINT_OF_DIMINISHING_RETURNS);
let score = 100 * distribution.computeComplementaryPercentile(ninetieth.time);

return EstimatedInputLatency.generateAuditResult({
score: Math.round(score),
optimalValue: this.meta.optimalValue,
rawValue,
displayValue: `${rawValue}ms`,
extendedInfo: {
value: latencyPercentiles,
formatter: Formatter.SUPPORTED_FORMATS.ESTIMATED_INPUT_LATENCY
}
});
} catch (err) {
return EstimatedInputLatency.generateAuditResult({
rawValue: -1,
debugString: 'Unable to parse trace contents: ' + err.message
return artifacts.requestSpeedline(trace)
.then(speedline => EstimatedInputLatency.calculate(speedline, trace))
.catch(err => {
return EstimatedInputLatency.generateAuditResult({
rawValue: -1,
debugString: 'Speedline unable to parse trace contents: ' + err.message
});
});
}
}
}

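Note the @return change above: the audit now resolves its result asynchronously, so whatever invokes it has to treat every audit result as potentially promise-wrapped. A minimal sketch of such a call site, assuming only the static audit() and meta fields shown in these diffs (the runAudit name is illustrative):

// Illustrative: normalize audits that may return a plain result or a promise.
function runAudit(AuditClass, artifacts) {
  return Promise.resolve()
    .then(() => AuditClass.audit(artifacts))
    .then(result => {
      // Every audit ultimately resolves to a generateAuditResult() shape.
      console.log(`${AuditClass.meta.name}: rawValue=${result.rawValue}`);
      return result;
    });
}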
1 change: 0 additions & 1 deletion lighthouse-core/audits/first-meaningful-paint.js
@@ -57,7 +57,6 @@ class FirstMeaningfulPaint extends Audit {
if (!traceContents || !Array.isArray(traceContents)) {
throw new Error(FAILURE_MESSAGE);
}

const evts = this.collectEvents(traceContents);

const navStart = evts.navigationStart;
34 changes: 21 additions & 13 deletions lighthouse-core/audits/screenshots.js
@@ -29,30 +29,38 @@ class Screenshots extends Audit {
category: 'Performance',
name: 'screenshots',
description: 'Screenshots of all captured frames',
requiredArtifacts: ['ScreenshotFilmstrip']
requiredArtifacts: ['traceContents']
};
}

/**
* @param {!Artifacts} artifacts
* @return {!AuditResultInput}
* @return {!Promise<!AuditResult>}
*/
static audit(artifacts) {
const screenshots = artifacts.ScreenshotFilmstrip;

if (typeof screenshots === 'undefined') {
return Screenshots.generateAuditResult({
const trace = artifacts.traces[this.DEFAULT_TRACE];
if (typeof trace === 'undefined') {
return Promise.resolve(Screenshots.generateAuditResult({
rawValue: -1,
debugString: 'No screenshot artifact'
});
debugString: 'No trace found to generate screenshots'
}));
}

return Screenshots.generateAuditResult({
rawValue: screenshots.length || 0,
extendedInfo: {
formatter: Formatter.SUPPORTED_FORMATS.NULL,
value: screenshots
return artifacts.requestScreenshots(trace).then(screenshots => {
if (typeof screenshots === 'undefined') {
return Screenshots.generateAuditResult({
rawValue: -1,
debugString: 'No screenshot artifact'
});
}

return Screenshots.generateAuditResult({
rawValue: screenshots.length || 0,
extendedInfo: {
formatter: Formatter.SUPPORTED_FORMATS.NULL,
value: screenshots
}
});
});
}
}
44 changes: 24 additions & 20 deletions lighthouse-core/audits/speed-index-metric.js
@@ -36,7 +36,7 @@ class SpeedIndexMetric extends Audit {
name: 'speed-index-metric',
description: 'Speed Index',
optimalValue: SCORING_POINT_OF_DIMINISHING_RETURNS.toLocaleString(),
requiredArtifacts: ['Speedline']
requiredArtifacts: ['traceContents']
};
}

@@ -47,36 +47,35 @@ class SpeedIndexMetric extends Audit {
* @return {!Promise<!AuditResult>} The score from the audit, ranging from 0-100.
*/
static audit(artifacts) {
return new Promise((resolve, reject) => {
const speedline = artifacts.Speedline;

// Speedline gather failed; pass on error condition.
if (speedline.debugString) {
return resolve(SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: speedline.debugString
}));
}
const trace = artifacts.traces[this.DEFAULT_TRACE];
if (typeof trace === 'undefined') {
return SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: 'No trace found to generate screenshots'
});
}

// run speedline
return artifacts.requestSpeedline(trace).then(speedline => {
if (speedline.frames.length === 0) {
return resolve(SpeedIndexMetric.generateAuditResult({
return SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: 'Trace unable to find visual progress frames.'
}));
});
}

if (speedline.frames.length < 3) {
return resolve(SpeedIndexMetric.generateAuditResult({
return SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: 'Trace unable to find sufficient frames to evaluate Speed Index.'
}));
});
}

if (speedline.speedIndex === 0) {
return resolve(SpeedIndexMetric.generateAuditResult({
return SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: 'Error in Speedline calculating Speed Index (speedIndex of 0).'
}));
});
}

// Use the CDF of a log-normal distribution for scoring.
@@ -86,7 +85,7 @@ class SpeedIndexMetric extends Audit {
// 75th Percentile = 8,820
// 95th Percentile = 17,400
const distribution = TracingProcessor.getLogNormalDistribution(SCORING_MEDIAN,
SCORING_POINT_OF_DIMINISHING_RETURNS);
SCORING_POINT_OF_DIMINISHING_RETURNS);
let score = 100 * distribution.computeComplementaryPercentile(speedline.speedIndex);

// Clamp the score to 0 <= x <= 100.
@@ -105,15 +104,20 @@ class SpeedIndexMetric extends Audit {
})
};

resolve(SpeedIndexMetric.generateAuditResult({
return SpeedIndexMetric.generateAuditResult({
score: Math.round(score),
rawValue: Math.round(speedline.speedIndex),
optimalValue: this.meta.optimalValue,
extendedInfo: {
formatter: Formatter.SUPPORTED_FORMATS.SPEEDLINE,
value: extendedInfo
}
}));
});
}).catch(err => {
return SpeedIndexMetric.generateAuditResult({
rawValue: -1,
debugString: err.message
});
});
}
}
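estimated-input-latency, speed-index-metric, and time-to-interactive all ask for a Speedline run over the same trace. Routing that through artifacts.requestSpeedline pays off if, as assumed in the memoization sketch near the top, the computed artifact caches per trace, so Speedline itself runs only once. A fragment illustrating that assumption (it presumes the audit context, i.e. artifacts and Audit.DEFAULT_TRACE are in scope):

// Fragment: under the per-trace caching assumption, both requests share one Speedline run.
const trace = artifacts.traces[Audit.DEFAULT_TRACE];
const first = artifacts.requestSpeedline(trace);
const second = artifacts.requestSpeedline(trace);
Promise.all([first, second]).then(([a, b]) => {
  console.assert(a === b, 'expected the same cached speedline result');
});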
26 changes: 16 additions & 10 deletions lighthouse-core/audits/time-to-interactive.js
@@ -28,7 +28,7 @@ class TTIMetric extends Audit {
name: 'time-to-interactive',
description: 'Time To Interactive (alpha)',
optimalValue: SCORING_POINT_OF_DIMINISHING_RETURNS.toLocaleString(),
requiredArtifacts: ['traceContents', 'speedline']
requiredArtifacts: ['traceContents']
};
}

@@ -54,11 +54,17 @@ class TTIMetric extends Audit {
* will be changing in the future to a more accurate number.
*
* @param {!Artifacts} artifacts The artifacts from the gather phase.
* @return {!AuditResult} The score from the audit, ranging from 0-100.
* @return {!Promise<!AuditResult>} The score from the audit, ranging from 0-100.
*/
static audit(artifacts) {
const trace = artifacts.traces[Audit.DEFAULT_TRACE];
const pendingSpeedline = artifacts.requestSpeedline(trace);
const pendingFMP = FMPMetric.audit(artifacts);

// We start looking at Math.Max(FMPMetric, visProgress[0.85])
return FMPMetric.audit(artifacts).then(fmpResult => {
return Promise.all([pendingSpeedline, pendingFMP]).then(results => {
const speedline = results[0];
const fmpResult = results[1];
if (fmpResult.rawValue === -1) {
return generateError(fmpResult.debugString);
}
@@ -68,8 +74,8 @@ class TTIMetric extends Audit {

// Process the trace
const tracingProcessor = new TracingProcessor();
const traceContents = artifacts.traces[Audit.DEFAULT_TRACE].traceEvents;
const model = tracingProcessor.init(traceContents);
const trace = artifacts.traces[Audit.DEFAULT_TRACE];
const model = tracingProcessor.init(trace);
const endOfTraceTime = model.bounds.max;

// TODO: Wait for DOMContentLoadedEndEvent
@@ -81,8 +87,8 @@ class TTIMetric extends Audit {
// look at speedline results for 85% starting at FMP
let visuallyReadyTiming = 0;

if (artifacts.Speedline.frames) {
const eightyFivePctVC = artifacts.Speedline.frames.find(frame => {
if (speedline.frames) {
const eightyFivePctVC = speedline.frames.find(frame => {
return frame.getTimeStamp() >= fMPts && frame.getProgress() >= 85;
});

@@ -111,7 +117,7 @@ class TTIMetric extends Audit {
}
// Get our expected latency for the time window
const latencies = TracingProcessor.getRiskToResponsiveness(
model, traceContents, startTime, endTime, percentiles);
model, trace, startTime, endTime, percentiles);
const estLatency = latencies[0].time.toFixed(2);
foundLatencies.push({
estLatency: estLatency,
@@ -151,7 +157,7 @@ class TTIMetric extends Audit {
rawValue: timeToInteractive,
displayValue: `${timeToInteractive}ms`,
optimalValue: this.meta.optimalValue,
debugString: artifacts.Speedline.debugString,
debugString: speedline.debugString,
extendedInfo: {
value: extendedInfo,
formatter: Formatter.SUPPORTED_FORMATS.NULL
@@ -170,6 +176,6 @@ class TTIMetric extends Audit {
value: -1,
rawValue: -1,
optimalValue: TTIMetric.meta.optimalValue,
debugString: err
debugString: err.message || err
});
}
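A practical consequence for unit tests: rather than handing an audit a precomputed artifact value, a test now stubs the relevant request* method on the artifacts object. A hedged sketch for the critical-request-chains audit (the require path comes from the file header above; the stub and expected value are illustrative):

// Illustrative test stub for the promise-based audit.
const assert = require('assert');
const CriticalRequestChains = require('./lighthouse-core/audits/critical-request-chains');

const artifacts = {
  networkRecords: [],
  // Stub the computed artifact: resolve to an empty chain tree.
  requestCriticalRequestChains: () => Promise.resolve({})
};

CriticalRequestChains.audit(artifacts).then(result => {
  // An empty tree is a single leaf, so walk() counts one chain.
  assert.strictEqual(result.rawValue, 1);
});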