feat: compiler benchmarks and gh action (#3503)
Adds a private package, `@jsii/benchmarks`, which includes a basic command-line application for running jsii benchmarks.

Adds a custom benchmark runner leveraging the Node.js `perf_hooks` module to time function calls across multiple iterations and return averaged results.
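
At its core the runner wraps the subject with `performance.timerify` and collects `'function'` entries through a `PerformanceObserver`; here is a minimal sketch of that pattern (illustrative only, not the package's exact code):

```
import { performance, PerformanceObserver } from 'perf_hooks';

// Wrap the function under test so every call emits a 'function' performance entry.
const timed = performance.timerify(function subject() {
  // ...work being measured...
});

// Average the durations once the observed calls have been recorded.
const obs = new PerformanceObserver((list, observer) => {
  const durations = list.getEntries().map((entry) => entry.duration);
  const average = durations.reduce((sum, d) => sum + d, 0) / durations.length;
  console.log(`averaged ${average} milliseconds over ${durations.length} runs`);
  observer.disconnect();
});
obs.observe({ entryTypes: ['function'] });

for (let i = 0; i < 5; i++) {
  timed();
}
```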

Adds GitHub Actions workflows to run benchmarks on PRs and compare performance against the target branch. The action fails when a test suite is slower than the configurable alert threshold, which defaults to 200% of the baseline.
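
The threshold lives on the comparison step; a hypothetical variant of that step is shown below (`alert-threshold` is an input of benchmark-action/github-action-benchmark and defaults to '200%'):

```
- name: Compare Benchmark Results
  uses: benchmark-action/github-action-benchmark@v1
  with:
    tool: 'customSmallerIsBetter'
    output-file-path: bench-output.json
    # alert at 150% of the baseline instead of the default 200%
    alert-threshold: '150%'
    fail-on-alert: true
```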

Extends the gh-pages action workflow to run benchmarks and append new results to a file that is displayed as a graph on our docs site at the `/dev/bench` URL. This shows benchmark suite results over time and lets us track overall compiler performance across commits.

Example of benchmark results comment: MrArnoldPalmer#417 (comment)
Example of benchmark results graph (with only one result, since this is just a test): https://mrarnoldpalmer.github.io/jsii/dev/bench/

---

By submitting this pull request, I confirm that my contribution is made under the terms of the [Apache 2.0 license].

[Apache 2.0 license]: https://www.apache.org/licenses/LICENSE-2.0
MrArnoldPalmer authored May 4, 2022
1 parent f1a56e3 commit 4a91cf0
Showing 16 changed files with 567 additions and 2 deletions.
1 change: 1 addition & 0 deletions .gitattributes
@@ -15,4 +15,5 @@
# Github Linguist configuration (https://github.com/github/linguist)
yarn.lock linguist-generated
*.snap linguist-generated
packages/@jsii/benchmarks/fixtures/** linguist-vendored
docs/** linguist-documentation
2 changes: 1 addition & 1 deletion .github/workflows/gh-pages.yml
@@ -73,7 +73,7 @@ jobs:
          git config user.email "[email protected]"
      - name: Prepare Commit
        run: |-
          rsync --delete --exclude=.git --recursive ${{ runner.temp }}/site/ ./
          rsync --delete --exclude=.git --exclude=dev --recursive ${{ runner.temp }}/site/ ./
          touch .nojekyll
          git add .
          git diff --cached --exit-code >/dev/null || (
48 changes: 48 additions & 0 deletions .github/workflows/main.yml
@@ -365,3 +365,51 @@ jobs:
&& echo "Untracked files: ${untracked:-<none>}" \
&& test -z "${untracked}"
shell: bash
benchmark:
name: Run benchmark suite
runs-on: ubuntu-latest
permissions:
contents: write
needs: build
steps:
# Check out the code
- name: Download Artifact
uses: actions/download-artifact@v3
with:
name: built-tree
- name: Extract Artifact
run: |-
echo "::group::Untar Archive"
tar zxvf built-tree.tgz
echo "::endgroup"
rm built-tree.tgz
- name: Set up Node
uses: actions/setup-node@v3
with:
cache: yarn
node-version: '14'
- name: Install Dependencies
run: yarn install --frozen-lockfile
- name: Run Benchmark
working-directory: packages/@jsii/benchmarks
run: yarn bench --output ${{ runner.temp }}/bench-output.json
- name: Compare Benchmark Results
if: github.event_name == 'pull_request'
uses: benchmark-action/github-action-benchmark@v1
with:
name: jsii Benchmark Regression
tool: 'customSmallerIsBetter'
output-file-path: ${{ runner.temp }}/bench-output.json
comment-always: true
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
- name: Upload Benchmark Results
if: github.event_name == 'push'
uses: benchmark-action/github-action-benchmark@v1
with:
name: jsii Benchmark
tool: 'customSmallerIsBetter'
output-file-path: ${{ runner.temp }}/bench-output.json
github-token: ${{ secrets.GITHUB_TOKEN }}
auto-push: true
9 changes: 9 additions & 0 deletions packages/@jsii/benchmarks/.eslintrc.yaml
@@ -0,0 +1,9 @@
---
extends: ../../../eslint-config.yaml
ignorePatterns:
  - fixtures

rules:
  'import/no-extraneous-dependencies':
    - error
    - devDependencies: ['**/scripts/**']
3 changes: 3 additions & 0 deletions packages/@jsii/benchmarks/.gitignore
@@ -0,0 +1,3 @@
output.txt
*.d.ts
*.js
25 changes: 25 additions & 0 deletions packages/@jsii/benchmarks/README.md
@@ -0,0 +1,25 @@
# jsii Benchmarks

This package collects benchmarks for `jsii`, `jsii-pacmak`, and any other jsii packages written in TypeScript. It
contains a basic benchmark runner in [`benchmark.ts`](lib/benchmark.ts) that uses the `perf_hooks` module to time
synchronous functions.

## Usage

A small CLI app wraps calls to the defined benchmarks. To run the benchmarks:

```
yarn benchmark
```

To write benchmark results to a JSON file, pass the `--output` option:

```
yarn benchmark --output my-file.json
```

## Output Format

The output format is JSON and is consumed by the
[continuous benchmark action](https://github.com/benchmark-action/github-action-benchmark), which tracks the results
of benchmarks over time.
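
Each element of the emitted JSON array follows the shape consumed by that action (the values below are illustrative):

```
[
  {
    "name": "example-benchmark",
    "unit": "milliseconds",
    "value": 1234.5,
    "range": 80.2,
    "extra": "example-benchmark averaged 1234.5 milliseconds over 5 runs"
  }
]
```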
86 changes: 86 additions & 0 deletions packages/@jsii/benchmarks/bin/benchmark.ts
@@ -0,0 +1,86 @@
import * as fs from 'fs-extra';
import * as yargs from 'yargs';

import { benchmarks } from '../lib';
import { Benchmark } from '../lib/benchmark';

/**
 * Format of benchmark output used by the continuous benchmarking action.
 * See [documentation](https://github.com/benchmark-action/github-action-benchmark/blob/master/README.md) for details
 */
interface ResultsJson {
  /**
   * The name of the benchmark
   */
  name: string;

  /**
   * The unit of measure, usually seconds
   */
  unit: string;

  /**
   * The result of the measurement, usually an average over x iterations
   */
  value: number;

  /**
   * The variance of all runs
   */
  range: number;

  /**
   * Extra information about the benchmark, displayed in a tooltip
   */
  extra: string;
}

(async () => {
  /* eslint-disable-next-line @typescript-eslint/await-thenable */
  const argv = await yargs
    .command('$0', 'Runs jsii benchmark tests and displays results', (argv) =>
      argv.option('output', {
        type: 'string',
        desc: 'location of benchmark results json file, does not output to file if not specified.',
      }),
    )
    .help().argv;

  // Run list of benchmarks in sequence
  const resultsJson: ResultsJson[] = await benchmarks.reduce(
    async (
      accum: Promise<ResultsJson[]>,
      benchmark: Benchmark<any>,
    ): Promise<ResultsJson[]> => {
      const prev = await accum;
      const result = await benchmark.run();
      const extra = `${result.name} averaged ${result.average} milliseconds over ${result.iterations.length} runs`;
      console.log(extra);
      return [
        ...prev,
        {
          name: result.name,
          unit: 'milliseconds',
          value: result.average,
          range: result.variance,
          extra,
        },
      ];
    },
    Promise.resolve([]),
  );

  if (argv.output) {
    await fs.writeJson(argv.output, resultsJson, { spaces: 2 });
    console.log(`results written to ${argv.output}`);
  }

  return resultsJson;
})()
  .then((results) => {
    console.log(`successfully completed ${results.length} benchmarks`);
  })
  .catch((e) => {
    console.error(`Error: ${e.stack}`);
    process.exitCode = -1;
  });
Binary file not shown.
158 changes: 158 additions & 0 deletions packages/@jsii/benchmarks/lib/benchmark.ts
@@ -0,0 +1,158 @@
import { performance, PerformanceObserver, PerformanceEntry } from 'perf_hooks';

/**
 * Result of a benchmark run
 */
interface Result {
  /**
   * The name of the benchmark
   */
  readonly name: string;

  /**
   * The average duration across all iterations
   */
  readonly average: number;

  /**
   * Maximum duration across all iterations
   */
  readonly max: number;

  /**
   * Minimum duration across all iterations
   */
  readonly min: number;

  /**
   * max - min
   */
  readonly variance: number;

  /**
   * Results of individual runs
   */
  readonly iterations: readonly PerformanceEntry[];
}

/**
 * A simple benchmark for measuring synchronous functions. Uses the `perf_hooks`
 * module to measure how long a subject takes to execute and averages the result
 * over all runs. Runs `setup`, `beforeEach`, `afterEach`, and `teardown`
 * lifecycle hooks before, between, and after runs. These functions, and the
 * subject function, have access to an optionally defined `context` object that
 * can be returned from the `setup` function. This allows referencing shared
 * state across benchmark runs and lifecycle hooks to do things like setup,
 * teardown, stubbing, etc.
 */
export class Benchmark<C> {
  /**
   * How many times to run the subject
   */
  #iterations = 5;

  /**
   * Results of individual runs
   */
  #results: PerformanceEntry[] = [];

  public constructor(private readonly name: string) {}
  #setup: () => C | Promise<C> = () => ({} as C);
  #subject: (ctx: C) => void = () => undefined;
  #beforeEach: (ctx: C) => void = () => undefined;
  #afterEach: (ctx: C) => void = () => undefined;
  #teardown: (ctx: C) => void = () => undefined;

  /**
   * Create a setup function to be run once before the benchmark; optionally
   * return a context object to be used across runs and lifecycle functions.
   */
  public setup<T extends C>(fn: () => T | Promise<T>) {
    this.#setup = fn;
    return this as unknown as Benchmark<T>;
  }

  /**
   * Create a teardown function to be run once after all benchmark runs. Use to
   * clean up your mess.
   */
  public teardown(fn: (ctx: C) => void) {
    this.#teardown = fn;
    return this;
  }

  /**
   * Create a beforeEach function to be run before each iteration. Use to reset
   * state the subject may have changed.
   */
  public beforeEach(fn: (ctx: C) => void) {
    this.#beforeEach = fn;
    return this;
  }

  /**
   * Create an afterEach function to be run after each iteration. Use to reset
   * state the subject may have changed.
   */
  public afterEach(fn: (ctx: C) => void) {
    this.#afterEach = fn;
    return this;
  }

  /**
   * Set up the subject to be measured.
   */
  public subject(fn: (ctx: C) => void) {
    this.#subject = fn;
    return this;
  }

  /**
   * Set the number of iterations to be run.
   */
  public iterations(i: number) {
    this.#iterations = i;
    return this;
  }

  /**
   * Run and measure the benchmark
   */
  public async run(): Promise<Result> {
    const c = await this.#setup?.();
    return new Promise((ok) => {
      const wrapped = performance.timerify(this.#subject);
      const obs = new PerformanceObserver((list, observer) => {
        this.#results = list.getEntries();
        performance.clearMarks();
        observer.disconnect();
        const durations = this.#results.map((i) => i.duration);
        const max = Math.max(...durations);
        const min = Math.min(...durations);
        const variance = max - min;

        return ok({
          name: this.name,
          average:
            durations.reduce((accum, duration) => accum + duration, 0) /
            durations.length,
          max,
          min,
          variance,
          iterations: this.#results,
        });
      });
      obs.observe({ entryTypes: ['function'] });

      try {
        for (let i = 0; i < this.#iterations; i++) {
          this.#beforeEach(c);
          wrapped(c);
          this.#afterEach(c);
        }
      } finally {
        this.#teardown(c);
      }
    });
  }
}
9 changes: 9 additions & 0 deletions packages/@jsii/benchmarks/lib/constants.ts
@@ -0,0 +1,9 @@
import * as path from 'path';

export const fixturesDir = path.resolve(__dirname, '..', 'fixtures');

export const cdkTagv2_21_1 = 'v2.21.1';
export const cdkv2_21_1 = path.resolve(
  fixturesDir,
  `aws-cdk-lib@${cdkTagv2_21_1.replace(/\./g, '-')}.tgz`,
);