Skip to content

Commit

Permalink
Merge pull request #2 from ludamad-test-org/report-diff-in-comment
Browse files Browse the repository at this point in the history
Report diff in comment
  • Loading branch information
ludamad authored Apr 4, 2024
2 parents e1bf992 + 130c860 commit cc829af
Show file tree
Hide file tree
Showing 14 changed files with 170 additions and 35 deletions.
2 changes: 1 addition & 1 deletion .github/ci-setup-action/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ runs:
id: cache-submodules
uses: actions/cache@v3
with:
path: .git
path: .git/modules
key: submodules-${{ hashFiles('.gitmodules') }}

- name: Checkout Submodules
Expand Down
66 changes: 58 additions & 8 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,34 @@ on:
workflow_dispatch: {}

jobs:
start-runner:
timeout-minutes: 5 # normally it only takes 1-2 minutes
name: Start self-hosted EC2 runner
runs-on: ubuntu-latest
permissions:
actions: write
steps:
- name: Start EC2 runner
id: start-ec2-runner
uses: AztecProtocol/ec2-action-builder@v11
with:
github_token: ${{ secrets.GH_SELF_HOSTED_RUNNER_TOKEN }}
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_region: "us-east-2"
ec2_instance_type: r6a.32xlarge
ec2_ami_id: ami-04d8422a9ba4de80f
ec2_subnet_id: subnet-4cfabd25
ec2_instance_tags: '[]'
github_action_runner_version: v2.315.0
ec2_security_group_id: sg-0ccd4e5df0dcca0c9
ec2_instance_ttl: 40 # 40 minutes to reap
ec2_spot_instance_strategy: BestEffort

# there's a lot of x86 tasks - let's split out the build step
build-x86:
runs-on: ubuntu-latest
needs: start-runner
runs-on: ${{ github.run_id }}
outputs:
e2e_list: ${{ steps.e2e_list.outputs.list }}
env:
Expand All @@ -17,6 +42,8 @@ jobs:
concurrency:
group: build-${{ github.ref_name == 'master' && github.run_id || github.ref_name }}-x86
cancel-in-progress: true
# permission check
if: github.event.pull_request.head.repo.full_name == github.repository || contains(github.event.pull_request.labels.*.name, 'external-ci')
steps:
- name: Checkout
uses: actions/checkout@v4
Expand Down Expand Up @@ -45,7 +72,8 @@ jobs:
# all the end-to-end integration tests for aztec
e2e-arm:
runs-on: ubuntu-latest
needs: start-runner
runs-on: ${{ github.run_id }}
env:
EARTHLY_TOKEN: ${{ secrets.EARTHLY_TOKEN }}
strategy:
Expand All @@ -54,9 +82,8 @@ jobs:
test:
- e2e-card-game
- e2e-crowdfunding-and-claim
# cancel if reran on same PR if exists, otherwise if on same commit
concurrency:
group: ${{ matrix.test }}-${{ github.ref_name == 'master' && github.run_id|| github.ref_name }}-arm
group: ${{ matrix.test }}-${{ github.ref_name == 'master' && github.run_id || github.ref_name }}-arm
cancel-in-progress: true
steps:
- name: Checkout
Expand All @@ -82,7 +109,7 @@ jobs:
# all the end-to-end integration tests for aztec
e2e-x86:
needs: build-x86
runs-on: ubuntu-latest
runs-on: ${{ github.run_id }}
env:
EARTHLY_TOKEN: ${{ secrets.EARTHLY_TOKEN }}
strategy:
Expand Down Expand Up @@ -120,9 +147,14 @@ jobs:
# we blank earthly token just to be sure this is a pure local run
EARTHLY_TOKEN="" earthly -P --no-output +${{ matrix.test }} --e2e_mode=cache
- name: Upload logs
run: |
BRANCH=${{ github.ref_name }} PULL_REQUEST=${{ github.event.number }} scripts/ci/upload_logs_to_s3 ./yarn-project/end-to-end/log
# barretenberg (prover) native tests
bb-native-tests:
runs-on: ubuntu-latest
needs: start-runner
runs-on: ${{ github.run_id }}
env:
EARTHLY_TOKEN: ${{ secrets.EARTHLY_TOKEN }}
strategy:
Expand Down Expand Up @@ -157,8 +189,9 @@ jobs:
# they should use parallelism within the benchmark, but only one thing should run at a time
# for accurate results
# We don't depend on 'build' as we use a different runner and will build components on the first step that uses them.
bench:
runs-on: ubuntu-latest
bb-bench:
runs-on: ${{ github.run_id }}
needs: start-runner
env:
EARTHLY_TOKEN: ${{ secrets.EARTHLY_TOKEN }}
# cancel if reran on same PR if exists, otherwise if on same commit
Expand Down Expand Up @@ -200,3 +233,20 @@ jobs:
- name: Ultrahonk Bench
working-directory: ./barretenberg/cpp/
run: earthly-cloud bench x86 --no-output +bench-ultra-honk --bench_mode=cache

# # Post actions, deploy and summarize logs
# aztec-bench-summary:
# runs-on: ${{ github.run_id }}
# # IMPORTANT: possible security flaw if we don't use 'check-run-condition'
# needs: e2e-x86
# concurrency:
# group: aztec-bench-summary-${{ github.ref_name == 'master' && github.run_id || github.ref_name }}-x86
# cancel-in-progress: true
# steps:
# - name: Checkout
# uses: actions/checkout@v4
# with:
# ref: ${{ github.event.pull_request.head.sha }}

# - name: "Assemble benchmark summary from uploaded logs"
# command: ./scripts/ci/assemble_e2e_benchmark_earthly.sh
2 changes: 1 addition & 1 deletion .github/workflows/protocol-circuits-gate-diff.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on:
push:
branches:
- master
pull_request:
pull_request: {}

jobs:
compare_protocol_circuits_gates:
Expand Down
3 changes: 2 additions & 1 deletion .vscode/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -160,5 +160,6 @@
"noir/noir-repo/Cargo.toml",
"noir/noir-repo/acvm-repo/acvm_js/Cargo.toml",
"avm-transpiler/Cargo.toml"
]
],
"cmake.sourceDirectory": "/mnt/user-data/adam/aztec-packages/barretenberg/cpp"
}
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ noirup -v TAG_FROM_THE_FILE

This repository uses CircleCI for continuous integration. Build steps are managed using [`build-system`](https://github.com/AztecProtocol/build-system). Small packages are built and tested as part of a docker build operation, while larger ones and end-to-end tests spin up a large AWS spot instance. Each successful build step creates a new docker image that gets tagged with the package name and commit.

All packages need to be included in the [build manifest](`build_manifest.json`), which declares what paths belong to each package, as well as dependencies between packages. When the CI runs, if none of the rebuild patterns or dependencies were changed, then the build step is skipped and the last successful image is re-tagged with the current commit. Read more on the [`build-system`](https://github.com/AztecProtocol/build-system) repository README.
All packages need to be included in the [build manifest](`build_manifest.yml`), which declares what paths belong to each package, as well as dependencies between packages. When the CI runs, if none of the rebuild patterns or dependencies were changed, then the build step is skipped and the last successful image is re-tagged with the current commit. Read more on the [`build-system`](https://github.com/AztecProtocol/build-system) repository README.

It is faster to debug CI failures within a persistent ssh session compared to pushing and waiting. You can create a session with "Rerun step with SSH" on CircleCI which will generate an ssh command for debugging on a worker. Run that command locally and then do

Expand Down
5 changes: 1 addition & 4 deletions noir-projects/bootstrap.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,4 @@ else
for job in $(jobs -p); do
wait $job || exit 1
done
fi



fi
15 changes: 10 additions & 5 deletions scripts/ci/assemble_e2e_benchmark.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,19 @@ BUCKET_NAME="aztec-ci-artifacts"
LOG_FOLDER="${LOG_FOLDER:-log}"
BENCH_FOLDER="${BENCH_FOLDER:-bench}"
COMMIT_HASH="${COMMIT_HASH:-$(git rev-parse HEAD)}"
BASE_COMMIT_HASH=""
BASE_BENCH_PATH=""
BENCHMARK_FILE_JSON="${BENCH_FOLDER}/benchmark.json"
BASE_BENCHMARK_FILE_JSON="${BENCH_FOLDER}/base-benchmark.json"

# Paths from build-system/scripts/upload_logs_to_s3
if [ "${CIRCLE_BRANCH:-}" = "master" ]; then
LOG_SOURCE_FOLDER="logs-v1/master/$COMMIT_HASH"
BARRETENBERG_BENCH_SOURCE_FOLDER="barretenberg-bench-v1/master/$COMMIT_HASH"
BENCHMARK_TARGET_FILE="benchmarks-v1/master/$COMMIT_HASH.json"
BENCHMARK_LATEST_FILE="benchmarks-v1/latest.json"
elif [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then
LOG_SOURCE_FOLDER="logs-v1/pulls/${CIRCLE_PULL_REQUEST##*/}"
BARRETENBERG_BENCH_SOURCE_FOLDER="barretenberg-bench-v1/pulls/${CIRCLE_PULL_REQUEST##*/}"
BENCHMARK_TARGET_FILE="benchmarks-v1/pulls/${CIRCLE_PULL_REQUEST##*/}.json"
elif [ -n "${CIRCLE_TAG:-}" ]; then
echo "Skipping benchmark run for ${CIRCLE_TAG} tagged release."
Expand All @@ -44,9 +46,12 @@ EXPECTED_LOGS_COUNT=$(find yarn-project/end-to-end/src -type f -name "bench*.tes
DOWNLOADED_LOGS_COUNT=$(find $LOG_FOLDER -type f -name "*.jsonl" | wc -l)
if [ "$DOWNLOADED_LOGS_COUNT" -lt "$EXPECTED_LOGS_COUNT" ]; then
echo Found $DOWNLOADED_LOGS_COUNT out of $EXPECTED_LOGS_COUNT benchmark log files in s3://${BUCKET_NAME}/${LOG_SOURCE_FOLDER}/. Exiting.
exit 0
exit 1
fi

# Download barretenberg log files, these are direct benchmarks and separate from the above
aws s3 cp "s3://${BUCKET_NAME}/${BARRETENBERG_BENCH_SOURCE_FOLDER}/" $LOG_FOLDER --exclude '*' --include '*_bench.json' --recursive

# Generate the aggregated benchmark file
mkdir -p $BENCH_FOLDER
CONTAINER_BENCH_FOLDER="/usr/src/yarn-project/bench"
Expand All @@ -56,7 +61,7 @@ export DOCKER_RUN_OPTS="\
-e BENCH_FOLDER=${CONTAINER_BENCH_FOLDER} \
-v $(realpath $LOG_FOLDER):${CONTAINER_LOG_FOLDER}:rw \
-e LOG_FOLDER=${CONTAINER_LOG_FOLDER} \
-e BASE_COMMIT_HASH \
-e BASE_BENCH_PATH \
-e AZTEC_BOT_COMMENTER_GITHUB_TOKEN \
-e CIRCLE_PULL_REQUEST"
yarn-project/scripts/run_script.sh workspace @aztec/scripts bench-aggregate
Expand All @@ -82,13 +87,13 @@ if [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then
aws s3 cp "s3://${BUCKET_NAME}/benchmarks-v1/master/$commit_hash.json" $BASE_BENCHMARK_FILE_JSON
if [ $? -eq 0 ]; then
echo "Downloaded base data from commit $commit_hash"
export BASE_COMMIT_HASH=$commit_hash
export BASE_BENCH_PATH=master/$commit_hash
break;
fi
done
set -e

if [ -z "${BASE_COMMIT_HASH:-}" ]; then
if [ -z "${BASE_BENCH_PATH:-}" ]; then
echo "No base commit data found"
fi

Expand Down
32 changes: 32 additions & 0 deletions scripts/ci/upload_logs_to_s3
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
#!/usr/bin/env bash

[ -n "${BUILD_SYSTEM_DEBUG:-}" ] && set -x # conditionally trace

# Uploads to S3 the contents of the log folder mounted on the end-to-end container,
# which contains log entries with an associated event and metrics for it.
# Logs are uploaded to aztec-ci-artifacts/logs-v1/master/$COMMIT/$JOB.jsonl
# or to aztec-ci-artifacts/logs-v1/pulls/$PRNUMBER/$JOB.jsonl if on a PR

set -eu

LOG_FOLDER=$1
BUCKET_NAME="aztec-ci-artifacts"
COMMIT_HASH="${COMMIT_HASH:-$(git rev-parse HEAD)}"

# Nothing to upload if the folder is missing or empty; succeed quietly.
if [ ! -d "$LOG_FOLDER" ] || [ -z "$(ls -A "$LOG_FOLDER")" ]; then
  echo "No logs in folder $LOG_FOLDER to upload"
  exit 0
fi

# Paths used in scripts/ci/assemble_e2e_benchmark.sh (keep the two branches consistent)
if [ "${BRANCH:-}" = "master" ]; then
  TARGET_FOLDER="logs-v1/master/$COMMIT_HASH"
elif [ -n "${PULL_REQUEST:-}" ]; then
  TARGET_FOLDER="logs-v1/pulls/${PULL_REQUEST##*/}"
fi

if [ -n "${TARGET_FOLDER:-}" ]; then
  # NOTE: aws s3 filters are applied in order and default to include-all, so an
  # --include on its own is a no-op; '--exclude *' first makes the .jsonl filter
  # effective (same pattern as the barretenberg download in assemble_e2e_benchmark.sh).
  aws s3 cp "$LOG_FOLDER" "s3://${BUCKET_NAME}/${TARGET_FOLDER}" --exclude '*' --include '*.jsonl' --recursive
else
  echo "Skipping upload since no target folder was defined"
fi
4 changes: 2 additions & 2 deletions yarn-project/Earthfile
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ build:
SAVE ARTIFACT /usr/src

# TODO versioning flow at end before publish?
# ENV COMMIT_TAG=$EARTHLY_BUILD_SHA
# ENV COMMIT_TAG=$EARTHLY_GIT_HASH
# RUN ./scripts/version_packages.sh

# run:
Expand Down Expand Up @@ -86,4 +86,4 @@ build-end-to-end:
FROM +end-to-end-minimal
SAVE IMAGE --push aztecprotocol/end-to-end-cache:$EARTHLY_GIT_HASH
FROM +aztec
SAVE IMAGE --push aztecprotocol/aztec-cache:$EARTHLY_GIT_HASH
SAVE IMAGE --push aztecprotocol/aztec-cache:$EARTHLY_GIT_HASH
15 changes: 14 additions & 1 deletion yarn-project/circuit-types/src/stats/metrics.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@ export type MetricGroupBy =
| 'classes-registered'
| 'leaf-count'
| 'data-writes'
| 'fee-payment-method';
| 'fee-payment-method'
| 'circuit-size-in-gates';

/** Definition of a metric to track in benchmarks. */
export interface Metric {
Expand All @@ -24,6 +25,18 @@ export interface Metric {

/** Metric definitions to track from benchmarks. */
export const Metrics = [
{
name: 'client_ivc_proving_time_in_ms',
groupBy: 'circuit-size-in-gates',
description: 'Proving time for ClientIVC grouped by circuit size.',
events: [],
},
{
name: 'ultrahonk_proving_time_in_ms',
groupBy: 'circuit-size-in-gates',
description: 'Proving time for UltraHonk grouped by circuit size.',
events: [],
},
{
name: 'l1_rollup_calldata_size_in_bytes',
groupBy: 'block-size',
Expand Down
12 changes: 6 additions & 6 deletions yarn-project/end-to-end/Earthfile
Original file line number Diff line number Diff line change
Expand Up @@ -259,11 +259,11 @@ bench-publish-rollup:
DO +E2E_TEST --test=benchmarks/bench_publish_rollup.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml

# TODO need to investigate why this isn't working
# bench-process-history:
# ARG e2e_mode=local
# DO +E2E_TEST --test=benchmarks/bench_process_history.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml
bench-process-history:
ARG e2e_mode=local
DO +E2E_TEST --test=benchmarks/bench_process_history.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml

# TODO need to investigate why this isn't working
# bench-tx-size:
# ARG e2e_mode=local
# DO +E2E_TEST --test=benchmarks/bench_tx_size_fees.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml
bench-tx-size:
ARG e2e_mode=local
DO +E2E_TEST --test=benchmarks/bench_tx_size_fees.test.ts --debug="aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" --e2e_mode=$e2e_mode --compose_file=./scripts/docker-compose-no-sandbox.yml
1 change: 0 additions & 1 deletion yarn-project/end-to-end/scripts/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@ services:
yarn test ${TEST:-./src/e2e_deploy_contract.test.ts}
'
volumes:
# TODO(AD) currently earthly uses /build instead of /usr/src
- ../log:/usr/src/yarn-project/end-to-end/log:rw
depends_on:
- sandbox
Expand Down
34 changes: 33 additions & 1 deletion yarn-project/scripts/src/benchmarks/aggregate.ts
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,15 @@ function processEntry(entry: Stats, results: BenchmarkCollectedResults, fileName
}
}

/**
 * Maps a google-benchmark context's executable path to the benchmark metric it
 * reports, or undefined when the executable is absent or unrecognized.
 */
function getBarretenbergMetric(context: { executable?: string }): MetricName | undefined {
  const exe = context.executable ?? '';
  // Order matters: the first matching needle wins, mirroring the original if/else chain.
  const lookup: Array<[needle: string, metric: MetricName]> = [
    ['ultra_honk', 'ultrahonk_proving_time_in_ms'],
    ['client_ivc', 'client_ivc_proving_time_in_ms'],
  ];
  const hit = lookup.find(([needle]) => exe.includes(needle));
  return hit?.[1];
}

/** Array of collected raw results for a given metric. */
type BenchmarkCollectedMetricResults = Record<string, number[]>;

Expand All @@ -253,7 +262,8 @@ export async function main() {
}
}

log(`Collected entries: ${JSON.stringify(collected)}`);
// Spammy if on by default
// log(`Collected entries: ${JSON.stringify(collected)}`);

// For each bucket of each metric compute the average all collected data points
const results: BenchmarkResults = {};
Expand All @@ -269,6 +279,28 @@ export async function main() {
}
}

// Add google benchmark json files, which have data already averaged
const googleBenchmarkFiles = fs.readdirSync(LogsDir).filter(f => f.endsWith('.json'));
for (const file of googleBenchmarkFiles) {
const data = JSON.parse(fs.readFileSync(path.join(LogsDir, file), 'utf-8'));
if (!data.context || !data.benchmarks) {
log(`Invalid google benchmark file: ${file}`);
continue;
}
const metric = getBarretenbergMetric(data.context);
if (!metric) {
log(`Unknown executable in benchmark file ${file}: ${data.context.executable}`);
continue;
}
const circuitSize = '2^20'; // Where to load size from?
const value = data.benchmarks[0]?.real_time;
if (value === undefined) {
log(`Couldn't find real_time in benchmark file ${file}`);
continue;
}
results[metric] = { [circuitSize]: value };
}

const timestampedResults: BenchmarkResultsWithTimestamp = { ...results, timestamp: new Date().toISOString() };

// Write results to disk
Expand Down
Loading

0 comments on commit cc829af

Please sign in to comment.