[UI v2] Edit variables (#16035) #16744
Workflow file for this run

name: Benchmarks

env:
  PY_COLORS: 1

on:
  pull_request:
    paths:
      - .github/workflows/benchmarks.yaml
      - .github/workflows/python-tests.yaml
      - "src/prefect/**/*.py"
      - requirements.txt
      - requirements-dev.txt
      - setup.cfg
      - Dockerfile
  push:
    branches:
      - main

permissions:
  contents: read
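
# Only one run per ref at a time: in-progress runs are cancelled when a pull request
# is updated, while runs for pushes to main are left to finish.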
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
  run-benchmarks:
    name: Benchmark
    # Runs with ephemeral API and SQLite database right now
    runs-on:
      group: oss-larger-runners
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
          fetch-depth: 0
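
      # Note: the Buildx step below is gated on `matrix.build-docker-images`, but this
      # job defines no matrix, so the condition is falsy and the step is skipped
      # (likely carried over from python-tests.yaml).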
      - name: Set up Docker Buildx
        if: ${{ matrix.build-docker-images }}
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: image=moby/buildkit:v0.12.5

      - name: Set up Python 3.10
        id: setup_python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"

      - name: UV Cache
        uses: actions/cache@v4
        id: cache-uv
        with:
          path: ~/.cache/uv
          key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }}
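          # The key embeds the resolved Python version and a hash of the requirements
          # files, so the uv cache is rebuilt whenever either changes.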

      - name: Install packages
        run: |
          python -m pip install -U uv
          uv pip install --upgrade --system -e .[dev]

      - name: Prepare benchmark comparisons
        # Note: We use a "cache" instead of artifacts because artifacts are not available
        # across workflow runs.
        id: bench-cache
        uses: actions/cache@v4
        with:
          path: ./.benchmarks
          # Pushes benchmark results for this branch and sha; this will always be a cache miss
          # and `restore-keys` will be used to get the benchmarks for comparison
          key: ${{ runner.os }}-${{ github.head_ref || 'main' }}-${{ github.sha }}
          # Pulls benchmark results for the base branch
          restore-keys: |
            ${{ runner.os }}-${{ github.base_ref }}-
            ${{ runner.os }}-main-
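      # Example of the fallback above: for a pull request into main, the exact key
      # `<os>-<branch>-<sha>` will not match any existing cache, so the most recent
      # cache whose key starts with `<os>-main-` is restored as the comparison baseline.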

      - name: Start server
        run: |
          PREFECT_HOME=$(pwd) prefect server start &
          PREFECT_API_URL="http://127.0.0.1:4200/api" ./scripts/wait-for-server.py

          # TODO: Replace `wait-for-server` with dedicated command
          # https://github.com/PrefectHQ/prefect/issues/6990

      - name: Run benchmarks
        env:
          HEAD_REF: ${{ github.head_ref }}
          GITHUB_SHA: ${{ github.sha }}
        # Includes comparison to previous benchmarks if available.
        # The import benchmark is ignored because we run those
        # benchmarks via CodSpeed.
        run: |
          if [[ -z "$HEAD_REF" ]]; then
            # HEAD_REF is unset or empty, use 'main' with the SHA
            uniquename="main-$GITHUB_SHA"
          else
            # HEAD_REF is set, use the branch name directly
            uniquename="$HEAD_REF"
          fi

          # Allow alphanumeric, underscores, and dashes, and replace other
          # characters with an underscore
          sanitized_uniquename="${uniquename//[^a-zA-Z0-9_\-]/_}"
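          # e.g. a head ref such as "ui/edit-variables" becomes "ui_edit-variables",
          # which is safe to embed in the saved benchmark's name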

          PREFECT_API_URL="http://127.0.0.1:4200/api" \
          python -m benches \
            --ignore=benches/bench_import.py \
            --timeout=180 \
            --benchmark-save="${sanitized_uniquename}" \
            ${{ steps.bench-cache.outputs.cache-hit && '--benchmark-compare' || '' }}
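
The "Start server" step relies on ./scripts/wait-for-server.py to block until the API is reachable before the benchmarks run. As a rough sketch only (not the actual script), an equivalent shell wait loop could poll a health endpoint until it responds, assuming the server exposes one at /api/health:

  #!/usr/bin/env bash
  # Minimal sketch of a "wait for server" step; the real scripts/wait-for-server.py may differ.
  set -euo pipefail

  api_url="${PREFECT_API_URL:-http://127.0.0.1:4200/api}"
  deadline=$((SECONDS + 120))  # give the server up to two minutes to come up

  # Poll the (assumed) health endpoint until it answers, or give up at the deadline.
  until curl -fsS "${api_url}/health" > /dev/null 2>&1; do
    if (( SECONDS >= deadline )); then
      echo "Server at ${api_url} did not become ready in time" >&2
      exit 1
    fi
    sleep 1
  done
  echo "Server at ${api_url} is ready"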