nm benchmark #36
name: benchmark

on:

  # makes workflow reusable
  workflow_call:
    inputs:
      label:
        description: "requested runner label (specifies instance)"
        type: string
        required: true
      benchmark_config_list_file:
        description: "path to a file listing the benchmark configs to run. For reference, see .github/data/nm_benchmark_configs_list.txt"
        type: string
        required: true
      timeout:
        description: "maximum time, in minutes, the runner will be up"
        type: string
        required: true
      gitref:
        description: "git commit hash or branch name"
        type: string
        required: true
      Gi_per_thread:
        description: "requested GiB to reserve per thread"
        type: string
        required: true
      python:
        description: "python version, e.g. 3.10.12"
        type: string
        required: true
      push_results_to_gh_pages:
        description: "when set to 'true', the workflow pushes all benchmarking results to the gh-pages UI"
        type: string
        required: true
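  # A minimal sketch of how another workflow could call this reusable
  # workflow. The caller job name, the path to this file, and all input
  # values below are illustrative assumptions, not taken from this repo:
  #
  #   jobs:
  #     benchmark:
  #       uses: ./.github/workflows/nm-benchmark.yml   # assumed path to this file
  #       secrets: inherit   # only needed once the commented-out steps below are re-enabled
  #       with:
  #         label: benchmark-runner                    # hypothetical runner label
  #         benchmark_config_list_file: .github/data/nm_benchmark_configs_list.txt
  #         timeout: "240"                             # minutes; illustrative value
  #         gitref: main
  #         Gi_per_thread: "4"                         # illustrative value
  #         python: "3.10.12"
  #         push_results_to_gh_pages: "false"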
  # makes workflow manually callable
  workflow_dispatch:
    inputs:
      label:
        description: "requested runner label (specifies instance)"
        type: string
        required: true
      benchmark_config_list_file:
        description: "path to a file listing the benchmark configs to run. For reference, see .github/data/nm_benchmark_configs_list.txt"
        type: string
        required: true
      timeout:
        description: "maximum time, in minutes, the runner will be up"
        type: string
        required: true
      gitref:
        description: "git commit hash or branch name"
        type: string
        required: true
      Gi_per_thread:
        description: "requested GiB to reserve per thread"
        type: string
        required: true
      python:
        description: "python version, e.g. 3.10.12"
        type: string
        required: true
      push_results_to_gh_pages:
        description: "when set to 'true', the workflow pushes all benchmarking results to the gh-pages UI"
        type: choice
        options:
          - 'true'
          - 'false'
        default: 'false'
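  # Because of the workflow_dispatch trigger, the workflow can also be started
  # from the GitHub CLI. A sketch, assuming this file lives at
  # .github/workflows/nm-benchmark.yml and using illustrative input values:
  #
  #   gh workflow run nm-benchmark.yml \
  #     -f label=benchmark-runner \
  #     -f benchmark_config_list_file=.github/data/nm_benchmark_configs_list.txt \
  #     -f timeout=240 \
  #     -f gitref=main \
  #     -f Gi_per_thread=4 \
  #     -f python=3.10.12 \
  #     -f push_results_to_gh_pages=false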
jobs:
  BENCHMARK:
    runs-on: ${{ inputs.label }}
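    # timeout-minutes expects a number, but workflow inputs arrive as strings,
    # so fromJSON converts the string input into a numeric value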
    timeout-minutes: ${{ fromJSON(inputs.timeout) }}
    steps:
      - name: sanity check
        shell: bash
        run: |
          echo "event name is: ${{ github.event_name }}"
          echo "event type is: ${{ github.event.action }}"
          echo "push results to gh-pages: ${{ inputs.push_results_to_gh_pages }}"
      - name: run push
        if: ${{ inputs.push_results_to_gh_pages == 'true' }}
        run: |
          echo "evaluated to true: pushing results to gh-pages"
      - name: stay put
        if: ${{ inputs.push_results_to_gh_pages == 'false' }}
        run: |
          echo "evaluated to false: not pushing to gh-pages"
      #- name: checkout repository code
      #  uses: actions/checkout@v4
      #  with:
      #    fetch-depth: 0
      #    ref: ${{ inputs.gitref }}
      #    submodules: recursive
      #- name: setenv
      #  id: setenv
      #  uses: ./.github/actions/nm-set-env/
      #  with:
      #    hf_token: ${{ secrets.NM_HF_TOKEN }}
      #    Gi_per_thread: ${{ inputs.Gi_per_thread }}
      #- name: set python
      #  id: set_python
      #  uses: ./.github/actions/nm-set-python/
      #  with:
      #    python: ${{ inputs.python }}
      #    venv: TEST
      #- name: hf cache
      #  id: hf_cache
      #  uses: ./.github/actions/nm-hf-cache/
      #  with:
      #    fs_cache: ${{ secrets.HF_FS_CACHE }}
      #- name: build
      #  id: build
      #  uses: ./.github/actions/nm-build-vllm/
      #  with:
      #    Gi_per_thread: ${{ inputs.Gi_per_thread }}
      #    python: ${{ inputs.python }}
      #    venv: TEST
      #    pypi: ${{ secrets.NM_PRIVATE_PYPI_LOCATION }}
      #- name: run benchmarks
      #  uses: ./.github/actions/nm-benchmark/
      #  with:
      #    benchmark_config_list_file: ${{ inputs.benchmark_config_list_file }}
      #    output_directory: benchmark-results
      #    python: ${{ inputs.python }}
      #    venv: TEST
      #- name: store benchmark result artifacts
      #  uses: actions/upload-artifact@v4
      #  if: success() || failure()
      #  with:
      #    name: ${{ github.run_id }}-${{ inputs.label }}
      #    path: benchmark-results
      #    retention-days: 10
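      # Note on the commented-out upload step: `if: success() || failure()`
      # runs the artifact upload whether earlier steps passed or failed, but
      # skips it when the job is cancelled (unlike `if: always()`), and
      # retention-days keeps the uploaded results for 10 days.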