Skip to content

Commit

Permalink
Used to test llm-performance on spr-perf (#9316)
Browse files Browse the repository at this point in the history
* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update action.yml

* Create cpu-perf-test.yaml

* Update action.yml

* Update action.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml
  • Loading branch information
Zephyr596 authored Nov 3, 2023
1 parent 742f60a commit 9d24012
Show file tree
Hide file tree
Showing 2 changed files with 66 additions and 1 deletion.
49 changes: 48 additions & 1 deletion .github/workflows/llm_performance_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -137,4 +137,51 @@ jobs:
curl -T ./*.csv ${LLM_FTP_URL}/llm/ggml-actions/perf/
cp ./*.csv /mnt/disk1/nightly_perf/
cd ../../../test/benchmark
python csv_to_html.py -f /mnt/disk1/nightly_perf/
python csv_to_html.py -f ../../dev/benchmark/all-in-one
cp ./*.html /mnt/disk1/nightly_perf/
# Nightly LLM performance job for CPU, run on the self-hosted spr-perf
# (Sapphire Rapids) runner. Depends on llm-cpp-build so the native llm
# binaries are available for download before the benchmark runs.
# NOTE(review): indentation below is reconstructed — the extracted page
# stripped all leading whitespace; verify against the repository file.
llm-performance-test-on-spr:
  needs: llm-cpp-build
  strategy:
    fail-fast: false
    matrix:
      # Quoted so the version is a string, not the float 3.9.
      python-version: ["3.9"]
  runs-on: [self-hosted, llm, spr-perf]
  env:
    # Pin thread counts for reproducible benchmark numbers on the runner.
    OMP_NUM_THREADS: 16
    THREAD_NUM: 16
    ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
  steps:
    - uses: actions/checkout@v3

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}

    - name: Install dependencies
      shell: bash
      run: |
        python -m pip install --upgrade pip
        python -m pip install --upgrade wheel
        python -m pip install --upgrade omegaconf
        python -m pip install --upgrade pandas
        python -m pip install --upgrade einops

    - name: Download llm binary
      uses: ./.github/actions/llm/download-llm-binary

    - name: Run LLM install (all) test
      uses: ./.github/actions/llm/setup-llm-env

    - name: Test on cpu
      shell: bash
      run: |
        # Swap in the CPU-specific benchmark config, then run the
        # all-in-one benchmark and publish CSV/HTML results to the
        # runner's shared nightly_perf_cpu directory.
        mv python/llm/test/benchmark/cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
        cd python/llm/dev/benchmark/all-in-one
        export http_proxy=${HTTP_PROXY}
        export https_proxy=${HTTPS_PROXY}
        python run.py
        cp ./*.csv /mnt/disk1/nightly_perf_cpu/
        cd ../../../test/benchmark
        python csv_to_html.py -f /mnt/disk1/nightly_perf_cpu/
18 changes: 18 additions & 0 deletions python/llm/test/benchmark/cpu-perf-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
---
# Benchmark configuration for the nightly CPU performance test
# (consumed by python/llm/dev/benchmark/all-in-one/run.py as config.yaml).
# NOTE(review): sequence indentation reconstructed — the extracted page
# stripped leading whitespace; verify against the repository file.
repo_id:
  - 'meta-llama/Llama-2-7b-chat-hf'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3
num_beams: 1  # default to greedy search
low_bit: 'sym_int4'  # default to use 'sym_int4' (i.e. symmetric int4)
# Each pair is "input_tokens-output_tokens" for one benchmark run.
in_out_pairs:
  - '32-32'
  - '1024-128'
test_api:
  - "transformer_int4"
  # - "native_int4"
  # - "optimize_model"
  # - "pytorch_autocast_bf16"
  # - "ipex_fp16_gpu" # on Intel GPU
  # - "transformer_int4_gpu" # on Intel GPU
  # - "optimize_model_gpu" # on Intel GPU

0 comments on commit 9d24012

Please sign in to comment.