diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 9185e4282d5..2f6de2d5afe 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -714,6 +714,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (32-32 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (32-32 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (32-32 int4+fp16)
         shell: cmd
         run: |
@@ -737,7 +764,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
 
       - name: Test on igpu (1024-128 int4+fp16)
@@ -811,6 +838,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128 int4+fp16)
         shell: cmd
         run: |
@@ -833,7 +887,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
 
       - name: Test on igpu (2048-256 int4+fp16)
@@ -907,6 +961,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (2048-256 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (2048-256 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (2048-256 int4+fp16)
         shell: cmd
         run: |
@@ -929,7 +1010,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/2048-256/3072-384/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
 
       - name: Test on igpu (3072-384 int4+fp16)
@@ -1003,6 +1084,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (3072-384 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (3072-384 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (3072-384 int4+fp16)
         shell: cmd
         run: |
@@ -1025,7 +1133,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/3072-384/4096-512/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
 
       - name: Test on igpu (4096-512 int4+fp16)
@@ -1099,6 +1207,36 @@ jobs:
           call conda deactivate
 
+      # NOTE: Gemma2 not working for 4096-512.
+      # When it works, uncomment this section and remember to change "'s/{today}_test3/{today}_test1/g'" in next section.
+
+      #- name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16)
+      #  shell: bash
+      #  run: |
+      #    sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+      #    sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
+
+      #- name: Test on igpu for transformers 4.43 (4096-512 int4+fp16)
+      #  shell: cmd
+      #  run: |
+      #    call conda activate igpu-perf
+      #    pip install transformers==4.43.1
+      #    pip install trl
+      #
+      #    set SYCL_CACHE_PERSISTENT=1
+      #    set BIGDL_LLM_XMX_DISABLED=1
+      #
+      #    cd python\llm\dev\benchmark\all-in-one
+      #    move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_443.yaml config.yaml
+      #    set PYTHONIOENCODING=utf-8
+      #    python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
+      #    if %ERRORLEVEL% neq 0 (exit /b 1)
+      #    python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+      #    if %ERRORLEVEL% neq 0 (exit /b 1)
+      #
+      #    pip uninstall trl
+      #    call conda deactivate
+
       - name: Concat csv and generate html (4096-512 int4+fp16)
         shell: cmd
         run: |
@@ -1195,6 +1333,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (load_low_bit 1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (load_low_bit 1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (load_low_bit 1024-128 int4+fp16)
         shell: cmd
         run: |
@@ -1216,7 +1381,7 @@ jobs:
       - name: Prepare igpu perf test (1024-128)
         shell: bash
         run: |
-          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml
 
       - name: Test on igpu (1024-128)
@@ -1290,6 +1455,33 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.43 (1024-128)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_443.yaml
+
+      - name: Test on igpu for transformers 4.43 (1024-128)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.43.1
+          pip install trl
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_443.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          pip uninstall trl
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128)
         shell: cmd
         run: |
diff --git a/python/llm/test/benchmark/concat_csv.py b/python/llm/test/benchmark/concat_csv.py
index 908f71f1aed..442eab7cc9a 100644
--- a/python/llm/test/benchmark/concat_csv.py
+++ b/python/llm/test/benchmark/concat_csv.py
@@ -36,7 +36,7 @@ def main():
     merged_df = pd.concat([pd.read_csv(file, index_col=0) for file in csv_files], ignore_index=True)
     merged_df.reset_index(drop=True, inplace=True)
 
-    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "").replace("_test3", "")
+    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "").replace("_test3", "").replace("_test4", "")
     merged_df.to_csv(merged_csv)
 
 if __name__ == "__main__":
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index b0bd5f30c20..98fab56cbc4 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -1,17 +1,17 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - 'RWKV/v5-Eagle-7B-HF'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'RWKV/v5-Eagle-7B-HF'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
index b87e6c2c865..cc0a24821e1 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml
new file mode 100644
index 00000000000..4667ff34c3a
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
index 39d575680ab..60b3bffc61c 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -1,16 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
index d7172186bb3..ff58a625bbb 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml
new file mode 100644
index 00000000000..2f4bbd2270d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
index 2730e465d47..dd5304c6695 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
@@ -1,16 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
index 3839d0d2951..3d48cf9b9f3 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml
new file mode 100644
index 00000000000..8d8e16c5c42
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_loadlowbit_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
index c53e6283919..850912f7a5f 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -1,16 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
index d6ee670ea29..229d8ec35f9 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
new file mode 100644
index 00000000000..3f8e554d19d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
index 47b9839a789..14adf06075b 100644
--- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
@@ -1,15 +1,15 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
index ddbf1cf4d19..ea08a7a120f 100644
--- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
new file mode 100644
index 00000000000..ac1a4d6511a
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '3072-384'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
index 39115e0231b..23d46b79201 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -1,16 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
index 1a969165267..e7f6d4673fd 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml
new file mode 100644
index 00000000000..a02b19b1cf2
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 3
+num_trials: 5
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-32'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
index 26e128a564c..04e22f983c0 100644
--- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
@@ -1,14 +1,14 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-6B-Chat'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- '01-ai/Yi-6B-Chat'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
index c85da867aa2..e08cd4ec921 100644
--- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
@@ -1,9 +1,9 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
+  # - 'Qwen/Qwen1.5-7B-Chat'
+  # - 'Qwen/Qwen2-1.5B-Instruct'
+  # - 'Qwen/Qwen2-7B-Instruct'
+  # - 'microsoft/Phi-3-mini-4k-instruct'
+  # - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
new file mode 100644
index 00000000000..53467812ac7
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'google/gemma-2-2b-it'
+  - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '4096-512'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
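
All of the new transformers 4.43 steps follow the same pattern, so any one of them can be exercised locally on a Windows iGPU machine before the scheduled CI run. A minimal sketch for the 1024-128 int4+fp16 case, with assumptions stated: it reuses the existing igpu-perf conda environment and local model hub from the current 4.37/4.38 runs, uses copy instead of move so the source YAML is kept, drops the CI-only CSV_SAVE_PATH/LOG_FILE log redirection, and the --suffix test4 check presumes run.py has been patched to write *_test4.csv the same way the workflow's sed step does.

    call conda activate igpu-perf
    pip install transformers==4.43.1
    pip install trl

    set SYCL_CACHE_PERSISTENT=1
    set BIGDL_LLM_XMX_DISABLED=1
    set PYTHONIOENCODING=utf-8

    cd python\llm\dev\benchmark\all-in-one
    copy ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_443.yaml config.yaml
    python run.py
    python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4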