diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 255e5c6a1c0..6897a370ec2 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -679,6 +679,29 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)
 
           call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.38 (32-32 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (32-32 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
 
       - name: Concat csv and generate html (32-32 int4+fp16)
         shell: cmd
         run: |
@@ -703,7 +726,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
 
       - name: Test on igpu (1024-128 int4+fp16)
@@ -747,6 +770,29 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)
 
           call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.38 (1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
 
       - name: Concat csv and generate html (1024-128 int4+fp16)
         shell: cmd
         run: |
@@ -770,7 +816,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
 
       - name: Test on igpu (2048-256 int4+fp16)
@@ -814,6 +860,29 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)
 
           call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.38 (2048-256 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (2048-256 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
 
       - name: Concat csv and generate html (2048-256 int4+fp16)
         shell: cmd
         run: |
@@ -837,7 +906,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/2048-256/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
 
       - name: Test on igpu (load_low_bit 1024-128 int4+fp16)
@@ -882,6 +951,29 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (load_low_bit 1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (load_low_bit 1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (load_low_bit 1024-128 int4+fp16)
         shell: cmd
         run: |
@@ -903,7 +995,7 @@ jobs:
       - name: Prepare igpu perf test (1024-128)
         shell: bash
         run: |
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml
 
       - name: Test on igpu (1024-128)
@@ -948,6 +1040,29 @@ jobs:
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (1024-128)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (1024-128)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128)
         shell: cmd
         run: |
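Note on the `{today}_testN` substitutions above: they chain across steps. The base transformers pass writes `{today}_test1` results, the existing 4.37 step (unchanged, so not shown in this diff) is assumed to switch run.py's output suffix to `{today}_test2`, and each new 4.38 step switches it on to `{today}_test3`; every later "Prepare" step therefore has to reset from `test3` rather than `test2`, which is exactly what the `s/{today}_test3/{today}_test1/g` edits do. A minimal bash sketch of one full cycle, as a standalone illustration under those assumptions (not part of the patch):

    # run.py is assumed to embed a literal "{today}_testN" tag in its CSV name
    RUN_PY=python/llm/dev/benchmark/all-in-one/run.py
    sed -i 's/{today}_test1/{today}_test2/g' "$RUN_PY"  # 4.37 pass (pre-existing step, assumed)
    sed -i 's/{today}_test2/{today}_test3/g' "$RUN_PY"  # new 4.38 pass (added above)
    sed -i 's/{today}_test3/{today}_test1/g' "$RUN_PY"  # reset before the next in-out group (updated above)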
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index 69e5113ff89..31a4f256984 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -1,16 +1,17 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - 'RWKV/v5-Eagle-7B-HF'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'RWKV/v5-Eagle-7B-HF'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
index 6019026ca4d..2c28164eb78 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -1,7 +1,7 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
+  #- 'Qwen/Qwen1.5-7B-Chat'
+  #- 'Qwen/Qwen2-7B-Instruct'
+  #- 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_438.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_438.yaml
new file mode 100644
index 00000000000..55d3584439b
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
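Each new `*_438.yaml` is consumed the same way as its `_437` sibling: the cmd step above moves it over `config.yaml` and reruns the all-in-one benchmark with transformers pinned to 4.38.2, presumably because the `StableLm` architecture behind `stabilityai/stablelm-zephyr-3b` first shipped in transformers 4.38. A rough bash equivalent of that cmd step for a local dry run (assumptions: a conda env named `igpu-perf` already set up, an Intel GPU, and run.py usable outside CI):

    source "$(conda info --base)/etc/profile.d/conda.sh"
    conda activate igpu-perf
    pip install transformers==4.38.2

    export SYCL_CACHE_PERSISTENT=1   # keep compiled SYCL kernels between runs
    export BIGDL_LLM_XMX_DISABLED=1

    cd python/llm/dev/benchmark/all-in-one
    cp ../../../test/benchmark/igpu-perf/1024-128_438.yaml config.yaml   # cp, not move, to keep the original
    PYTHONIOENCODING=utf-8 python run.py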
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
index 7dbca21d48b..5c3a29b38c2 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -1,15 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
index 12ccaa5d331..721da4194d9 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
@@ -1,7 +1,7 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
+  #- 'Qwen/Qwen1.5-7B-Chat'
+  #- 'Qwen/Qwen2-7B-Instruct'
+  #- 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml
new file mode 100644
index 00000000000..a6aa56916c1
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
index ca7b0726438..2d9609f006d 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
@@ -1,15 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
index 4401207c07c..7a705aefe98 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
@@ -1,7 +1,7 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
+  #- 'Qwen/Qwen1.5-7B-Chat'
+  #- 'Qwen/Qwen2-7B-Instruct'
+  #- 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml
new file mode 100644
index 00000000000..73a2eecc6db
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_loadlowbit_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
index 3ce342f07f8..248d2a0ed5c 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -1,15 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
index f9ae8540cd1..c7176487baa 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
@@ -1,7 +1,7 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
+  #- 'Qwen/Qwen1.5-7B-Chat'
+  #- 'Qwen/Qwen2-7B-Instruct'
+  #- 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml
new file mode 100644
index 00000000000..2299ffdb3ed
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
index ff7c38a351e..6d7d4ec3656 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -1,15 +1,16 @@
 repo_id:
-  - 'THUDM/chatglm3-6b'
-  - 'THUDM/glm-4-9b-chat'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  #- 'THUDM/chatglm3-6b'
+  #- 'THUDM/glm-4-9b-chat'
+  #- 'baichuan-inc/Baichuan2-7B-Chat'
+  #- 'baichuan-inc/Baichuan2-13B-Chat'
+  #- 'meta-llama/Llama-2-7b-chat-hf'
+  #- 'meta-llama/Llama-2-13b-chat-hf'
+  #- 'meta-llama/Meta-Llama-3-8B-Instruct'
+  #- 'mistralai/Mistral-7B-Instruct-v0.2'
+  #- 'openbmb/MiniCPM-1B-sft-bf16'
+  #- 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
index de32d305c4a..95ba59fb013 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
@@ -1,7 +1,7 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
+  #- 'Qwen/Qwen1.5-7B-Chat'
+  #- 'Qwen/Qwen2-7B-Instruct'
+  #- 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml
new file mode 100644
index 00000000000..b317ba5d32d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 3
+num_trials: 5
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-32'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
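After all of the comment-toggling above, a quick sanity check (run from the repo root) lists which repo_ids remain active in each igpu-perf config; this assumes active entries keep the two-space indent used in these YAMLs:

    grep -H "^  - '" python/llm/test/benchmark/igpu-perf/*.yaml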