Add model Qwen-VL-Chat to iGPU-perf (#11558)
* Add model Qwen-VL-Chat to iGPU-perf

* small fix

---------

Co-authored-by: ATMxsp01 <[email protected]>
ATMxsp01 authored Jul 11, 2024
1 parent 105e124 commit 1355b2c
Showing 8 changed files with 40 additions and 3 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/llm_performance_tests.yml
@@ -554,7 +554,7 @@ jobs:
 pip install --upgrade pip
 pip install --upgrade wheel
 pip install --upgrade omegaconf pandas
-pip install --upgrade tiktoken einops transformers_stream_generator
+pip install --upgrade tiktoken einops transformers_stream_generator matplotlib
 cd python\llm
 python setup.py clean --all bdist_wheel --win
@@ -584,7 +584,7 @@ jobs:
 pip install --upgrade pip
 pip install --upgrade wheel
 pip install --upgrade omegaconf pandas
-pip install --upgrade tiktoken einops transformers_stream_generator
+pip install --upgrade tiktoken einops transformers_stream_generator matplotlib
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
 pip show ipex-llm | findstr %TEST_VERSION_DATE%
27 changes: 27 additions & 0 deletions python/llm/dev/benchmark/all-in-one/run.py
@@ -44,6 +44,8 @@
 
 PHI3VISION_IDS = ['microsoft/phi-3-vision-128k-instruct']
 
+QWENVL_IDS = ['Qwen/Qwen-VL-Chat']
+
 results = []
 excludes = []

@@ -923,6 +925,12 @@ def run_transformer_int4_gpu_win(repo_id,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1038,6 +1046,13 @@ def run_transformer_int4_fp16_gpu_win(repo_id,
                                                      torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
@@ -1149,6 +1164,12 @@ def run_transformer_int4_loadlowbit_gpu_win(repo_id,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  modules_to_not_convert=['c_fc', 'out_proj'],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1259,6 +1280,12 @@ def run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.half().to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  modules_to_not_convert=['c_fc', 'out_proj'],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
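All four GPU benchmark paths in run.py (int4, int4-fp16, load-low-bit, and fp16 load-low-bit) gain the same branch: when the repo id is in QWENVL_IDS, the model is loaded with modules_to_not_convert=['c_fc', 'out_proj'], which keeps those projection layers out of low-bit conversion. A minimal standalone sketch of the loading pattern, assuming a hypothetical local model path and 'sym_int4' standing in for the low_bit value run.py receives:

# Minimal sketch of the Qwen-VL-Chat loading branch added above.
# Assumptions: model_path points to a local copy of Qwen/Qwen-VL-Chat,
# and 'sym_int4' stands in for the low_bit parameter.
from ipex_llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "/models/Qwen-VL-Chat"  # hypothetical local path

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    optimize_model=True,
    load_in_low_bit="sym_int4",                   # assumed low-bit format
    modules_to_not_convert=['c_fc', 'out_proj'],  # leave these layers unconverted
    trust_remote_code=True,
    use_cache=True,
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')  # run on the Intel GPU device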
7 changes: 6 additions & 1 deletion python/llm/dev/benchmark/all-in-one/save.py
@@ -23,7 +23,7 @@
 import sys
 import gc
 
-from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, PHI3VISION_IDS, get_model_path
+from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, PHI3VISION_IDS, QWENVL_IDS, get_model_path
 
 current_dir = os.path.dirname(os.path.realpath(__file__))

@@ -57,6 +57,11 @@ def save_model_in_low_bit(repo_id,
                                                      modules_to_not_convert=["vision_embed_tokens"],
                                                      trust_remote_code=True, use_cache=True).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True).eval()
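save.py mirrors the same exclusion so that the pre-converted checkpoints consumed by the load-low-bit benchmarks above are produced consistently. A sketch of the save-then-reload round trip, assuming ipex-llm's save_low_bit/load_low_bit pair and the model_path + '-' + low_bit directory convention visible in run.py:

# Sketch of the convert-once, reload-later flow behind save.py and the
# *_loadlowbit_* benchmark paths. Paths and low_bit value are assumptions.
from ipex_llm.transformers import AutoModelForCausalLM

low_bit = "sym_int4"                 # assumed low-bit format
model_path = "/models/Qwen-VL-Chat"  # hypothetical local path

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    optimize_model=True,
    load_in_low_bit=low_bit,
    modules_to_not_convert=['c_fc', 'out_proj'],
    trust_remote_code=True,
    use_cache=True,
).eval()
model.save_low_bit(model_path + '-' + low_bit)  # write converted weights once

# Later runs skip conversion and reload the saved checkpoint directly:
model = AutoModelForCausalLM.load_low_bit(
    model_path + '-' + low_bit,
    optimize_model=True,
    trust_remote_code=True,
    modules_to_not_convert=['c_fc', 'out_proj'],
    use_cache=True,
).eval()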
1 change: 1 addition & 0 deletions python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -12,6 +12,7 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - 'RWKV/v5-Eagle-7B-HF'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
1 change: 1 addition & 0 deletions
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
1 change: 1 addition & 0 deletions
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
1 change: 1 addition & 0 deletions
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
1 change: 1 addition & 0 deletions python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
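Each iGPU-perf YAML pairs the repo_id list with shared run settings (local_model_hub, warm_up, num_trials). Since the workflow above installs omegaconf, the configs are plausibly read along these lines; the loader choice is an assumption, and only the field names come from the files shown:

# Hypothetical sketch of consuming an iGPU-perf YAML config; field names
# match the files above, the OmegaConf loader is an assumption.
from omegaconf import OmegaConf

conf = OmegaConf.load("python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml")
for repo_id in conf.repo_id:
    print(f"benchmark {repo_id}: warm_up={conf.warm_up}, num_trials={conf.num_trials}")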
