From 16ec3b3b2a970f3fd649d4fe6a9bcdbbd2df5e31 Mon Sep 17 00:00:00 2001
From: jinbridge <2635480475@qq.com>
Date: Fri, 30 Aug 2024 16:06:17 +0800
Subject: [PATCH 1/5] Add MiniCPM-V cpu example

---
 .../Model/minicpm-v/README.md |  75 ++++++++++++
 .../Model/minicpm-v/chat.py   | 107 ++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
new file mode 100644
index 00000000000..18b71e82fed
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -0,0 +1,75 @@
+# MiniCPM-V
+In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on MiniCPM-V models. For illustration purposes, we utilize the [openbmb/MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) as a reference MiniCPM-V model.
+
+## 0. Requirements
+To run these examples with IPEX-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a MiniCPM-V model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations.
+### 1. Install
+We suggest using conda to manage the environment:
+
+On Linux:
+
+```bash
+conda create -n llm python=3.11
+conda activate llm
+
+# install ipex-llm with 'all' option
+pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
+pip install pillow torchvision
+```
+On Windows:
+
+```cmd
+conda create -n llm python=3.11
+conda activate llm
+
+pip install --pre --upgrade ipex-llm[all]
+pip install pillow torchvision
+```
+
+### 2. Run
+```
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the MiniCPM-V model (e.g. `openbmb/MiniCPM-V-2_6`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'openbmb/MiniCPM-V-2_6'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is AI?'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+
+> **Note**: When loading the model in 4-bit, IPEX-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit requires approximately 2*X* GB of memory for loading, and ~0.5*X* GB memory for further inference.
+>
+> Please select the appropriate size of the MiniCPM model based on the capabilities of your machine.
+
+#### 2.1 Client
+On a client Windows machine, it is recommended to run directly with full utilization of all cores:
+```cmd
+python ./generate.py
+```
+
+#### 2.2 Server
+For optimal performance on a server, it is recommended to set several environment variables (refer to [here](../README.md#best-known-configuration-on-linux) for more information), and run the example with all the physical cores of a single socket.
+
+E.g. on Linux,
+```bash
+# set IPEX-LLM env variables
+source ipex-llm-init
+
+# e.g. for a server with 48 cores per socket
+export OMP_NUM_THREADS=48
+numactl -C 0-47 -m 0 python ./generate.py
+```
+
+#### 2.3 Sample Output
+#### [openbmb/MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6)
+```log
+Inference time: xxxx s
+-------------------- Input Image --------------------
+http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg
+-------------------- Input Prompt --------------------
+What is in the image?
+-------------------- Chat Output --------------------
+The image features a young child holding a white teddy bear dressed in pink. The background includes some red flowers and what appears to be a stone wall.
+```
\ No newline at end of file
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
new file mode 100644
index 00000000000..79be0dc53ae
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import time
+import argparse
+import requests
+import torch
+from PIL import Image
+from ipex_llm.transformers import AutoModel
+from transformers import AutoTokenizer
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `chat()` API for MiniCPM-V model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="openbmb/MiniCPM-V-2_6",
+                        help='The huggingface repo id for the MiniCPM-V model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--image-url-or-path', type=str,
+                        default='http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg',
+                        help='The URL or path to the image to infer')
+    parser.add_argument('--prompt', type=str, default="What is in the image?",
+                        help='Prompt to infer')
+    parser.add_argument('--stream', action='store_true',
+                        help='Whether to chat in streaming mode')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+    image_path = args.image_url_or_path
+
+    # Load the model in 4-bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModel.from_pretrained(model_path,
+                                      load_in_low_bit="sym_int4",
+                                      optimize_model=True,
+                                      trust_remote_code=True,
+                                      use_cache=True,
+                                      torch_dtype=torch.float32,
+                                      modules_to_not_convert=["vpm", "resampler"])
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path,
+                                              trust_remote_code=True)
+    model.eval()
+
+    query = args.prompt
+    if os.path.exists(image_path):
+        image = Image.open(image_path).convert('RGB')
+    else:
+        image = Image.open(requests.get(image_path, stream=True).raw).convert('RGB')
+
+    # Generate predicted tokens
+    # here the prompt tuning refers to https://huggingface.co/openbmb/MiniCPM-V-2_6/blob/main/README.md
+    msgs = [{'role': 'user', 'content': [image, args.prompt]}]
+
+    # ipex_llm model needs a warmup, then inference time can be accurate
+    model.chat(
+        image=None,
+        msgs=msgs,
+        tokenizer=tokenizer,
+    )
+
+    if args.stream:
+        res = model.chat(
+            image=None,
+            msgs=msgs,
+            tokenizer=tokenizer,
+            stream=True
+        )
+
+        print('-'*20, 'Input Image', '-'*20)
+        print(image_path)
+        print('-'*20, 'Input Prompt', '-'*20)
+        print(args.prompt)
+        print('-'*20, 'Stream Chat Output', '-'*20)
+        for new_text in res:
+            print(new_text, flush=True, end='')
+    else:
+        st = time.time()
+        res = model.chat(
+            image=None,
+            msgs=msgs,
+            tokenizer=tokenizer,
+        )
+        end = time.time()
+
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Input Image', '-'*20)
+        print(image_path)
+        print('-'*20, 'Input Prompt', '-'*20)
+        print(args.prompt)
+        print('-'*20, 'Chat Output', '-'*20)
+        print(res)

From b46e38934a9f794ec057bee127187be4b74d587c Mon Sep 17 00:00:00 2001
From: jinbridge <2635480475@qq.com>
Date: Fri, 30 Aug 2024 16:15:10 +0800
Subject: [PATCH 2/5] fix

---
 .../Model/minicpm-v/README.md | 29 +++++++++++++------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
index 18b71e82fed..3ea49121259 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -4,8 +4,8 @@ In this directory, you will find examples on how you could apply IPEX-LLM INT4 o
 ## 0. Requirements
 To run these examples with IPEX-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
 
-## Example: Predict Tokens using `generate()` API
-In the example [generate.py](./generate.py), we show a basic use case for a MiniCPM-V model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations.
+## Example: Predict Tokens using `chat()` API
+In the example [chat.py](./chat.py), we show a basic use case for a MiniCPM-V model to predict the next N tokens using `chat()` API, with IPEX-LLM INT4 optimizations.
 ### 1. Install
 We suggest using conda to manage the environment:
 
@@ -30,14 +30,25 @@ pip install pillow torchvision
 ```
 
 ### 2. Run
-```
-python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
-```
+
+- chat without streaming mode:
+  ```
+  python ./chat.py --prompt 'What is in the image?'
+  ```
+- chat in streaming mode:
+  ```
+  python ./chat.py --prompt 'What is in the image?' --stream
+  ```
+
+> [!TIP]
+> For chatting in streaming mode, it is recommended to set the environment variable `PYTHONUNBUFFERED=1`.
+
 
 Arguments info:
 - `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the MiniCPM-V model (e.g. `openbmb/MiniCPM-V-2_6`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'openbmb/MiniCPM-V-2_6'`.
-- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is AI?'`.
-- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+- `--image-url-or-path IMAGE_URL_OR_PATH`: argument defining the image to be inferred. It defaults to `'http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is in the image?'`.
+- `--stream`: flag to chat in streaming mode
 
 > **Note**: When loading the model in 4-bit, IPEX-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit requires approximately 2*X* GB of memory for loading, and ~0.5*X* GB memory for further inference.
 >
 > Please select the appropriate size of the MiniCPM model based on the capabilities of your machine.
 
 #### 2.1 Client
 On a client Windows machine, it is recommended to run directly with full utilization of all cores:
 ```cmd
-python ./generate.py
+python ./chat.py
 ```
 
 #### 2.2 Server
@@ -59,7 +70,7 @@ source ipex-llm-init
 
 # e.g. for a server with 48 cores per socket
 export OMP_NUM_THREADS=48
-numactl -C 0-47 -m 0 python ./generate.py
+numactl -C 0-47 -m 0 python ./chat.py
 ```
 
 #### 2.3 Sample Output

From a12db0b03a407e76bb1381b57cfad2c6593139d4 Mon Sep 17 00:00:00 2001
From: jinbridge <2635480475@qq.com>
Date: Fri, 30 Aug 2024 16:56:00 +0800
Subject: [PATCH 3/5] fix

---
 .../Model/minicpm-v/README.md | 19 ++++++++++++++++---
 .../Model/minicpm-v/chat.py   |  7 -------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
index 3ea49121259..d3344014670 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -17,7 +17,7 @@ conda activate llm
 
 # install ipex-llm with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
-pip install pillow torchvision
+pip install torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cpu
 ```
 On Windows:
 
@@ -26,7 +26,7 @@ conda create -n llm python=3.11
 conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
-pip install pillow torchvision
+pip install torchvision==0.16.2
 ```
 
 ### 2. Run
@@ -83,4 +83,17 @@ http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg
 What is in the image?
 -------------------- Chat Output --------------------
 The image features a young child holding a white teddy bear dressed in pink. The background includes some red flowers and what appears to be a stone wall.
-```
\ No newline at end of file
+```
+
+```log
+-------------------- Input Image --------------------
+http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg
+-------------------- Input Prompt --------------------
+图片里有什么?
+-------------------- Stream Chat Output --------------------
+图片中有一个小女孩，她手里拿着一个穿着粉色裙子的白色小熊玩偶。背景中有红色花朵和石头结构，可能是一个花园或庭院。
+```
+
+The sample input image (fetched from the [COCO dataset](https://cocodataset.org/#explore?id=264959)) is:
+
+
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
index 79be0dc53ae..e0a07c59aa8 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
@@ -67,13 +67,6 @@
     # here the prompt tuning refers to https://huggingface.co/openbmb/MiniCPM-V-2_6/blob/main/README.md
     msgs = [{'role': 'user', 'content': [image, args.prompt]}]
 
-    # ipex_llm model needs a warmup, then inference time can be accurate
-    model.chat(
-        image=None,
-        msgs=msgs,
-        tokenizer=tokenizer,
-    )
-
     if args.stream:
         res = model.chat(
             image=None,

From 863b245488b78935ef18aabd603f29596e316813 Mon Sep 17 00:00:00 2001
From: jinbridge <2635480475@qq.com>
Date: Fri, 30 Aug 2024 16:56:50 +0800
Subject: [PATCH 4/5] fix

---
 .../CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
index d3344014670..b2ab4d11c44 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -18,6 +18,7 @@ conda activate llm
 # install ipex-llm with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
 pip install torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cpu
+pip install transformers==4.40.0 trl
 ```
 On Windows:
 
@@ -27,6 +28,7 @@ conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
 pip install torchvision==0.16.2
+pip install transformers==4.40.0 trl
 ```
 
 ### 2. Run

From e404b6be8010e69e78449df0fb5fdfa61705a394 Mon Sep 17 00:00:00 2001
From: jinbridge <2635480475@qq.com>
Date: Mon, 2 Sep 2024 10:10:38 +0800
Subject: [PATCH 5/5] fix

---
 README.md                                                    | 2 +-
 .../CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 3c767128c74..a34c880b782 100644
--- a/README.md
+++ b/README.md
@@ -319,7 +319,7 @@ Over 50 models have been optimized/verified on `ipex-llm`, including *LLaMA/LLaM
 | MiniCPM-V | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V) |
 | MiniCPM-V-2 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2) |
 | MiniCPM-Llama3-V-2_5 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-Llama3-V-2_5) |
-| MiniCPM-V-2_6 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |
+| MiniCPM-V-2_6 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v) | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |
 
 ## Get Support
 - Please report a bug or raise a feature request by opening a [Github Issue](https://github.com/intel-analytics/ipex-llm/issues)
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
index b2ab4d11c44..640be289d36 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -17,7 +17,7 @@ conda activate llm
 
 # install ipex-llm with 'all' option
 pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
-pip install torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cpu
+pip install torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cpu
 pip install transformers==4.40.0 trl
 ```
 On Windows:
 
@@ -27,7 +27,7 @@ conda create -n llm python=3.11
 conda activate llm
 
 pip install --pre --upgrade ipex-llm[all]
-pip install torchvision==0.16.2
+pip install torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cpu
 pip install transformers==4.40.0 trl
 ```