From c434c813656e164ec3912a75728c3ee7a9e50dce Mon Sep 17 00:00:00 2001
From: "Jin, Qiao" <89779290+JinBridger@users.noreply.github.com>
Date: Mon, 2 Sep 2024 10:17:57 +0800
Subject: [PATCH] Add MiniCPM-V cpu example (#11975)

* Add MiniCPM-V cpu example
* fix
* fix
* fix
* fix
---
 README.md                                  |   2 +-
 .../Model/minicpm-v/README.md              | 101 ++++++++++++++++++
 .../Model/minicpm-v/chat.py                | 100 +++++++++++++++++
 3 files changed, 202 insertions(+), 1 deletion(-)
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py

diff --git a/README.md b/README.md
index 3c767128c74..a34c880b782 100644
--- a/README.md
+++ b/README.md
@@ -319,7 +319,7 @@ Over 50 models have been optimized/verified on `ipex-llm`, including *LLaMA/LLaM
 | MiniCPM-V | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V) |
 | MiniCPM-V-2 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2) |
 | MiniCPM-Llama3-V-2_5 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-Llama3-V-2_5) |
-| MiniCPM-V-2_6 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |
+| MiniCPM-V-2_6 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v) | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |

 ## Get Support
 - Please report a bug or raise a feature request by opening a [Github Issue](https://github.com/intel-analytics/ipex-llm/issues)
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
new file mode 100644
index 00000000000..640be289d36
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/README.md
@@ -0,0 +1,101 @@
+# MiniCPM-V
+In this directory, you will find examples of how to apply IPEX-LLM INT4 optimizations to MiniCPM-V models. For illustration purposes, we use [openbmb/MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) as a reference MiniCPM-V model.
+
+## 0. Requirements
+To run these examples with IPEX-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `chat()` API
+In the example [chat.py](./chat.py), we show a basic use case for a MiniCPM-V model to predict the next N tokens using the `chat()` API, with IPEX-LLM INT4 optimizations.
+### 1. Install
+We suggest using conda to manage the environment:
+
+On Linux:
+
+```bash
+conda create -n llm python=3.11
+conda activate llm
+
+# install ipex-llm with 'all' option
+pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
+pip install torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cpu
+pip install transformers==4.40.0 trl
+```
+On Windows:
+
+```cmd
+conda create -n llm python=3.11
+conda activate llm
+
+pip install --pre --upgrade ipex-llm[all]
+pip install torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cpu
+pip install transformers==4.40.0 trl
+```
+
+### 2. Run
+
+- chat without streaming mode:
+  ```
+  python ./chat.py --prompt 'What is in the image?'
+  ```
+- chat in streaming mode:
+  ```
+  python ./chat.py --prompt 'What is in the image?' --stream
+  ```
+
+> [!TIP]
+> For chatting in streaming mode, it is recommended to set the environment variable `PYTHONUNBUFFERED=1`.
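+
+For example, on Linux you could run the streaming example with unbuffered output like this (a minimal illustration of the tip above; adjust the prompt and image path as needed):
+
+```bash
+# stream the answer and flush each token to the terminal as soon as it is generated
+PYTHONUNBUFFERED=1 python ./chat.py --prompt 'What is in the image?' --stream
+```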
+
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the MiniCPM-V model (e.g. `openbmb/MiniCPM-V-2_6`) to be downloaded, or the path to the huggingface checkpoint folder. The default is `'openbmb/MiniCPM-V-2_6'`.
+- `--image-url-or-path IMAGE_URL_OR_PATH`: argument defining the image to be inferred. The default is `'http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with the integrated prompt format for chat). The default is `'What is in the image?'`.
+- `--stream`: flag to chat in streaming mode.
+
+> **Note**: When loading the model in 4-bit, IPEX-LLM converts linear layers in the model into INT4 format. In theory, an *X*B model saved in 16-bit requires approximately 2*X* GB of memory for loading, and ~0.5*X* GB of memory for further inference.
+>
+> Please select the appropriate size of the MiniCPM model based on the capabilities of your machine.

+#### 2.1 Client
+On a client Windows machine, it is recommended to run directly with full utilization of all cores:
+```cmd
+python ./chat.py
+```
+
+#### 2.2 Server
+For optimal performance on a server, it is recommended to set several environment variables (refer to [here](../README.md#best-known-configuration-on-linux) for more information), and run the example with all the physical cores of a single socket.
+
+E.g. on Linux,
+```bash
+# set IPEX-LLM env variables
+source ipex-llm-init
+
+# e.g. for a server with 48 cores per socket
+export OMP_NUM_THREADS=48
+numactl -C 0-47 -m 0 python ./chat.py
+```
+
+#### 2.3 Sample Output
+#### [openbmb/MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6)
+```log
+Inference time: xxxx s
+-------------------- Input Image --------------------
+http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg
+-------------------- Input Prompt --------------------
+What is in the image?
+-------------------- Chat Output --------------------
+The image features a young child holding a white teddy bear dressed in pink. The background includes some red flowers and what appears to be a stone wall.
+```
+
+```log
+-------------------- Input Image --------------------
+http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg
+-------------------- Input Prompt --------------------
+图片里有什么?
+-------------------- Stream Chat Output --------------------
+图片中有一个小女孩，她手里拿着一个穿着粉色裙子的白色小熊玩偶。背景中有红色花朵和石头结构，可能是一个花园或庭院。
+```
+
+The sample input image (fetched from the [COCO dataset](https://cocodataset.org/#explore?id=264959)) is:
+
+
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
new file mode 100644
index 00000000000..e0a07c59aa8
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v/chat.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import time
+import argparse
+import requests
+import torch
+from PIL import Image
+from ipex_llm.transformers import AutoModel
+from transformers import AutoTokenizer
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `chat()` API for MiniCPM-V model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="openbmb/MiniCPM-V-2_6",
+                        help='The huggingface repo id for the MiniCPM-V model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--image-url-or-path', type=str,
+                        default='http://farm6.staticflickr.com/5268/5602445367_3504763978_z.jpg',
+                        help='The URL or path to the image to infer')
+    parser.add_argument('--prompt', type=str, default="What is in the image?",
+                        help='Prompt to infer')
+    parser.add_argument('--stream', action='store_true',
+                        help='Whether to chat in streaming mode')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+    image_path = args.image_url_or_path
+
+    # Load the model in 4-bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModel.from_pretrained(model_path,
+                                      load_in_low_bit="sym_int4",
+                                      optimize_model=True,
+                                      trust_remote_code=True,
+                                      use_cache=True,
+                                      torch_dtype=torch.float32,
+                                      modules_to_not_convert=["vpm", "resampler"])
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path,
+                                              trust_remote_code=True)
+    model.eval()
+
+    query = args.prompt
+    if os.path.exists(image_path):
+        image = Image.open(image_path).convert('RGB')
+    else:
+        image = Image.open(requests.get(image_path, stream=True).raw).convert('RGB')
+
+    # Generate predicted tokens
+    # here the prompt formatting follows https://huggingface.co/openbmb/MiniCPM-V-2_6/blob/main/README.md
+    msgs = [{'role': 'user', 'content': [image, args.prompt]}]
+
+    if args.stream:
+        res = model.chat(
+            image=None,
+            msgs=msgs,
+            tokenizer=tokenizer,
+            stream=True
+        )
+
+        print('-'*20, 'Input Image', '-'*20)
+        print(image_path)
+        print('-'*20, 'Input Prompt', '-'*20)
+        print(args.prompt)
+        print('-'*20, 'Stream Chat Output', '-'*20)
+        for new_text in res:
+            print(new_text, flush=True, end='')
+    else:
+        st = time.time()
+        res = model.chat(
+            image=None,
+            msgs=msgs,
+            tokenizer=tokenizer,
+        )
+        end = time.time()
+
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Input Image', '-'*20)
+        print(image_path)
+        print('-'*20, 'Input Prompt', '-'*20)
+        print(args.prompt)
+        print('-'*20, 'Chat Output', '-'*20)
+        print(res)
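
To extend `chat.py` into a multi-turn conversation, the upstream [MiniCPM-V-2_6 model card](https://huggingface.co/openbmb/MiniCPM-V-2_6) keeps the history by appending the previous answer to `msgs` before asking the next question. Below is a minimal sketch along those lines; it assumes the non-streaming branch of `chat.py` above has already produced `model`, `tokenizer`, `msgs` and the answer `res`, and the follow-up question is only a placeholder.

```python
# Multi-turn sketch following the upstream MiniCPM-V-2_6 model card.
# Assumes `model`, `tokenizer`, `msgs` and the non-streaming answer `res`
# from chat.py above are still in scope.
msgs.append({'role': 'assistant', 'content': [res]})                     # keep the previous answer in the history
msgs.append({'role': 'user', 'content': ['Describe the background.']})   # placeholder follow-up question

follow_up = model.chat(
    image=None,
    msgs=msgs,
    tokenizer=tokenizer
)
print(follow_up)
```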