Commit

Update streaming in npu examples (#12495)
* feat: add streaming

* Update readme accordingly

---------

Co-authored-by: Yuwen Hu <[email protected]>
cranechu0131 and Oscilloscope98 authored Dec 4, 2024
1 parent a9e3f7f commit ffa9a9e
Showing 6 changed files with 69 additions and 41 deletions.
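All five example scripts are updated the same way: construct a `TextStreamer` around the tokenizer and pass it to `model.generate()`, which then prints tokens to stdout as they are decoded instead of printing the full output after generation finishes. A minimal standalone sketch of that pattern, with a placeholder checkpoint path (the examples in this commit load models through `ipex_llm.transformers.npu_model` rather than plain `transformers`):

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    # Placeholder model path for illustration; any causal LM checkpoint works.
    model_path = "path/to/model"
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)

    # TextStreamer writes each decoded token to stdout as generate() produces it;
    # skip_special_tokens=True keeps markers such as <s> out of the stream.
    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

    input_ids = tokenizer.encode("What is AI?", return_tensors="pt")
    # streamer=None (the default) would fall back to non-streaming generation.
    output = model.generate(input_ids, max_new_tokens=32, streamer=streamer)

The first hunk below is from the example README's argument list.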
@@ -136,6 +136,7 @@ Arguments info:
 - `--max-context-len MAX_CONTEXT_LEN`: Defines the maximum sequence length for both input and output tokens. It is default to be `1024`.
 - `--max-prompt-len MAX_PROMPT_LEN`: Defines the maximum number of tokens that the input prompt can contain. It is default to be `512`.
 - `--disable-transpose-value-cache`: Disable the optimization of transposing value cache.
+- `--disable-streaming`: Disable streaming mode of generation.
 - `--save-directory SAVE_DIRECTORY`: argument defining the path to save converted model. If it is a non-existing path, the original pretrained model specified by `REPO_ID_OR_MODEL_PATH` will be loaded, otherwise the lowbit model in `SAVE_DIRECTORY` will be loaded.

 ### Troubleshooting
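An illustrative invocation of one of the updated scripts with streaming disabled (the save directory is a placeholder; the remaining arguments and their defaults are described above):

    python baichuan2.py --save-directory ./converted_model --disable-streaming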
python/llm/example/NPU/HF-Transformers-AutoModels/LLM/baichuan2.py (13 additions, 8 deletions)
@@ -20,7 +20,7 @@
 import argparse
 
 from ipex_llm.transformers.npu_model import AutoModelForCausalLM
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, TextStreamer
 
 from transformers.utils import logging

@@ -56,6 +56,7 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
parser.add_argument("--max-context-len", type=int, default=1024)
parser.add_argument("--max-prompt-len", type=int, default=512)
parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)
parser.add_argument("--disable-streaming", action="store_true", default=False)
parser.add_argument("--save-directory", type=str,
required=True,
help="The path of folder to save converted model, "
@@ -94,6 +95,10 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
 )
 tokenizer = AutoTokenizer.from_pretrained(args.save_directory, trust_remote_code=True)
 
+if args.disable_streaming:
+    streamer = None
+else:
+    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

 DEFAULT_SYSTEM_PROMPT = """\
 """
@@ -105,19 +110,19 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
     for i in range(5):
         prompt = get_prompt(args.prompt, [], system_prompt=DEFAULT_SYSTEM_PROMPT)
         _input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        print("-" * 20, "Input", "-" * 20)
         print("input length:", len(_input_ids[0]))
+        print(prompt)
+        print("-" * 20, "Output", "-" * 20)
         st = time.time()
         output = model.generate(
-            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict
+            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict, streamer=streamer
         )
         end = time.time()
+        if args.disable_streaming:
+            output_str = tokenizer.decode(output[0], skip_special_tokens=False)
+            print(output_str)
         print(f"Inference time: {end-st} s")
-        input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
-        print("-" * 20, "Input", "-" * 20)
-        print(input_str)
-        output_str = tokenizer.decode(output[0], skip_special_tokens=False)
-        print("-" * 20, "Output", "-" * 20)
-        print(output_str)
 
 print("-" * 80)
 print("done")
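When `--disable-streaming` is set, the script passes `streamer=None` to `generate()`; since `None` is also the parameter's default in Hugging Face transformers, generation behaves exactly as before and the full output is decoded and printed only after the call returns. The changes to llama2.py, llama3.py, minicpm.py, and qwen.py below follow the same shape.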
python/llm/example/NPU/HF-Transformers-AutoModels/LLM/llama2.py (13 additions, 8 deletions)
@@ -20,7 +20,7 @@
 import argparse
 
 from ipex_llm.transformers.npu_model import AutoModelForCausalLM
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, TextStreamer
 
 from transformers.utils import logging

@@ -56,6 +56,7 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
parser.add_argument("--max-context-len", type=int, default=1024)
parser.add_argument("--max-prompt-len", type=int, default=512)
parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)
parser.add_argument("--disable-streaming", action="store_true", default=False)
parser.add_argument("--save-directory", type=str,
required=True,
help="The path of folder to save converted model, "
@@ -93,6 +94,10 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
 )
 tokenizer = AutoTokenizer.from_pretrained(args.save_directory, trust_remote_code=True)
 
+if args.disable_streaming:
+    streamer = None
+else:
+    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

 DEFAULT_SYSTEM_PROMPT = """\
 """
@@ -104,19 +109,19 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
     for i in range(5):
         prompt = get_prompt(args.prompt, [], system_prompt=DEFAULT_SYSTEM_PROMPT)
         _input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        print("-" * 20, "Input", "-" * 20)
         print("input length:", len(_input_ids[0]))
+        print(prompt)
+        print("-" * 20, "Output", "-" * 20)
         st = time.time()
         output = model.generate(
-            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict
+            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict, streamer=streamer
         )
         end = time.time()
+        if args.disable_streaming:
+            output_str = tokenizer.decode(output[0], skip_special_tokens=False)
+            print(output_str)
         print(f"Inference time: {end-st} s")
-        input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
-        print("-" * 20, "Input", "-" * 20)
-        print(input_str)
-        output_str = tokenizer.decode(output[0], skip_special_tokens=False)
-        print("-" * 20, "Output", "-" * 20)
-        print(output_str)
 
 print("-" * 80)
 print("done")
python/llm/example/NPU/HF-Transformers-AutoModels/LLM/llama3.py (13 additions, 8 deletions)
@@ -20,7 +20,7 @@
 import argparse
 
 from ipex_llm.transformers.npu_model import AutoModelForCausalLM
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, TextStreamer
 
 from transformers.utils import logging

@@ -57,6 +57,7 @@ def get_prompt(user_input: str, chat_history: list[tuple[str, str]],
parser.add_argument("--max-context-len", type=int, default=1024)
parser.add_argument("--max-prompt-len", type=int, default=512)
parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)
parser.add_argument("--disable-streaming", action="store_true", default=False)
parser.add_argument("--save-directory", type=str,
required=True,
help="The path of folder to save converted model, "
@@ -94,6 +95,10 @@ def get_prompt(user_input: str, chat_history: list[tuple[str, str]],
 )
 tokenizer = AutoTokenizer.from_pretrained(args.save_directory, trust_remote_code=True)
 
+if args.disable_streaming:
+    streamer = None
+else:
+    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

 DEFAULT_SYSTEM_PROMPT = """\
 """
@@ -105,19 +110,19 @@ def get_prompt(user_input: str, chat_history: list[tuple[str, str]],
     for i in range(5):
         prompt = get_prompt(args.prompt, [], system_prompt=DEFAULT_SYSTEM_PROMPT)
         _input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        print("-" * 20, "Input", "-" * 20)
         print("input length:", len(_input_ids[0]))
+        print(prompt)
+        print("-" * 20, "Output", "-" * 20)
         st = time.time()
         output = model.generate(
-            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict
+            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict, streamer=streamer
        )
         end = time.time()
+        if args.disable_streaming:
+            output_str = tokenizer.decode(output[0], skip_special_tokens=False)
+            print(output_str)
         print(f"Inference time: {end-st} s")
-        input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
-        print("-" * 20, "Input", "-" * 20)
-        print(input_str)
-        output_str = tokenizer.decode(output[0], skip_special_tokens=False)
-        print("-" * 20, "Output", "-" * 20)
-        print(output_str)
 
 print("-" * 80)
 print("done")
python/llm/example/NPU/HF-Transformers-AutoModels/LLM/minicpm.py (16 additions, 9 deletions)
@@ -20,7 +20,7 @@
 import argparse
 
 from ipex_llm.transformers.npu_model import AutoModelForCausalLM
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, TextStreamer
 
 from transformers.utils import logging

@@ -43,6 +43,7 @@
parser.add_argument("--max-context-len", type=int, default=1024)
parser.add_argument("--max-prompt-len", type=int, default=512)
parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)
parser.add_argument("--disable-streaming", action="store_true", default=False)
parser.add_argument("--save-directory", type=str,
required=True,
help="The path of folder to save converted model, "
@@ -80,26 +81,32 @@
 )
 tokenizer = AutoTokenizer.from_pretrained(args.save_directory, trust_remote_code=True)
 
+if args.disable_streaming:
+    streamer = None
+else:
+    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

print("-" * 80)
print("done")
with torch.inference_mode():

print("finish to load")
     for i in range(5):
-        _input_ids = tokenizer.encode("<用户>{}<AI>".format(args.prompt), return_tensors="pt")
+        prompt = "<用户>{}<AI>".format(args.prompt)
+        _input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        print("-" * 20, "Input", "-" * 20)
         print("input length:", len(_input_ids[0]))
+        print(prompt)
+        print("-" * 20, "Output", "-" * 20)
         st = time.time()
         output = model.generate(
-            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict
+            _input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict, streamer=streamer
        )
         end = time.time()
+        if args.disable_streaming:
+            output_str = tokenizer.decode(output[0], skip_special_tokens=False)
+            print(output_str)
         print(f"Inference time: {end-st} s")
-        input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
-        print("-" * 20, "Input", "-" * 20)
-        print(input_str)
-        output_str = tokenizer.decode(output[0], skip_special_tokens=False)
-        print("-" * 20, "Output", "-" * 20)
-        print(output_str)
 
 print("-" * 80)
 print("done")
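minicpm.py additionally refactors prompt construction: the MiniCPM chat wrapper `"<用户>{}<AI>"` (`<用户>` means "user") is first stored in a `prompt` variable, so the same string can be both tokenized and echoed in the new Input block.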
python/llm/example/NPU/HF-Transformers-AutoModels/LLM/qwen.py (13 additions, 8 deletions)
@@ -20,7 +20,7 @@
 import argparse
 
 from ipex_llm.transformers.npu_model import AutoModelForCausalLM
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, TextStreamer
 
 from transformers.utils import logging

@@ -45,6 +45,7 @@
parser.add_argument("--quantization_group_size", type=int, default=0)
parser.add_argument('--low-bit', type=str, default="sym_int4",
help='Load in low bit to use')
parser.add_argument("--disable-streaming", action="store_true", default=False)
parser.add_argument("--disable-transpose-value-cache", action="store_true", default=False)
parser.add_argument("--save-directory", type=str,
required=True,
@@ -84,6 +85,10 @@
 )
 tokenizer = AutoTokenizer.from_pretrained(args.save_directory, trust_remote_code=True)
 
+if args.disable_streaming:
+    streamer = None
+else:
+    streamer = TextStreamer(tokenizer=tokenizer, skip_special_tokens=True)

print("-" * 80)
print("done")
@@ -96,19 +101,19 @@
print("finish to load")
for i in range(3):
_input_ids = tokenizer([text], return_tensors="pt").input_ids
print("-" * 20, "Input", "-" * 20)
print("input length:", len(_input_ids[0]))
print(text)
print("-" * 20, "Output", "-" * 20)
st = time.time()
output = model.generate(
_input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict
_input_ids, num_beams=1, do_sample=False, max_new_tokens=args.n_predict, streamer=streamer
)
end = time.time()
if args.disable_streaming:
output_str = tokenizer.decode(output[0], skip_special_tokens=False)
print(output_str)
print(f"Inference time: {end-st} s")
input_str = tokenizer.decode(_input_ids[0], skip_special_tokens=False)
print("-" * 20, "Input", "-" * 20)
print(input_str)
output_str = tokenizer.decode(output[0], skip_special_tokens=False)
print("-" * 20, "Output", "-" * 20)
print(output_str)

print("-" * 80)
print("done")
