diff --git a/python/llm/example/NPU/HF-Transformers-AutoModels/LLM/Pipeline-Models/qwen2.py b/python/llm/example/NPU/HF-Transformers-AutoModels/LLM/Pipeline-Models/qwen2.py
index 6ca973c69e75..9c47f2715241 100644
--- a/python/llm/example/NPU/HF-Transformers-AutoModels/LLM/Pipeline-Models/qwen2.py
+++ b/python/llm/example/NPU/HF-Transformers-AutoModels/LLM/Pipeline-Models/qwen2.py
@@ -24,19 +24,6 @@
 logger = logging.get_logger(__name__)
 
 
-def get_prompt(message: str, chat_history: list[tuple[str, str]],
-               system_prompt: str) -> str:
-    texts = [f'[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n']
-    # The first user input is _not_ stripped
-    do_strip = False
-    for user_input, response in chat_history:
-        user_input = user_input.strip() if do_strip else user_input
-        do_strip = True
-        texts.append(f'{user_input} [/INST] {response.strip()} </s><s>[INST] ')
-    message = message.strip() if do_strip else message
-    texts.append(f'{message} [/INST]')
-    return ''.join(texts)
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Predict Tokens using `generate()` API for npu model"
@@ -48,7 +35,7 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
         help="The huggingface repo id for the Qwen2 model to be downloaded"
         ", or the path to the huggingface checkpoint folder",
     )
-    parser.add_argument('--prompt', type=str, default="What is AI?",
+    parser.add_argument('--prompt', type=str, default="AI是什么?",
                         help='Prompt to infer')
     parser.add_argument("--n-predict", type=int, default=32, help="Max tokens to predict")
     parser.add_argument("--max-context-len", type=int, default=1024)