From 42281319e90523d646c47692a43bdcc6b78ecb49 Mon Sep 17 00:00:00 2001
From: Oleg Pipikin
Date: Tue, 30 Jul 2024 20:20:31 +0200
Subject: [PATCH] Fix to throw exception in case of empty chat template in chat scenario (#697)

---
 samples/cpp/chat_sample/README.md    |  8 ++++++++
 samples/python/chat_sample/README.md | 10 ++++++++++
 src/cpp/src/llm_pipeline.cpp         |  1 +
 src/cpp/src/tokenizer.cpp            |  7 +++++--
 4 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/samples/cpp/chat_sample/README.md b/samples/cpp/chat_sample/README.md
index a2eccb4d3d..3f736985c2 100644
--- a/samples/cpp/chat_sample/README.md
+++ b/samples/cpp/chat_sample/README.md
@@ -34,3 +34,11 @@ UnicodeEncodeError: 'charmap' codec can't encode character '\u25aa' in position
 If you encounter the error described in the example when sample is printing output to the Windows console, it is likely due to the default Windows encoding not supporting certain Unicode characters. To resolve this:
 1. Enable Unicode characters for Windows cmd - open `Region` settings from `Control panel`. `Administrative`->`Change system locale`->`Beta: Use Unicode UTF-8 for worldwide language support`->`OK`. Reboot.
 2. Enable UTF-8 mode by setting environment variable `PYTHONIOENCODING="utf8"`.
+
+#### Missing chat template
+
+If you encounter an exception indicating a missing "chat template" when launching the `ov::genai::LLMPipeline` in chat mode, it likely means the model was not tuned for chat functionality. To work around this, manually add a chat template to the tokenizer_config.json of your model.
+The following template can be used as a default, but it may not work properly with every model:
+```
+"chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n<|im_start|>assistant\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>\n'}}{% endif %}{% endfor %}",
+```
diff --git a/samples/python/chat_sample/README.md b/samples/python/chat_sample/README.md
index 983789d0eb..c07023391f 100644
--- a/samples/python/chat_sample/README.md
+++ b/samples/python/chat_sample/README.md
@@ -22,3 +22,13 @@ To enable Unicode characters for Windows cmd open `Region` settings from `Contro
 Discrete GPUs (dGPUs) usually provide better performance compared to CPUs. It is recommended to run larger models on a dGPU with 32GB+ RAM. For example, the model meta-llama/Llama-2-13b-chat-hf can benefit from being run on a dGPU. Modify the source code to change the device for inference to the GPU.
 See https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md#supported-models for the list of supported models.
+
+
+## Troubleshooting
+### Missing chat template
+
+If you encounter an exception indicating a missing "chat template" when launching the `ov::genai::LLMPipeline` in chat mode, it likely means the model was not tuned for chat functionality. To work around this, manually add a chat template to the tokenizer_config.json of your model.
+The following template can be used as a default, but it may not work properly with every model:
+```
+"chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n<|im_start|>assistant\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>\n'}}{% endif %}{% endfor %}",
+```
\ No newline at end of file
diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp
index 507d988a6a..1594dbd583 100644
--- a/src/cpp/src/llm_pipeline.cpp
+++ b/src/cpp/src/llm_pipeline.cpp
@@ -271,6 +271,7 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
         m_history.push_back({{"role", "system"}, {"content", system_message}});
         constexpr bool add_generation_prompt = false;
+
         m_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt);
     }
diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp
index b1e36033ee..748daa5875 100644
--- a/src/cpp/src/tokenizer.cpp
+++ b/src/cpp/src/tokenizer.cpp
@@ -368,6 +368,11 @@
                                     bool add_generation_prompt,
                                     const std::string& chat_template) const {
         auto chat_tpl = chat_template.empty() ? m_chat_template : chat_template;
+        OPENVINO_ASSERT(!chat_tpl.empty(),
+                        "Chat template wasn't found. This may indicate that the model wasn't trained for a chat scenario."
+                        " Please add 'chat_template' to tokenizer_config.json to use the model in a chat scenario."
+                        " For more information, see the Troubleshooting section in README.md");
+
         // Jinja2Cpp does not support slicing, e.g. [1:].
         // In templates slicing is used typically in the header to find system prompt.
         // If header containts that typical expression we update template and
@@ -433,8 +438,6 @@
                 "For exmaple: user{user_prompt}model");
         }
     }
-
-
 };
 
 Tokenizer::Tokenizer(const std::string& tokenizer_path, const ov::AnyMap& plugin_config) {
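
For reference, below is a minimal usage sketch that is not part of the patch. It shows how the new check surfaces through the public C++ API: the model directory path, system prompt, and generation settings are placeholders, and the call pattern follows the chat sample. With this change, a model whose tokenizer_config.json has no chat template is reported with the descriptive message from the OPENVINO_ASSERT above instead of producing a malformed prompt.

```
#include "openvino/genai/llm_pipeline.hpp"

#include <exception>
#include <iostream>
#include <string>

int main() {
    try {
        // "./model_dir" is a placeholder for a directory with an exported model.
        ov::genai::LLMPipeline pipe("./model_dir", "CPU");

        ov::genai::GenerationConfig config;
        config.max_new_tokens = 64;

        // In chat mode the pipeline applies the chat template to the history;
        // if tokenizer_config.json lacks "chat_template", the OPENVINO_ASSERT
        // added in tokenizer.cpp raises a descriptive exception.
        pipe.start_chat("You are a helpful assistant.");
        std::string reply = pipe.generate("Hello!", config);
        std::cout << reply << '\n';
        pipe.finish_chat();
    } catch (const std::exception& e) {
        std::cerr << e.what() << '\n';  // e.g. "Chat template wasn't found. ..."
        return 1;
    }
    return 0;
}
```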