diff --git a/samples/cpp/chat_sample/README.md b/samples/cpp/chat_sample/README.md
index a2eccb4d3d..3f736985c2 100644
--- a/samples/cpp/chat_sample/README.md
+++ b/samples/cpp/chat_sample/README.md
@@ -34,3 +34,11 @@ UnicodeEncodeError: 'charmap' codec can't encode character '\u25aa' in position
 If you encounter the error described in the example when sample is printing output to the Windows console, it is likely due to the default Windows encoding not supporting certain Unicode characters. To resolve this:
 1. Enable Unicode characters for Windows cmd - open `Region` settings from `Control panel`. `Administrative`->`Change system locale`->`Beta: Use Unicode UTF-8 for worldwide language support`->`OK`. Reboot.
 2. Enable UTF-8 mode by setting environment variable `PYTHONIOENCODING="utf8"`.
+
+#### Missing chat template
+
+If you encounter an exception indicating a missing "chat template" when launching `ov::genai::LLMPipeline` in chat mode, it likely means the model was not tuned for chat functionality. To work around this, manually add the chat template to the `tokenizer_config.json` of your model.
+The following template can be used as a default, but it may not work properly with every model:
+```
+"chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n<|im_start|>assistant\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>\n'}}{% endif %}{% endfor %}",
+```
diff --git a/samples/python/chat_sample/README.md b/samples/python/chat_sample/README.md
index 983789d0eb..c07023391f 100644
--- a/samples/python/chat_sample/README.md
+++ b/samples/python/chat_sample/README.md
@@ -22,3 +22,21 @@ To enable Unicode characters for Windows cmd open `Region` settings from `Contro
 Discrete GPUs (dGPUs) usually provide better performance compared to CPUs. It is recommended to run larger models on a dGPU with 32GB+ RAM.
 For example, the model meta-llama/Llama-2-13b-chat-hf can benefit from being run on a dGPU. Modify the source code to change the device for inference to the GPU.
 See https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md#supported-models for the list of supported models.
+
+
+## Troubleshooting
+### Missing chat template
+
+If you encounter an exception indicating a missing "chat template" when launching `openvino_genai.LLMPipeline` in chat mode, it likely means the model was not tuned for chat functionality. To work around this, manually add the chat template to the `tokenizer_config.json` of your model.
+The following template can be used as a default, but it may not work properly with every model:
+```
+"chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n<|im_start|>assistant\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>\n'}}{% endif %}{% endfor %}",
+```
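+For reference, here is a minimal sketch of what `tokenizer_config.json` may look like after the edit. The `bos_token` and `eos_token` values below are illustrative and model-specific; keep the fields your model's config already has and only add the `chat_template` entry:
+```
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "chat_template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n<|im_start|>assistant\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>\n'}}{% endif %}{% endfor %}"
+}
+```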
\ No newline at end of file
diff --git a/src/cpp/src/block_manager.hpp b/src/cpp/src/block_manager.hpp
index 3b1a663235..3e80217f14 100644
--- a/src/cpp/src/block_manager.hpp
+++ b/src/cpp/src/block_manager.hpp
@@ -277,7 +277,6 @@ class BlockManager {
             }
             phisical_blocks_released += released_count;
         }
-        phisical_blocks_released = phisical_blocks_released;
 
         return num_required_blocks <= phisical_blocks_released;
     }
diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp
index 507d988a6a..1594dbd583 100644
--- a/src/cpp/src/llm_pipeline.cpp
+++ b/src/cpp/src/llm_pipeline.cpp
@@ -271,6 +271,7 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase {
 
         m_history.push_back({{"role", "system"}, {"content", system_message}});
         constexpr bool add_generation_prompt = false;
+
         m_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt);
     }
 
diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp
index b1e36033ee..748daa5875 100644
--- a/src/cpp/src/tokenizer.cpp
+++ b/src/cpp/src/tokenizer.cpp
@@ -368,6 +368,11 @@ class Tokenizer::TokenizerImpl {
                                     bool add_generation_prompt,
                                     const std::string& chat_template) const {
         auto chat_tpl = chat_template.empty() ? m_chat_template : chat_template;
+        OPENVINO_ASSERT(!chat_tpl.empty(),
+                        "Chat template wasn't found. This may indicate that the model wasn't trained for chat scenario."
+                        " Please add 'chat_template' to tokenizer_config.json to use the model in chat scenario."
+                        " For more information see the section Troubleshooting in README.md");
+
         // Jinja2Cpp does not support slicing, e.g. [1:].
         // In templates slicing is used typically in the header to find system prompt.
         // If header containts that typical expression we update template and
@@ -433,8 +438,6 @@
                 "For exmaple: user{user_prompt}model");
         }
     }
-
-
 };
 
 Tokenizer::Tokenizer(const std::string& tokenizer_path, const ov::AnyMap& plugin_config) {
diff --git a/src/cpp/src/tokenizers_path.hpp b/src/cpp/src/tokenizers_path.hpp
index d2c3ef3b5e..4899daccc4 100644
--- a/src/cpp/src/tokenizers_path.hpp
+++ b/src/cpp/src/tokenizers_path.hpp
@@ -86,7 +86,7 @@ std::filesystem::path tokenizers_relative_to_genai() {
 // was already defined.
 class ScopedVar {
 public:
-    bool was_already_set;
+    bool was_already_set{false};
     static constexpr char ENVIRONMENT_VARIABLE_NAME[] = "OPENVINO_TOKENIZERS_PATH_GENAI";
     explicit ScopedVar(const std::string& environment_variable_value) {
 #ifdef _WIN32