From b64d9c6c7197549ca4991e9eb960bdc6a800c434 Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Tue, 23 Jul 2024 21:12:17 -0700
Subject: [PATCH] fix

---
 vllm/model_executor/models/chameleon.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py
index 6ece95495a026..7659f598bab94 100644
--- a/vllm/model_executor/models/chameleon.py
+++ b/vllm/model_executor/models/chameleon.py
@@ -125,7 +125,8 @@ def input_processor_for_chameleon(ctx: InputContext, llm_inputs: LLMInputs):
 
     # Appending sep token for chat mode to follow default processor
     # behavior
-    new_prompt += tokenizer.sep_token
+    if new_prompt is not None:
+        new_prompt += tokenizer.sep_token
     new_token_ids += [CHAMELEON_SEP_TOKEN_ID]
 
     # NOTE: Create a defensive copy of the original inputs