From 6a5ab372732af5b2e16973e03eac92ef41fb0faf Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Wed, 18 Sep 2024 19:14:03 -0700 Subject: [PATCH] Revert "[Misc][Bugfix] Disable guided decoding for mistral tokenizer (#8521)" This reverts commit ee2bceaaa67bd2f420f62a924da5834a7c1c862b. --- .../guided_decoding/__init__.py | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index f4fe8a7307c04..7161e83952a3d 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -6,7 +6,6 @@ from vllm.model_executor.guided_decoding.guided_fields import ( GuidedDecodingRequest) from vllm.sampling_params import LogitsProcessor -from vllm.transformers_utils.tokenizer import MistralTokenizer async def get_guided_decoding_logits_processor( @@ -16,23 +15,12 @@ async def get_guided_decoding_logits_processor( request = _adapt_request_for_tool_use(request) if guided_decoding_backend == 'outlines': - if isinstance(tokenizer, MistralTokenizer): - raise NotImplementedError( - "Guided decoding with 'outlines' is currently not supported " - "for Mistral tokenizer. Please consider contributing to the " - "'outlines' project if you are interested in this feature.") # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193 from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa get_outlines_guided_decoding_logits_processor) return await get_outlines_guided_decoding_logits_processor( request, tokenizer) if guided_decoding_backend == 'lm-format-enforcer': - if isinstance(tokenizer, MistralTokenizer): - raise NotImplementedError( - "Guided decoding with 'lm-format-enforcer' is currently not " - "supported for Mistral tokenizer. 
Please consider contributing " - "to the 'lm-format-enforcer' project if you are interested " - "in this feature.") from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import ( # noqa get_lm_format_enforcer_guided_decoding_logits_processor) return await get_lm_format_enforcer_guided_decoding_logits_processor( @@ -49,23 +37,12 @@ def get_local_guided_decoding_logits_processor( # request = _adapt_request_for_tool_use(request) if guided_decoding_backend == 'outlines': - if isinstance(tokenizer, MistralTokenizer): - raise NotImplementedError( - "Guided decoding with 'outlines' is currently not supported " - "for Mistral tokenizer. Please consider contributing to the " - "'outlines' project if you are interested in this feature.") # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193 from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa get_local_outlines_guided_decoding_logits_processor) return get_local_outlines_guided_decoding_logits_processor( guided_options, tokenizer) if guided_decoding_backend == 'lm-format-enforcer': - if isinstance(tokenizer, MistralTokenizer): - raise NotImplementedError( - "Guided decoding with 'lm-format-enforcer' is currently not " - "supported for Mistral tokenizer. Please consider contributing " - "to the 'lm-format-enforcer' project if you are interested " - "in this feature.") from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import ( # noqa get_local_lm_format_enforcer_guided_decoding_logits_processor) return get_local_lm_format_enforcer_guided_decoding_logits_processor(