-
-
Notifications
You must be signed in to change notification settings - Fork 5.1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Loading status checks…
Add detokenization test
1 parent
9627425
commit fedea30
Showing
1 changed file
with
32 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
import pytest | ||
|
||
from vllm.entrypoints.llm import LLM | ||
from vllm.sampling_params import SamplingParams | ||
|
||
|
||
@pytest.mark.parametrize("model", ["facebook/opt-125m"])
def test_computed_prefix_blocks(model: str):
    """Exercise generation with detokenization disabled and enabled.

    Verifies three properties of the engine's optional detokenization:
    with ``detokenize=False`` the completion carries no text, with
    ``detokenize=True`` it carries non-empty text, and the generated
    ``token_ids`` are identical in both modes (detokenization must not
    alter sampling).

    NOTE(review): the name looks copied from a prefix-caching test — this
    function tests detokenization, not computed prefix blocks; consider
    renaming in a follow-up.
    """
    prompt = (
        "You are a helpful assistant. How do I build a car from cardboard and "
        "paper clips? Is there an easy to follow video tutorial available "
        "online for free?")

    engine = LLM(model=model)
    # Greedy decoding (temperature=0.0) makes both runs deterministic, so
    # the token_ids comparison below is meaningful.
    params = SamplingParams(max_tokens=10, temperature=0.0, detokenize=False)

    without_text = engine.generate(prompt, params)[0].outputs[0]
    params.detokenize = True
    with_text = engine.generate(prompt, params)[0].outputs[0]

    assert without_text.text == ''
    assert with_text.text != ''
    assert without_text.token_ids == with_text.token_ids