
Commit

[Core] Avoid the need to pass None values to Sequence.inputs (vll…
DarkLight1337 authored and robertgshaw2-neuralmagic committed Jun 8, 2024
1 parent f900bcc commit 6824b2f
Showing 7 changed files with 6 additions and 19 deletions.
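The gist of the change, as a minimal standalone sketch (plain Python dicts; the key names are taken from the test diffs below): before this commit, callers had to spell out every key of the inputs dict, including a boilerplate "multi_modal_data": None, whereas afterwards the optional keys can simply be omitted.

# Before this commit: every LLMInputs key had to be present, even when unused.
inputs_before = {
    "prompt": "one two three",
    "prompt_token_ids": [1, 2, 3],
    "multi_modal_data": None,  # pure boilerplate that this commit removes
}

# After this commit: optional keys ("prompt", "multi_modal_data") may be left out.
inputs_after = {
    "prompt_token_ids": [1, 2, 3],
}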
2 changes: 0 additions & 2 deletions tests/core/test_block_manager.py
@@ -234,7 +234,6 @@ def test_append_slot_cow():
                       inputs={
                           "prompt": "one two three",
                           "prompt_token_ids": [1, 2, 3],
-                          "multi_modal_data": None
                       },
                       block_size=block_size)

@@ -525,7 +524,6 @@ def test_sliding_window_multi_seq():
                       inputs={
                           "prompt": "one two three",
                           "prompt_token_ids": [0, 1, 2],
-                          "multi_modal_data": None
                       },
                       block_size=block_size)
     seq_group = SequenceGroup(request_id="1",
7 changes: 1 addition & 6 deletions tests/core/utils.py
@@ -25,7 +25,6 @@ def create_dummy_prompt(
                       inputs={
                           "prompt": prompt_str,
                           "prompt_token_ids": prompt_tokens,
-                          "multi_modal_data": None,
                       },
                       block_size=block_size)
     seq_group = SequenceGroup(request_id=request_id,
@@ -103,11 +102,7 @@ def create_seq_group(
     for seq_id_offset, output_len in enumerate(seq_output_lens):
         seq = Sequence(
             seq_id=seq_id_start + seq_id_offset,
-            inputs={
-                "prompt": "",
-                "prompt_token_ids": prompt_token_ids,
-                "multi_modal_data": None,
-            },
+            inputs={"prompt_token_ids": prompt_token_ids},
             block_size=16,
         )

6 changes: 1 addition & 5 deletions tests/engine/output_processor/test_stop_checker.py
@@ -15,11 +15,7 @@ def sequence_with_eos(text: str, eos_token: str,
     """
     seq = Sequence(
         seq_id=0,
-        inputs={
-            "prompt": "",
-            "prompt_token_ids": [],
-            "multi_modal_data": None,
-        },
+        inputs={"prompt_token_ids": []},
         block_size=16,
         eos_token_id=eos_token_id,
     )
1 change: 0 additions & 1 deletion tests/test_cache_block_hashing.py
@@ -74,7 +74,6 @@ def test_auto_prefix_caching(model: str, block_size: int, max_num_seqs: int,
                        inputs={
                            "prompt": prompt,
                            "prompt_token_ids": prompt_token_ids,
-                           "multi_modal_data": None,
                        },
                        block_size=block_size,
                        eos_token_id=tokenizer.tokenizer.eos_token_id,
1 change: 0 additions & 1 deletion tests/tokenization/test_detokenize.py
@@ -126,7 +126,6 @@ def create_sequence(prompt_token_ids=None):
         inputs={
             "prompt": "<s>",
             "prompt_token_ids": prompt_token_ids,
-            "multi_modal_data": None,
         },
         block_size=16,
     )
4 changes: 2 additions & 2 deletions vllm/inputs.py
@@ -126,5 +126,5 @@ class TextTokensPrompt(TypedDict):
 
 class LLMInputs(TypedDict):
     prompt_token_ids: List[int]
-    prompt: Optional[str]
-    multi_modal_data: Optional["MultiModalData"]
+    prompt: NotRequired[Optional[str]]
+    multi_modal_data: NotRequired[Optional["MultiModalData"]]
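The LLMInputs change relies on NotRequired, which marks individual TypedDict keys as optional for static type checkers (available from typing on Python 3.11+ and from typing_extensions before that). A minimal sketch of the pattern, independent of vLLM; the field names mirror LLMInputs, but the MultiModalData type is replaced by a stand-in:

from typing import List, Optional

from typing_extensions import NotRequired, TypedDict


class ExampleInputs(TypedDict):
    """Illustrative TypedDict using the NotRequired pattern from this commit."""
    prompt_token_ids: List[int]                       # still a required key
    prompt: NotRequired[Optional[str]]                # may be omitted entirely
    multi_modal_data: NotRequired[Optional[object]]   # stand-in for MultiModalData


# Both assignments type-check; without NotRequired, the checker demanded all three keys.
full: ExampleInputs = {"prompt_token_ids": [1, 2, 3], "prompt": "one two three"}
minimal: ExampleInputs = {"prompt_token_ids": [1, 2, 3]}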
4 changes: 2 additions & 2 deletions vllm/sequence.py
@@ -249,15 +249,15 @@ def __init__(
 
     @property
     def prompt(self) -> Optional[str]:
-        return self.inputs["prompt"]
+        return self.inputs.get("prompt")
 
     @property
     def prompt_token_ids(self) -> List[int]:
         return self.inputs["prompt_token_ids"]
 
     @property
     def multi_modal_data(self) -> Optional["MultiModalData"]:
-        return self.inputs["multi_modal_data"]
+        return self.inputs.get("multi_modal_data")
 
     @property
     def lora_int_id(self) -> int:
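Because the optional keys may now be absent from the inputs dict at runtime, the accessors switch from plain indexing (which raises KeyError for a missing key) to dict.get (which returns None). A quick standalone illustration of the difference:

inputs = {"prompt_token_ids": [1, 2, 3]}  # "prompt" and "multi_modal_data" omitted

print(inputs.get("prompt"))        # None, same result as passing "prompt": None explicitly
print(inputs["prompt_token_ids"])  # [1, 2, 3], required keys are still indexed directly

try:
    inputs["prompt"]               # plain indexing would now fail for omitted optional keys
except KeyError as exc:
    print(f"KeyError: {exc}")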
