Skip to content

Commit

Permalink
lint
Browse files — browse the repository at this point in the history
  • Loading branch information
charlifu committed Nov 14, 2024
1 parent a7e9918 commit 7e8afeb
Showing 1 changed file with 2 additions and 1 deletion.
3 changes: 2 additions & 1 deletion vllm/model_executor/layers/quantization/fp8.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,8 @@ def process_weights_after_loading(self, layer: Module) -> None:
if envs.VLLM_FP8_PADDING and weight.stride(-1) == 1 \
and (weight.stride(-2) * weight.element_size()) % 512 == 0:
num_pad = 256 // weight.element_size()
weight = F.pad(weight, (0, num_pad), "constant", 0)[..., :-num_pad]
weight = F.pad(weight, (0, num_pad), "constant",
0)[..., :-num_pad]
torch.cuda.empty_cache()

# Update layer with new values.
Expand Down

0 comments on commit 7e8afeb

Please sign in to comment.