From 8974872ef5e59cbfae9b88c7c4b92ed202829a1d Mon Sep 17 00:00:00 2001
From: Cody Yu
Date: Thu, 27 Jun 2024 15:08:20 -0700
Subject: [PATCH] Guard fused MoE kernel against more than 65536 tokens

---
 vllm/model_executor/layers/fused_moe/fused_moe.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py
index 4d0160ff296a0..676ff748fd979 100644
--- a/vllm/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm/model_executor/layers/fused_moe/fused_moe.py
@@ -392,6 +392,11 @@ def fused_experts(hidden_states: torch.Tensor,
     M, _ = hidden_states.shape
     E, N, _ = w1.shape
 
+    if M > 65536:
+        # https://github.com/vllm-project/vllm/issues/5938
+        raise ValueError("MoE kernel does not support more than 65536 tokens, "
+                         f"but got {M}")
+
     if override_config:
         config = override_config
     else:
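
Note: for context, a minimal caller-side sketch of working within this limit.
This is independent of vLLM and not part of the patch; `run_in_chunks`,
`moe_fn`, and `MAX_MOE_TOKENS` are hypothetical names. The idea is to split
an oversized batch along the token dimension into slices of at most 65536
rows before invoking the kernel, so the guard above is never triggered.

    import torch

    MAX_MOE_TOKENS = 65536  # limit enforced by the guard in this patch

    def run_in_chunks(hidden_states: torch.Tensor, moe_fn) -> torch.Tensor:
        # moe_fn stands in for a kernel entry point such as fused_experts;
        # each slice has at most MAX_MOE_TOKENS rows, and the per-chunk
        # outputs are concatenated back along the token dimension.
        outputs = [
            moe_fn(hidden_states[start:start + MAX_MOE_TOKENS])
            for start in range(0, hidden_states.shape[0], MAX_MOE_TOKENS)
        ]
        return torch.cat(outputs, dim=0)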