From 0a95c556a1c8206b042eea84d6150e00402d6ade Mon Sep 17 00:00:00 2001
From: Kai Huang
Date: Tue, 2 Apr 2024 09:21:38 +0800
Subject: [PATCH] Fix starcoder first token perf (#10612)

* add bias check

* update
---
 python/llm/src/ipex_llm/transformers/low_bit_linear.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index 91b8cda6583..94a163690f0 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -538,7 +538,7 @@ def __init__(self, input_features, output_features, qtype, bias=True,
         # The condition makes sure that empty cache only takes effect if this layer is lm_head.
         # For other models like llama, lm_cache will be applied as well
         # since performance isn't impacted.
-        self.is_lm_head = self.in_len * self.out_len >= 30000 * 4096
+        self.is_lm_head = self.in_len * self.out_len >= 32000 * 4096 and self.bias is None
         self.low_memory_mode = self.is_lm_head
 
     def forward(self, x: torch.Tensor):
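
A minimal sketch of the heuristic this patch adjusts, for context. The SketchLinear class, the empty_cache placement, and the Starcoder-like shapes below are illustrative assumptions, not the actual LowBitLinear implementation in low_bit_linear.py. The idea: the old size-only check treated any layer with at least 32000*4096 weights as the lm_head and enabled low-memory mode (emptying the device cache around its forward); Starcoder's large MLP projections pass that size test but, unlike a typical lm_head, carry a bias, so the added "self.bias is None" condition keeps them out of the cache-emptying slow path that was hurting first-token latency.

import torch
import torch.nn as nn

class SketchLinear(nn.Linear):
    """Hypothetical stand-in for LowBitLinear's lm_head heuristic."""

    def __init__(self, in_len, out_len, bias=True, device=None):
        super().__init__(in_len, out_len, bias=bias, device=device)
        self.in_len = in_len
        self.out_len = out_len
        # Old check: any layer with >= 32000*4096 weights counted as the
        # lm_head. Starcoder's MLP projections are that large but have a
        # bias, so the extra "self.bias is None" test excludes them.
        # (nn.Linear sets self.bias to None when constructed with bias=False.)
        self.is_lm_head = (self.in_len * self.out_len >= 32000 * 4096
                           and self.bias is None)
        self.low_memory_mode = self.is_lm_head

    def forward(self, x: torch.Tensor):
        out = super().forward(x)
        if self.low_memory_mode and hasattr(torch, "xpu") and torch.xpu.is_available():
            # Assumption: release cached device blocks after the one huge
            # lm_head matmul, mirroring the low-memory behavior the patch
            # gates. Running this on every large layer each forward is the
            # slowdown the bias check avoids.
            torch.xpu.empty_cache()
        return out

# Starcoder-15B-like shapes (meta device avoids allocating real weights):
# the biased 6144x24576 MLP projection no longer qualifies, while the
# bias-free 6144x49152 lm_head still does.
mlp = SketchLinear(6144, 24576, bias=True, device="meta")
head = SketchLinear(6144, 49152, bias=False, device="meta")
print(mlp.is_lm_head, head.is_lm_head)  # -> False True

Note that both example layers clear the 32000*4096 = 131,072,000 element threshold; only the presence of a bias separates them, which is why the one-line change is enough to restore Starcoder's first-token performance.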