diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index 91b8cda6583..94a163690f0 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -538,7 +538,7 @@ def __init__(self, input_features, output_features, qtype, bias=True,
         # The condition makes sure that empty cache only takes effect if this layer is lm_head.
         # For other models like llama, lm_cache will be applied as well
         # since performance isn't impacted.
-        self.is_lm_head = self.in_len * self.out_len >= 30000 * 4096
+        self.is_lm_head = self.in_len * self.out_len >= 32000 * 4096 and self.bias is None
         self.low_memory_mode = self.is_lm_head
 
     def forward(self, x: torch.Tensor):
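
The change tightens the lm_head detection heuristic in two ways: the size threshold moves from 30000 * 4096 to 32000 * 4096 (32000 matches a Llama-style vocabulary size, 4096 a Llama-7B hidden size), and the layer must additionally be bias-free. Below is a minimal standalone sketch of the new condition, assuming the stated intent; the helper is_lm_head_weight is hypothetical and not part of the patch.

import torch
from typing import Optional

def is_lm_head_weight(in_len: int, out_len: int,
                      bias: Optional[torch.Tensor]) -> bool:
    # Weight at least as large as a 32000-vocab x 4096-hidden projection,
    # and no bias: lm_head layers are typically bias-free, so requiring
    # `bias is None` filters out other large linear layers of similar size.
    return in_len * out_len >= 32000 * 4096 and bias is None

# A Llama-7B-shaped lm_head (hidden 4096 -> vocab 32000, no bias) qualifies:
assert is_lm_head_weight(4096, 32000, None)
# An equally large projection that carries a bias is now excluded:
assert not is_lm_head_weight(4096, 32000, torch.zeros(32000))

Under this reading, the extra `self.bias is None` clause keeps low_memory_mode from being enabled on large bias-carrying linear layers that merely exceed the size threshold.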