diff --git a/python/llm/src/ipex_llm/transformers/npu_models/convert.py b/python/llm/src/ipex_llm/transformers/npu_models/convert.py
index 4aa45b0054ae..2842799b1605 100644
--- a/python/llm/src/ipex_llm/transformers/npu_models/convert.py
+++ b/python/llm/src/ipex_llm/transformers/npu_models/convert.py
@@ -93,6 +93,10 @@ def replace_with_QuantizedLinear(layer, qtype, device, modules_to_not_convert,
             if (layer.in_features == 3584 and layer.out_features == 152064):
                 qtype = "sym_int8_rtn"
                 iqtype = ggml_tensor_qtype[qtype]
+        if qtype == "sym_int4_rtn":
+            if (layer.in_features == 18944 and layer.out_features == 3584):
+                qtype = "sym_int8_rtn"
+                iqtype = ggml_tensor_qtype[qtype]
         enable_scale_search = os.environ.get("IPEX_LLM_NPU_QUANTIZATION_OPT", "0") != "0"
         qweights, scale = ggml_convert_qtype(layer.weight.data.to(torch.float32), iqtype,
                                              device=device,