From 92dfed77bed0bff4828de811d259f03ea0a2bde3 Mon Sep 17 00:00:00 2001
From: binbin Deng <108676127+plusbang@users.noreply.github.com>
Date: Thu, 28 Mar 2024 09:35:48 +0800
Subject: [PATCH] LLM: fix abnormal output of fp16 deepspeed autotp (#10558)

---
 python/llm/src/ipex_llm/transformers/low_bit_linear.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index f59724ba229..35093106f52 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -702,12 +702,16 @@ def forward(self, x: torch.Tensor):
                 if self.weight_type == 2:
                     self.weight = self.weight.transpose(0, 1).contiguous()
                     self.weight_type = 1
-                return F.linear(x, self.weight, self.bias)
+                result = F.linear(x, self.weight, self.bias)
             else:
                 if self.weight_type == 1:
                     self.weight = self.weight.transpose(0, 1).contiguous()
                     self.weight_type = 2
-                return torch.ops.torch_ipex.matmul_bias_out(x, self.weight, self.bias)
+                result = torch.ops.torch_ipex.matmul_bias_out(x, self.weight, self.bias)
+            if self.mp_group is not None:
+                from deepspeed import comm as dist
+                dist.inference_all_reduce(result, group=self.mp_group)
+            return result
         else:
             if self.in_len == 4096 and self.weight_type != 3 or \
                     self.in_len == 11008 and self.weight_type != 1:
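
Note (reviewer context, not part of the patch): under DeepSpeed AutoTP, the row-parallel
linears hold only a slice of the weight's input dimension on each rank, so F.linear /
matmul_bias_out produces a partial sum locally; returning that partial result without the
inference_all_reduce over mp_group is the likely source of the abnormal fp16 output this
patch fixes. Below is a minimal single-process sketch of the idea, simulating two ranks
with plain tensors; the tensor names and sizes are illustrative, not taken from the patch.

    import torch
    import torch.nn.functional as F

    torch.manual_seed(0)
    x = torch.randn(1, 8)        # input activation
    w = torch.randn(4, 8)        # full weight of a Linear(8 -> 4)
    full = F.linear(x, w)        # reference output on a single device

    # Row-parallel sharding splits the reduction (in_features) dimension,
    # so each rank multiplies its input slice against its weight slice.
    x0, x1 = x.chunk(2, dim=1)
    w0, w1 = w.chunk(2, dim=1)
    partial0 = F.linear(x0, w0)  # what rank 0 would compute locally
    partial1 = F.linear(x1, w1)  # what rank 1 would compute locally

    # Each partial result alone is wrong; only their sum matches the reference.
    # dist.inference_all_reduce(result, group=self.mp_group) performs this
    # summation across ranks in the patched forward().
    reduced = partial0 + partial1
    print(torch.allclose(full, reduced, atol=1e-5))  # -> True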