LLM: fix abnormal output of fp16 deepspeed autotp (#10558)
plusbang authored Mar 28, 2024
1 parent e619142 · commit 92dfed7
Showing 1 changed file with 6 additions and 2 deletions.
python/llm/src/ipex_llm/transformers/low_bit_linear.py
```diff
@@ -702,12 +702,16 @@ def forward(self, x: torch.Tensor):
                 if self.weight_type == 2:
                     self.weight = self.weight.transpose(0, 1).contiguous()
                     self.weight_type = 1
-                return F.linear(x, self.weight, self.bias)
+                result = F.linear(x, self.weight, self.bias)
             else:
                 if self.weight_type == 1:
                     self.weight = self.weight.transpose(0, 1).contiguous()
                     self.weight_type = 2
-                return torch.ops.torch_ipex.matmul_bias_out(x, self.weight, self.bias)
+                result = torch.ops.torch_ipex.matmul_bias_out(x, self.weight, self.bias)
+            if self.mp_group is not None:
+                from deepspeed import comm as dist
+                dist.inference_all_reduce(result, group=self.mp_group)
+            return result
         else:
             if self.in_len == 4096 and self.weight_type != 3 or \
                     self.in_len == 11008 and self.weight_type != 1:
```
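
Why this fixes the abnormal output: under DeepSpeed AutoTP the linear weights are sharded across ranks, so `F.linear` / `matmul_bias_out` on each rank produces only a partial sum of the full output. The fp16 path previously returned that partial result directly, skipping the tensor-parallel all-reduce, so the output was wrong whenever `self.mp_group` was set. Below is a minimal sketch of the pattern with hypothetical names (only `deepspeed.comm.inference_all_reduce` in the diff above is the repo's actual call); it uses plain `torch.distributed` and assumes an already-initialized process group:

```python
# Minimal sketch (hypothetical names, not the repo's code): under tensor
# parallelism the weight is sharded along the input dimension, so each
# rank's matmul yields a tensor of the full output shape whose values
# are only a partial sum that must be reduced across ranks.
from typing import Optional

import torch
import torch.distributed as dist
import torch.nn.functional as F


def row_parallel_linear(x_shard: torch.Tensor,
                        weight_shard: torch.Tensor,
                        bias: Optional[torch.Tensor] = None,
                        mp_group=None) -> torch.Tensor:
    # Each rank multiplies its slice of the activations by its slice of
    # the weight; the shape is complete but the values are partial.
    partial = F.linear(x_shard, weight_shard)
    if mp_group is not None:
        # Without this reduction every rank would return its partial sum,
        # which is exactly the "abnormal output" this commit fixes.
        dist.all_reduce(partial, op=dist.ReduceOp.SUM, group=mp_group)
    if bias is not None:
        # Add the bias once, after the reduce, so it is not counted
        # world_size times.
        partial = partial + bias
    return partial
```

Note that this sketch adds the bias after the reduce to avoid double counting; how the bias is actually partitioned across ranks is handled by DeepSpeed's AutoTP sharding and is out of scope here.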
