From e835ce508e90cd2ea70951250dc346587672c9fb Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Fri, 30 Aug 2024 05:43:33 -0700
Subject: [PATCH] fix style

---
 python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py b/python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py
index 2022f0e38226..de27a9b2fbfd 100644
--- a/python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py
+++ b/python/llm/src/ipex_llm/transformers/npu_models/qwen2_mp.py
@@ -235,8 +235,10 @@ def mlp(self, hidden_states, seq_len):
             # for qwen2-7b
             mm1_0 = self.slice(mm1, begin=[0, 0, 0], end=[1, seq_len, 9472])
             mm1_1 = self.slice(mm1, begin=[0, 0, 9472], end=[1, seq_len, 18944])
-            hidden_states_0 = self.linear(mm1_0, self.hidden_size, 9472, bias=False, wt_dtype=self.dtype)
-            hidden_states_1 = self.linear(mm1_1, self.hidden_size, 9472, bias=False, wt_dtype=self.dtype)
+            hidden_states_0 = self.linear(mm1_0, self.hidden_size, 9472,
+                                          bias=False, wt_dtype=self.dtype)
+            hidden_states_1 = self.linear(mm1_1, self.hidden_size, 9472,
+                                          bias=False, wt_dtype=self.dtype)
             hidden_states = hidden_states_0 + hidden_states_1
         else:
             hidden_states = self.linear(