From e0889266d9f79f0c55fa9d077504984b7d9bf3a3 Mon Sep 17 00:00:00 2001 From: Heyang Sun <60865256+Uxito-Ada@users.noreply.github.com> Date: Fri, 28 Jun 2024 17:30:52 +0800 Subject: [PATCH] refine format --- .../LoRA/chatglm_finetune/lora_finetune_chatglm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py index ea1afd52f89..dfb0314a725 100644 --- a/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py +++ b/python/llm/example/GPU/LLM-Finetuning/LoRA/chatglm_finetune/lora_finetune_chatglm.py @@ -25,13 +25,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# This example is ported from https://github.com/THUDM/ChatGLM3/blob/main/finetune_demo/finetune_hf.py -# L33, L34, L417, L474 and L544-L546 are modified to enable the example on Intel Arc # Below 2 lines different from the original example, where transformers are patched with IPEX LLM from ipex_llm import llm_patch llm_patch(train=True) + +# The example below is ported from https://github.com/THUDM/ChatGLM3/blob/main/finetune_demo/finetune_hf.py +# L417, L474 and L544-L546 are modified to enable the example on Intel Arc import os import jieba import dataclasses as dc