From 743c7d71b40718b356ea32bea80e805c99a1431f Mon Sep 17 00:00:00 2001
From: Shaojun Liu <61072813+liu-shaojun@users.noreply.github.com>
Date: Thu, 8 Aug 2024 14:04:26 +0800
Subject: [PATCH] enable inference mode for deepspeed tp serving

---
 python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/serving.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/serving.py b/python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/serving.py
index 23de5fa1acc..3d8d4ca9ea3 100644
--- a/python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/serving.py
+++ b/python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/serving.py
@@ -116,11 +116,13 @@ def load_model(model_path, low_bit):
     # Use IPEX-LLM `optimize_model` to convert the model into optimized low bit format
     # Convert the rest of the model into float16 to reduce allreduce traffic
     model = optimize_model(model.module.to(f"cpu"), low_bit=low_bit).to(torch.float16)
-
+
     # Next, use XPU as accelerator to speed up inference
     current_accel = XPU_Accelerator()
     set_accelerator(current_accel)
 
+    model=model.eval()
+
     # Move model back to xpu
     model = model.to(f"xpu:{local_rank}")
     model = BenchmarkWrapper(model)
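
Note: the added `model.eval()` switches the module (and all submodules) out of training mode before the model is moved to XPU and served, so training-only behaviors such as dropout stop randomizing activations; it does not by itself disable autograd, which is a separate concern (`torch.no_grad()` / `torch.inference_mode()`). Below is a minimal, self-contained PyTorch sketch using a toy model (not the serving.py LLM itself) to illustrate the effect of the call this patch adds:

```python
import torch
import torch.nn as nn

# Toy model with dropout, standing in for the real model: in train mode dropout
# randomly zeroes activations, so two identical forward passes usually differ;
# after .eval() dropout becomes a no-op and the outputs match.
model = nn.Sequential(nn.Linear(8, 8), nn.Dropout(p=0.5))
x = torch.ones(1, 8)

model.train()
print(torch.equal(model(x), model(x)))      # usually False: dropout is active

model = model.eval()                        # same call the patch adds before serving
with torch.no_grad():                       # separately skips autograd bookkeeping
    print(torch.equal(model(x), model(x)))  # True: dropout is disabled in eval mode
```

For the served model the practical effect is deterministic, slightly cheaper forward passes during generation, independent of the XPU placement and low-bit optimization done earlier in `load_model`.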