From 32beb0ae114bb5b7b67d5c5aaff5c5a0faf18d00 Mon Sep 17 00:00:00 2001
From: Shaojun Liu <61072813+liu-shaojun@users.noreply.github.com>
Date: Fri, 8 Nov 2024 11:20:20 +0800
Subject: [PATCH 1/2] Update README.md

---
 docker/llm/serving/xpu/docker/README.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docker/llm/serving/xpu/docker/README.md b/docker/llm/serving/xpu/docker/README.md
index fd5826fee35..9109707c3a9 100644
--- a/docker/llm/serving/xpu/docker/README.md
+++ b/docker/llm/serving/xpu/docker/README.md
@@ -102,7 +102,9 @@ To set up model serving using `IPEX-LLM` as backend using FastChat, you can refe
 
 # start controller
 python -m fastchat.serve.controller &
-
+
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
 export TORCH_LLM_ALLREDUCE=0
 export CCL_DG2_ALLREDUCE=1
 

From b5f60f0566d51f0491ac728c93f6b925ae79f3c5 Mon Sep 17 00:00:00 2001
From: Shaojun Liu <61072813+liu-shaojun@users.noreply.github.com>
Date: Fri, 8 Nov 2024 11:30:31 +0800
Subject: [PATCH 2/2] Update vllm_docker_quickstart.md

---
 docs/mddocs/DockerGuides/vllm_docker_quickstart.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
index 91ef0d34c70..5396ae242b9 100644
--- a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
+++ b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
@@ -831,7 +831,9 @@ We can set up model serving using `IPEX-LLM` as backend using FastChat, the foll
 
 # start controller
 python -m fastchat.serve.controller &
-
+
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
 export TORCH_LLM_ALLREDUCE=0
 export CCL_DG2_ALLREDUCE=1
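
For context, both patches add the same two exports ahead of the existing allreduce settings in the FastChat startup snippet. A minimal end-to-end sketch of how that snippet might be used is shown below; the controller command and the four export lines come from the patched snippet, while the model_worker and openai_api_server commands, the model path, and the port are illustrative assumptions rather than text taken from either guide.

# Environment variables carried by the patched snippet (Intel GPU serving with IPEX-LLM)
export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
export TORCH_LLM_ALLREDUCE=0
export CCL_DG2_ALLREDUCE=1

# Start the FastChat controller (from the patched snippet)
python -m fastchat.serve.controller &

# Hypothetical worker and API-server commands, for illustration only;
# the model path, device flag, and port are assumptions, not from the patch.
python -m fastchat.serve.model_worker --model-path /llm/models/Llama-2-7b-chat-hf --device xpu &
python -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 8000 &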