From de4bb97b4ff9aa6ffeceb8c808016d10f59497c0 Mon Sep 17 00:00:00 2001 From: Qiyuan Gong Date: Mon, 17 Jun 2024 17:52:12 +0800 Subject: [PATCH] Remove accelerate 0.23.0 install command in readme and docker (#11333) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit *ipex-llm's accelerate has been upgraded to 0.23.0. Remove accelerate 0.23.0 install command in README and docker. --- .github/workflows/llm_unit_tests.yml | 2 +- docker/llm/finetune/qlora/cpu/docker/Dockerfile | 1 - docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s | 1 - docker/llm/finetune/xpu/Dockerfile | 2 +- .../readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md | 1 - python/llm/example/CPU/QLoRA-FineTuning/README.md | 1 - python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/DPO/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/LISA/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/LoRA/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md | 1 - .../llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md | 1 - .../example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md | 1 - .../llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/ReLora/README.md | 1 - python/llm/example/GPU/LLM-Finetuning/axolotl/README.md | 1 - 17 files changed, 2 insertions(+), 17 deletions(-) diff --git a/.github/workflows/llm_unit_tests.yml b/.github/workflows/llm_unit_tests.yml index f9ab43fd512..68bca06a415 100644 --- a/.github/workflows/llm_unit_tests.yml +++ b/.github/workflows/llm_unit_tests.yml @@ -381,7 +381,7 @@ jobs: shell: bash run: | python -m pip uninstall datasets -y - python -m pip install transformers==4.36.0 datasets peft==0.10.0 accelerate==0.23.0 + python -m pip install transformers==4.36.0 datasets peft==0.10.0 python -m pip install bitsandbytes scipy # Specific 
oneapi position on arc ut test machines if [[ "$RUNNER_OS" == "Linux" ]]; then diff --git a/docker/llm/finetune/qlora/cpu/docker/Dockerfile b/docker/llm/finetune/qlora/cpu/docker/Dockerfile index 6c908c7b792..b7181dcb062 100644 --- a/docker/llm/finetune/qlora/cpu/docker/Dockerfile +++ b/docker/llm/finetune/qlora/cpu/docker/Dockerfile @@ -50,7 +50,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \ # install huggingface dependencies pip install datasets transformers==4.36.0 && \ pip install fire peft==0.10.0 && \ - pip install accelerate==0.23.0 && \ pip install bitsandbytes && \ # get qlora example code cd /ipex_llm && \ diff --git a/docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s b/docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s index 3c57c38f02f..7d310c286b1 100644 --- a/docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s +++ b/docker/llm/finetune/qlora/cpu/docker/Dockerfile.k8s @@ -63,7 +63,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \ # install huggingface dependencies pip install datasets transformers==4.36.0 && \ pip install fire peft==0.10.0 && \ - pip install accelerate==0.23.0 && \ # install basic dependencies apt-get update && apt-get install -y curl wget gpg gpg-agent && \ # Install Intel oneAPI keys. 
diff --git a/docker/llm/finetune/xpu/Dockerfile b/docker/llm/finetune/xpu/Dockerfile index f1df4538b62..082b6ecb2f5 100644 --- a/docker/llm/finetune/xpu/Dockerfile +++ b/docker/llm/finetune/xpu/Dockerfile @@ -41,7 +41,7 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO rm -rf IPEX-LLM && \ # install transformers & peft dependencies pip install transformers==4.36.0 && \ - pip install peft==0.10.0 datasets accelerate==0.23.0 && \ + pip install peft==0.10.0 datasets && \ pip install bitsandbytes scipy fire && \ # Prepare accelerate config mkdir -p /root/.cache/huggingface/accelerate && \ diff --git a/docs/readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md b/docs/readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md index 8c3a28e18dd..4a2cbb3a90b 100644 --- a/docs/readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md +++ b/docs/readthedocs/source/doc/LLM/Quickstart/axolotl_quickstart.md @@ -216,7 +216,6 @@ pip install -e . # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # install transformers etc -pip install accelerate==0.23.0 # to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544 pip install datasets==2.15.0 pip install transformers==4.37.0 diff --git a/python/llm/example/CPU/QLoRA-FineTuning/README.md b/python/llm/example/CPU/QLoRA-FineTuning/README.md index 5744ebf2298..6b3b1bfb7e8 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/README.md +++ b/python/llm/example/CPU/QLoRA-FineTuning/README.md @@ -22,7 +22,6 @@ pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pyt pip install transformers==4.36.0 pip install peft==0.10.0 pip install datasets -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md 
b/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md index 441926720cf..899f7d8bd6c 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md +++ b/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/README.md @@ -10,7 +10,6 @@ conda activate llm pip install --pre --upgrade ipex-llm[all] pip install datasets transformers==4.36.0 pip install fire peft==0.10.0 -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/DPO/README.md b/python/llm/example/GPU/LLM-Finetuning/DPO/README.md index 0eae68c64ae..4bd41efdcc5 100644 --- a/python/llm/example/GPU/LLM-Finetuning/DPO/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/DPO/README.md @@ -19,7 +19,6 @@ conda activate llm pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ pip install transformers==4.36.0 datasets pip install trl peft==0.10.0 -pip install accelerate==0.23.0 pip install bitsandbytes ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md b/python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md index fb2a628867d..bb2b9f0c6ec 100644 --- a/python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md @@ -17,7 +17,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte pip install transformers==4.36.0 datasets pip install fire peft==0.10.0 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/LISA/README.md b/python/llm/example/GPU/LLM-Finetuning/LISA/README.md index dfc9f1b3ac9..c02fdd5f5cb 100644 --- a/python/llm/example/GPU/LLM-Finetuning/LISA/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/LISA/README.md @@ -13,7 
+13,6 @@ conda create -n llm python=3.11 conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -pip install accelerate==0.23.0 pip install bitsandbytes==0.43.0 pip install datasets==2.18.0 pip install --upgrade transformers==4.36.0 diff --git a/python/llm/example/GPU/LLM-Finetuning/LoRA/README.md b/python/llm/example/GPU/LLM-Finetuning/LoRA/README.md index 52e9aad457d..a4a500efc33 100644 --- a/python/llm/example/GPU/LLM-Finetuning/LoRA/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/LoRA/README.md @@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte pip install transformers==4.36.0 datasets pip install fire peft==0.10.0 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md b/python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md index 511787f39b4..ca093283c11 100644 --- a/python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md @@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte pip install transformers==4.36.0 datasets pip install fire peft==0.10.0 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md index 06716764379..8fabd6941cb 100644 --- 
a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md @@ -18,7 +18,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte pip install transformers==4.36.0 datasets pip install fire peft==0.10.0 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning -pip install accelerate==0.23.0 pip install bitsandbytes scipy # configures OneAPI environment variables source /opt/intel/oneapi/setvars.sh # necessary to run before installing deepspeed diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md index 68741b3f536..05806233153 100644 --- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/README.md @@ -19,7 +19,6 @@ conda activate llm pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ pip install transformers==4.36.0 datasets pip install peft==0.10.0 -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md index 303856f02fc..99488aceeed 100644 --- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/trl-example/README.md @@ -19,7 +19,6 @@ conda activate llm pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ pip install transformers==4.36.0 datasets pip install peft==0.10.0 -pip install accelerate==0.23.0 pip install bitsandbytes scipy trl ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/ReLora/README.md 
b/python/llm/example/GPU/LLM-Finetuning/ReLora/README.md index 7cf0c53988b..8d94a7eea66 100644 --- a/python/llm/example/GPU/LLM-Finetuning/ReLora/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/ReLora/README.md @@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte pip install transformers==4.36.0 datasets pip install fire peft==0.10.0 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning -pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/LLM-Finetuning/axolotl/README.md b/python/llm/example/GPU/LLM-Finetuning/axolotl/README.md index 7a019e7f01b..cedc087d5c3 100644 --- a/python/llm/example/GPU/LLM-Finetuning/axolotl/README.md +++ b/python/llm/example/GPU/LLM-Finetuning/axolotl/README.md @@ -132,7 +132,6 @@ pip install -e . # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # install transformers etc -pip install accelerate==0.23.0 # to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544 pip install datasets==2.15.0 pip install transformers==4.37.0