Update pip install to use --extra-index-url for ipex package (#10557)
* Change to 'pip install .. --extra-index-url' for readthedocs

* Change to 'pip install .. --extra-index-url' for examples

* Change to 'pip install .. --extra-index-url' for remaining files

* Fix URL for ipex

* Add links for ipex US and CN servers

* Update ipex cpu url

* remove readme

* Update for github actions

* Update for dockerfiles
chtanch authored Mar 28, 2024
1 parent 92dfed7 commit 1c5eb14
Showing 116 changed files with 297 additions and 228 deletions.
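For context: pip's `-f`/`--find-links` option reads wheel download links from a single flat HTML page, while `--extra-index-url` registers an additional PEP 503 package index that pip searches alongside PyPI. The commit swaps the former for the latter and adds separate US and CN index mirrors. A representative before/after, using URLs taken from the diff below:

```bash
# Before: -f/--find-links points pip at a flat HTML page of wheel links.
pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu

# After: --extra-index-url adds a second package index consulted in addition
# to PyPI; a CN mirror (.../xpu/cn/) is also provided.
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```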
4 changes: 2 additions & 2 deletions .github/actions/llm/setup-llm-env/action.yml
@@ -32,10 +32,10 @@ runs:
fi
whl_name=$(ls python/llm/dist)
if [[ ${{ inputs.extra-dependency }} == 'xpu_2.0' ]]; then
-pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.0]" -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.0]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
pip install pytest expecttest
elif [[ ${{ inputs.extra-dependency }} == 'xpu_2.1' ]]; then
-pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.1]" -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.1]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
pip install pytest expecttest
else
pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]"
2 changes: 1 addition & 1 deletion .github/workflows/llm-whisper-evaluation.yml
@@ -123,7 +123,7 @@ jobs:
# - name: Install IPEX-LLM from Pypi
# shell: bash
# run: |
-# pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+# pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/

# - name: Test installed xpu version
# shell: bash
10 changes: 5 additions & 5 deletions .github/workflows/llm_performance_tests.yml
@@ -74,7 +74,7 @@ jobs:
- name: Install IPEX-LLM from Pypi
shell: bash
run: |
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
test_version_date=`date -d 'yesterday' '+%Y%m%d'`
if ! pip show ipex-llm | grep $test_version_date; then
echo "Did not install ipex-llm with excepted version $test_version_date"
@@ -198,7 +198,7 @@ jobs:
- name: Install IPEX-LLM from Pypi
shell: bash
run: |
-pip install --pre --upgrade ipex-llm[all] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[all] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
test_version_date=`date -d 'yesterday' '+%Y%m%d'`
if ! pip show ipex-llm | grep $test_version_date; then
echo "Did not install ipex-llm with excepted version $test_version_date"
@@ -272,7 +272,7 @@ jobs:
- name: Install IPEX-LLM from Pypi
shell: bash
run: |
-pip install --pre --upgrade ipex-llm[all] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[all] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
test_version_date=`date -d 'yesterday' '+%Y%m%d'`
if ! pip show ipex-llm | grep $test_version_date; then
echo "Did not install ipex-llm with excepted version $test_version_date"
@@ -344,7 +344,7 @@ jobs:
# if not exist dist\ipex_llm*.whl (exit /b 1)
# for %%i in (dist\ipex_llm*.whl) do set whl_name=%%i

-# pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+# pip install --pre --upgrade %whl_name%[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
# if %ERRORLEVEL% neq 0 (exit /b 1)
# pip list

@@ -367,7 +367,7 @@ jobs:
pip install --upgrade omegaconf pandas
pip install --upgrade tiktoken einops transformers_stream_generator
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
pip show ipex-llm | findstr %TEST_VERSION_DATE%
if %ERRORLEVEL% neq 0 (
echo "Did not install ipex-llm with excepted version %TEST_VERSION_DATE%"
4 changes: 2 additions & 2 deletions .github/workflows/llm_unit_tests.yml
@@ -392,10 +392,10 @@ jobs:
pip install llama-index-readers-file llama-index-vector-stores-postgres llama-index-embeddings-huggingface
# Specific oneapi position on arc ut test machines
if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
source /opt/intel/oneapi/setvars.sh
elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
-pip install --pre --upgrade ipex-llm[xpu_2.0] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu_2.0] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
source /home/arda/intel/oneapi/setvars.sh
fi
bash python/llm/test/run-llm-llamaindex-tests-gpu.sh
2 changes: 1 addition & 1 deletion docker/llm/finetune/qlora/xpu/docker/Dockerfile
@@ -29,7 +29,7 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
apt-get install -y python3-pip python3.9-dev python3-wheel python3.9-distutils && \
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
# install XPU ipex-llm
-    pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu && \
+    pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ && \
# install huggingface dependencies
pip install git+https://github.com/huggingface/transformers.git@${TRANSFORMERS_COMMIT_ID} && \
pip install peft==0.5.0 datasets accelerate==0.23.0 && \
2 changes: 1 addition & 1 deletion docker/llm/inference/xpu/docker/Dockerfile
@@ -35,7 +35,7 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
python3 get-pip.py && \
rm get-pip.py && \
pip install --upgrade requests argparse urllib3 && \
-    pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu && \
+    pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ && \
# Fix Trivy CVE Issues
pip install transformers==4.36.2 && \
pip install transformers_stream_generator einops tiktoken && \
2 changes: 1 addition & 1 deletion docs/readthedocs/source/doc/LLM/Overview/FAQ/faq.md
@@ -9,7 +9,7 @@ Please also refer to [here](https://github.com/intel-analytics/ipex-llm?tab=read

## How to Resolve Errors

-### Fail to install `ipex-llm` through `pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu`
+### Fail to install `ipex-llm` through `pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/` or `pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/`

You could try to install IPEX-LLM dependencies for Intel XPU from source archives:
- For Windows system, refer to [here](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#install-ipex-llm-from-wheel) for the steps.
83 changes: 66 additions & 17 deletions docs/readthedocs/source/doc/LLM/Overview/install_gpu.md
@@ -52,13 +52,28 @@ We recommend using [miniconda](https://docs.conda.io/en/latest/miniconda.html) t
``ipex-llm`` is tested with Python 3.9, 3.10 and 3.11. Python 3.9 is recommended for best practices.
```

-The easiest ways to install `ipex-llm` is the following commands:
-
-```
-conda create -n llm python=3.9 libuv
-conda activate llm
-
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-```
+The easiest ways to install `ipex-llm` is the following commands,
+choosing either US or CN website for `extra-index-url`:
+
+```eval_rst
+.. tabs::
+   .. tab:: US
+
+      .. code-block:: cmd
+
+         conda create -n llm python=3.9 libuv
+         conda activate llm
+
+         pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+   .. tab:: CN
+
+      .. code-block:: cmd
+
+         conda create -n llm python=3.9 libuv
+         conda activate llm
+
+         pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
+```

### Install IPEX-LLM From Wheel
@@ -396,31 +396,65 @@ We recommend using [miniconda](https://docs.conda.io/en/latest/miniconda.html) t
```eval_rst
.. tabs::
   .. tab:: PyTorch 2.1
-      .. code-block:: bash
-
-         conda create -n llm python=3.9
-         conda activate llm
-
-         pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-
-      .. note::
-
-         The ``xpu`` option will install IPEX-LLM with PyTorch 2.1 by default, which is equivalent to
-
-         .. code-block:: bash
-
-            pip install --pre --upgrade ipex-llm[xpu_2.1] -f https://developer.intel.com/ipex-whl-stable-xpu
+      Choose either US or CN website for `extra-index-url`:
+
+      .. tabs::
+         .. tab:: US
+
+            .. code-block:: bash
+
+               conda create -n llm python=3.9
+               conda activate llm
+
+               pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+            .. note::
+
+               The ``xpu`` option will install IPEX-LLM with PyTorch 2.1 by default, which is equivalent to
+
+               .. code-block:: bash
+
+                  pip install --pre --upgrade ipex-llm[xpu_2.1] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+         .. tab:: CN
+
+            .. code-block:: bash
+
+               conda create -n llm python=3.9
+               conda activate llm
+
+               pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
+
+            .. note::
+
+               The ``xpu`` option will install IPEX-LLM with PyTorch 2.1 by default, which is equivalent to
+
+               .. code-block:: bash
+
+                  pip install --pre --upgrade ipex-llm[xpu_2.1] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
+
   .. tab:: PyTorch 2.0
-      .. code-block:: bash
-
-         conda create -n llm python=3.9
-         conda activate llm
-
-         pip install --pre --upgrade ipex-llm[xpu_2.0] -f https://developer.intel.com/ipex-whl-stable-xpu
+      Choose either US or CN website for `extra-index-url`:
+
+      .. tabs::
+         .. tab:: US
+
+            .. code-block:: bash
+
+               conda create -n llm python=3.9
+               conda activate llm
+
+               pip install --pre --upgrade ipex-llm[xpu_2.0] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+         .. tab:: CN
+
+            .. code-block:: bash
+
+               conda create -n llm python=3.9
+               conda activate llm
+
+               pip install --pre --upgrade ipex-llm[xpu_2.0] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
```

@@ -18,6 +18,7 @@ pip install --pre --upgrade ipex-llm[all] # for cpu
```

### For GPU
+Choose either US or CN website for `extra-index-url`:
```eval_rst
.. tabs::
29 changes: 24 additions & 5 deletions docs/readthedocs/source/doc/LLM/Quickstart/install_linux_gpu.md
@@ -72,7 +72,7 @@ IPEX-LLM currently supports the Ubuntu 20.04 operating system and later, and sup
> <img src="https://llm-assets.readthedocs.io/en/latest/_images/basekit.png" alt="image-20240221102252565" width=100%; />
### Setup Python Environment
Download and install the Miniconda as follows if you don't have conda installed on your machine:
```bash
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
@@ -94,10 +94,29 @@ conda activate llm

## Install `ipex-llm`

-* With the `llm` environment active, use `pip` to install `ipex-llm` for GPU:
-```
-pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://developer.intel.com/ipex-whl-stable-xpu
-```
+With the `llm` environment active, use `pip` to install `ipex-llm` for GPU.
+Choose either US or CN website for `extra-index-url`:
+
+```eval_rst
+.. tabs::
+   .. tab:: US
+
+      .. code-block:: cmd
+
+         pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+   .. tab:: CN
+
+      .. code-block:: cmd
+
+         pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
+```
+
+```eval_rst
+.. note::
+
+   If you encounter network issues while installing IPEX, refer to `this guide <https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html#id3>`_ for troubleshooting advice.
+```

## Verify Installation
* You can verify if `ipex-llm` is successfully installed by simply importing a few classes from the library. For example, execute the following import command in the terminal:
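The verification command itself is truncated from this diff view. As a rough sketch only (the module path `ipex_llm.transformers` and the `AutoModel` name are assumptions, not the repository's verbatim text), a check of this shape confirms the package imports cleanly:

```bash
# Hypothetical verification sketch: the exact import used by the docs is not
# shown here; ipex_llm.transformers.AutoModel is an assumed example class.
python -c "from ipex_llm.transformers import AutoModel"
```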
2 changes: 1 addition & 1 deletion python/llm/example/CPU/Deepspeed-AutoTP/install.sh
@@ -12,7 +12,7 @@ mkdir -p /opt/intel/oneccl
mv ./_install/env /opt/intel/oneccl
# 2. install torch and ipex
pip install torch==2.1.0
-pip install intel_extension_for_pytorch==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu
+pip install intel_extension_for_pytorch==2.1.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# install torchccl (oneccl binding for pytorch)
pip install https://intel-extension-for-pytorch.s3.amazonaws.com/torch_ccl/cpu/oneccl_bind_pt-2.1.0%2Bcpu-cp39-cp39-linux_x86_64.whl
# 3. install deepspeed
@@ -18,7 +18,7 @@ conda create -n llm python=3.9 # recommend to use Python 3.9
conda activate llm

# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

# According to Gemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
pip install transformers==4.38.1
@@ -30,7 +30,7 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9 libuv
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

# According to Gemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
pip install transformers==4.38.1
@@ -54,7 +54,7 @@ python ./alpaca_qlora_finetuning_cpu.py \
```bash
# need to run the alpaca stand-alone version first
# for using mpirun
-pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable
+pip install oneccl_bind_pt --extra-index-url https://developer.intel.com/ipex-whl-stable
```

2. modify conf in `finetune_one_node_two_sockets.sh` and run
2 changes: 1 addition & 1 deletion python/llm/example/GPU/Applications/autogen/README.md
@@ -16,7 +16,7 @@ conda activate autogen

# install xpu-supported and fastchat-adapted ipex-llm
# we recommend using ipex-llm version >= 2.5.0b20240110
-pip install --pre --upgrade ipex-llm[xpu,serving] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu,serving] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

# install recommend transformers version
pip install transformers==4.36.2
@@ -13,7 +13,7 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9
conda activate llm
pip install -U transformers==4.34.0
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```

## Configures OneAPI environment variables
4 changes: 2 additions & 2 deletions python/llm/example/GPU/Deepspeed-AutoTP/README.md
@@ -13,8 +13,8 @@ To run this example with IPEX-LLM on Intel GPUs, we have some recommended requir
conda create -n llm python=3.9
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-pip install oneccl_bind_pt==2.1.100 -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# configures OneAPI environment variables
source /opt/intel/oneapi/setvars.sh
pip install git+https://github.com/microsoft/DeepSpeed.git@4fc181b0
@@ -36,7 +36,7 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.35.0
pip install autoawq==0.1.8 --no-deps
pip install accelerate==0.25.0
@@ -26,7 +26,7 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.35.0
```
**Note: For Mixtral model, please use transformers 4.36.0:**
@@ -27,7 +27,7 @@ conda create -n llm python=3.9 # recommend to use Python 3.9
conda activate llm

# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.36.0 # upgrade transformers
```

@@ -12,7 +12,7 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.34.0
BUILD_CUDA_EXT=0 pip install git+https://github.com/PanQiWei/AutoGPTQ.git@1de9ab6
pip install optimum==0.14.0
@@ -19,15 +19,15 @@ We suggest using conda to manage environment:
conda create -n llm python=3.9
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```
#### 1.2 Installation on Windows
We suggest using conda to manage environment:
```bash
conda create -n llm python=3.9 libuv
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
-pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```

### 2. Configures OneAPI environment variables