[LLM] Change default Linux GPU install option to PyTorch 2.1 (#9858)

* Update default xpu to ipex 2.1

* Update related install ut support correspondingly

* Add arc ut tests for both ipex 2.0 and 2.1

* Small fix

* Disable ipex 2.1 test for now as oneapi 2024.0 has not been installed on the test machine

* Update document for default PyTorch 2.1

* Small fix

* Small fix

* Small doc fixes

* Small fixes
This commit is contained in:
Yuwen Hu 2024-01-08 17:16:17 +08:00 committed by GitHub
parent ed81baa35e
commit 5ba1dc38d4
4 changed files with 50 additions and 18 deletions

View file

@ -18,7 +18,6 @@ runs:
pip uninstall bigdl-core-xe-esimd-21 -y || true pip uninstall bigdl-core-xe-esimd-21 -y || true
sed -i 's/"bigdl-core-xe==" + VERSION + "/"bigdl-core-xe/g' python/llm/setup.py sed -i 's/"bigdl-core-xe==" + VERSION + "/"bigdl-core-xe/g' python/llm/setup.py
sed -i 's/"bigdl-core-xe-esimd==" + VERSION + "/"bigdl-core-xe-esimd/g' python/llm/setup.py sed -i 's/"bigdl-core-xe-esimd==" + VERSION + "/"bigdl-core-xe-esimd/g' python/llm/setup.py
sed -i 's/"bigdl-core-xe-21==" + VERSION + "/"bigdl-core-xe-21/g' python/llm/setup.py
sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
sed -i 's/"bigdl-core-xe-esimd-21==" + VERSION + "/"bigdl-core-xe-esimd-21/g' python/llm/setup.py sed -i 's/"bigdl-core-xe-esimd-21==" + VERSION + "/"bigdl-core-xe-esimd-21/g' python/llm/setup.py

View file

@ -208,7 +208,19 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: ["3.9","3.10","3.11"] include:
# - pytorch-version: "2.1"
# python-version: "3.9"
# - pytorch-version: "2.1"
# python-version: "3.10"
# - pytorch-version: "2.1"
# python-version: "3.11"
- pytorch-version: "2.0"
python-version: "3.9"
- pytorch-version: "2.0"
python-version: "3.10"
- pytorch-version: "2.0"
python-version: "3.11"
runs-on: [self-hosted, llm, arc] runs-on: [self-hosted, llm, arc]
env: env:
OMP_NUM_THREADS: 16 OMP_NUM_THREADS: 16
@ -246,15 +258,20 @@ jobs:
- name: Download llm binary - name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary uses: ./.github/actions/llm/download-llm-binary
- name: Run LLM install (all) test - name: Install BigDL-LLM for xpu
uses: ./.github/actions/llm/setup-llm-env uses: ./.github/actions/llm/setup-llm-env
with: with:
extra-dependency: "xpu_2.0" extra-dependency: "xpu_${{ matrix.pytorch-version }}"
- name: Test installed xpu version - name: Test installed xpu version
shell: bash shell: bash
run: | run: |
source /opt/intel/oneapi/setvars.sh # TODO: differentiate oneapi path after oneapi 2024.0 is installed on test machine
if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
source /opt/intel/oneapi/setvars.sh
elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
source /opt/intel/oneapi/setvars.sh
fi
bash python/llm/test/run-llm-install-tests.sh bash python/llm/test/run-llm-install-tests.sh
- name: Download LLMs and datasets - name: Download LLMs and datasets
@ -295,7 +312,12 @@ jobs:
- name: Run LLM inference test - name: Run LLM inference test
shell: bash shell: bash
run: | run: |
source /opt/intel/oneapi/setvars.sh # TODO: differentiate oneapi path after oneapi 2024.0 is installed on test machine
if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
source /opt/intel/oneapi/setvars.sh
elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
source /opt/intel/oneapi/setvars.sh
fi
python -m pip install datasets librosa soundfile einops python -m pip install datasets librosa soundfile einops
bash python/llm/test/run-llm-inference-tests-gpu.sh bash python/llm/test/run-llm-inference-tests-gpu.sh
@ -304,5 +326,10 @@ jobs:
run: | run: |
python -m pip install transformers==4.34.0 peft==0.5.0 accelerate==0.23.0 python -m pip install transformers==4.34.0 peft==0.5.0 accelerate==0.23.0
python -m pip install bitsandbytes scipy python -m pip install bitsandbytes scipy
source /opt/intel/oneapi/setvars.sh # TODO: differentiate oneapi path after oneapi 2024.0 is installed on test machine
bash python/llm/test/run-llm-example-tests-gpu.sh if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
source /opt/intel/oneapi/setvars.sh
elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
source /opt/intel/oneapi/setvars.sh
fi
bash python/llm/test/run-llm-example-tests-gpu.sh

View file

@ -101,7 +101,7 @@ BigDL-LLM for GPU supports on Linux has been verified on:
```eval_rst ```eval_rst
.. important:: .. important::
BigDL-LLM on Linux only supports PyTorch 2.0 and PyTorch 2.1. BigDL-LLM on Linux supports PyTorch 2.0 and PyTorch 2.1.
``` ```
```eval_rst ```eval_rst
@ -171,7 +171,16 @@ We recommend using [miniconda](https://docs.conda.io/en/latest/miniconda.html) t
conda create -n llm python=3.9 conda create -n llm python=3.9
conda activate llm conda activate llm
pip install --pre --upgrade bigdl-llm[xpu_2.1] -f https://developer.intel.com/ipex-whl-stable-xpu pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
.. note::
The ``xpu`` option will install BigDL-LLM with PyTorch 2.1 by default, which is equivalent to
.. code-block:: bash
pip install --pre --upgrade bigdl-llm[xpu_2.1] -f https://developer.intel.com/ipex-whl-stable-xpu
.. tab:: Pytorch 2.0 .. tab:: Pytorch 2.0
@ -180,7 +189,7 @@ We recommend using [miniconda](https://docs.conda.io/en/latest/miniconda.html) t
conda create -n llm python=3.9 conda create -n llm python=3.9
conda activate llm conda activate llm
pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install --pre --upgrade bigdl-llm[xpu_2.0] -f https://developer.intel.com/ipex-whl-stable-xpu
``` ```

View file

@ -288,12 +288,9 @@ def setup_package():
"intel_extension_for_pytorch==2.1.10+xpu", "intel_extension_for_pytorch==2.1.10+xpu",
"bigdl-core-xe-21==" + VERSION, "bigdl-core-xe-21==" + VERSION,
"bigdl-core-xe-esimd-21==" + VERSION + ";platform_system=='Linux'"] "bigdl-core-xe-esimd-21==" + VERSION + ";platform_system=='Linux'"]
# default to ipex 2.0 for linux and 2.1 for windows # default to ipex 2.1 for linux and windows
xpu_requires = copy.deepcopy(xpu_20_requires) xpu_requires = copy.deepcopy(xpu_21_requires)
xpu_requires.extend(["torch==2.1.0a0;platform_system=='Windows'",
"torchvision==0.16.0a0;platform_system=='Windows'",
"intel_extension_for_pytorch==2.1.10+xpu;platform_system=='Windows'",
"bigdl-core-xe-21==" + VERSION + ";platform_system=='Windows'"])
serving_requires = ['py-cpuinfo'] serving_requires = ['py-cpuinfo']
serving_requires += SERVING_DEP serving_requires += SERVING_DEP
@ -319,7 +316,7 @@ def setup_package():
] ]
}, },
extras_require={"all": all_requires, extras_require={"all": all_requires,
"xpu": xpu_requires, # default to ipex 2.0 for linux and 2.1 for windows "xpu": xpu_requires, # default to ipex 2.1 for linux and windows
"xpu-2-0": xpu_20_requires, "xpu-2-0": xpu_20_requires,
"xpu-2-1": xpu_21_requires, "xpu-2-1": xpu_21_requires,
"serving": serving_requires}, "serving": serving_requires},