Support PyTorch 2.6 RC perf test on Windows (#12683)
parent 7234c9b27b
commit c24741584d

1 changed file with 41 additions and 25 deletions
.github/workflows/llm_performance_tests.yml (vendored): 16 changed lines shown

--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -77,6 +77,10 @@ on:
         required: false
         type: boolean
         default: true
+      gpu-pytorch-version:
+        description: 'PyTorch version used for GPU perf tests'
+        required: false
+        type: string
 
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
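Note: the new gpu-pytorch-version input only takes effect on manually dispatched runs. A minimal dispatch sketch, assuming the GitHub CLI is installed and authenticated; the ref is a placeholder and any other inputs the workflow defines are omitted:

    # Trigger the perf workflow with the PyTorch 2.6 RC path selected.
    gh workflow run llm_performance_tests.yml \
        --ref main \
        -f gpu-pytorch-version=2.6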
@@ -649,6 +653,9 @@
         if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }}
         shell: bash
         run: |
+          if [ ${{ inputs.gpu-pytorch-version }} == "2.6" ]; then
+            sed -i 's/"bigdl-core-xe-all==" + CORE_XE_VERSION/"bigdl-core-xe-all"/g' python/llm/setup.py
+          else
           if [ ${{ matrix.platform }} == "perf-mtl" ]; then
             sed -i 's/"bigdl-core-xe-21==" + CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
             sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py
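For context, the sed calls in this step strip the "== CORE_XE_VERSION" pin from the requirement strings in python/llm/setup.py, so the source build resolves the latest matching bigdl-core packages. A quick local check of the effect (illustrative only, run from the repository root):

    # Before: setup.py builds the requirement as "bigdl-core-xe-all==" + CORE_XE_VERSION
    # After the sed above: just "bigdl-core-xe-all" (unpinned)
    grep -n 'bigdl-core-xe-all' python/llm/setup.py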
@@ -659,6 +666,7 @@
             sed -i 's/"bigdl-core-xe-batch-23==" + CORE_XE_VERSION/"bigdl-core-xe-batch-23"/g' python/llm/setup.py
             sed -i 's/"bigdl-core-xe-addons-23==" + CORE_XE_VERSION/"bigdl-core-xe-addons-23"/g' python/llm/setup.py
           fi
+          fi
 
       - name: Install ipex-llm and other related packages (install from source)
         if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }}
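Read together with the previous hunk, the bash step now branches on the new input before falling back to the per-platform logic. A condensed sketch of the resulting control flow, with GPU_PYTORCH_VERSION and PLATFORM standing in for the ${{ inputs.gpu-pytorch-version }} and ${{ matrix.platform }} expressions and the non-mtl branches elided:

    if [ "$GPU_PYTORCH_VERSION" == "2.6" ]; then
        # PyTorch 2.6 RC path: unpin the single bigdl-core-xe-all dependency
        sed -i 's/"bigdl-core-xe-all==" + CORE_XE_VERSION/"bigdl-core-xe-all"/g' python/llm/setup.py
    else
        # existing behaviour: unpin the per-platform core packages
        if [ "$PLATFORM" == "perf-mtl" ]; then
            sed -i 's/"bigdl-core-xe-21==" + CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
            sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py
        fi
        # ... other platform branches elided ...
    fi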
@@ -677,6 +685,9 @@
           if not exist dist\ipex_llm*.whl (exit /b 1)
           for %%i in (dist\ipex_llm*.whl) do set whl_name=%%i
 
+          if "${{ inputs.gpu-pytorch-version }}"=="2.6" (
+            pip install --pre --upgrade %whl_name%[xpu_2.6] --extra-index-url https://download.pytorch.org/whl/test/xpu
+          ) else (
           if "${{ matrix.platform }}"=="perf-mtl" (
             pip install --pre --upgrade %whl_name%[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
           )
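The new branch installs the locally built wheel with the xpu_2.6 extra from PyTorch's test index instead of the Intel extension index. A bash rendering of the same two steps (the batch "for %%i ... set whl_name" loop becomes a glob; illustrative only):

    # Pick the built wheel, then install it with the xpu_2.6 extra against the PyTorch test channel.
    whl_name=$(ls dist/ipex_llm*.whl | head -n 1)
    pip install --pre --upgrade "${whl_name}[xpu_2.6]" \
        --extra-index-url https://download.pytorch.org/whl/test/xpu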
@@ -686,6 +697,7 @@
           if "${{ matrix.platform }}"=="perf-dgpu" (
             pip install --pre --upgrade %whl_name%[xpu_lnl] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/
           )
+          )
           if %ERRORLEVEL% neq 0 (exit /b 1)
           pip list
 
@@ -711,6 +723,9 @@
           pip install --upgrade omegaconf pandas
           pip install --upgrade tiktoken einops transformers_stream_generator matplotlib
 
+          if "${{ inputs.gpu-pytorch-version }}"=="2.6" (
+            pip install --pre --upgrade ipex-llm[xpu_2.6] --extra-index-url https://download.pytorch.org/whl/test/xpu
+          ) else (
           if "${{ matrix.platform }}"=="perf-mtl" (
             pip install --pre --upgrade ipex-llm[xpu]==%TEST_VERSION% --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
           )
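The release/nightly install path mirrors the source-build change: with gpu-pytorch-version set to 2.6, ipex-llm is installed with the xpu_2.6 extra from PyTorch's test index rather than pinned to %TEST_VERSION% from the Intel extension index. A local sanity check of that install path (illustrative; best run in a fresh environment):

    pip install --pre --upgrade "ipex-llm[xpu_2.6]" --extra-index-url https://download.pytorch.org/whl/test/xpu
    # The torch build pulled in should report a 2.6.x version from the test channel.
    python -c "import torch; print(torch.__version__)"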
@@ -720,6 +735,7 @@
           if "${{ matrix.platform }}"=="perf-dgpu" (
             pip install --pre --upgrade ipex-llm[xpu_lnl]==%TEST_VERSION% --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/
           )
+          )
           pip show ipex-llm | findstr %TEST_VERSION%
           if %ERRORLEVEL% neq 0 (
             echo "Did not install ipex-llm with excepted version %TEST_VERSION%"
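As far as the loaded hunks show, the trailing version check is unchanged: pip show ipex-llm piped through findstr fails the step when the installed version differs from %TEST_VERSION%. The same check written for a POSIX shell, with TEST_VERSION standing in for the batch variable (for comparison only):

    pip show ipex-llm | grep -q "$TEST_VERSION" || {
        echo "Did not install ipex-llm with expected version $TEST_VERSION"
        exit 1
    }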
(Remaining hunks of the diff were still loading and are not shown here.)