replace bigdl-llm with ipex-llm (#10545)

Shaojun Liu 2024-03-26 15:12:38 +08:00 committed by GitHub
parent c563b41491
commit bb9be70105
11 changed files with 101 additions and 2571 deletions

==== changed file (path not shown) ====

@@ -1,5 +1,5 @@
-name: "BigDL-LLM convert tests"
-description: "BigDL-LLM convert test, including downloading original models"
+name: "IPEX-LLM convert tests"
+description: "IPEX-LLM convert test, including downloading original models"
 runs:
   using: "composite"

==== changed file (path not shown) ====

@@ -1,5 +1,5 @@
-name: 'BigDL-LLM example tests'
-description: 'BigDL-LLM example tests'
+name: 'IPEX-LLM example tests'
+description: 'IPEX-LLM example tests'
 runs:
   using: "composite"

==== changed file (path not shown) ====

@@ -1,5 +1,5 @@
-name: "Setup BigDL-LLM Env"
-description: "BigDL-LLM installation"
+name: "Setup IPEX-LLM Env"
+description: "IPEX-LLM installation"
 inputs:
   extra-dependency:
     description: "Name of extra dependencies filled in brackets"

==== changed file (path not shown) ====

@@ -276,7 +276,7 @@ jobs:
       - name: Download FP16 results
         shell: bash
         run: |
-          wget https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/test/benchmark/harness/fp16.csv -O $ACC_FOLDER/../fp16.csv
+          wget https://raw.githubusercontent.com/intel-analytics/ipex-llm/main/python/llm/test/benchmark/harness/fp16.csv -O $ACC_FOLDER/../fp16.csv
           ls $ACC_FOLDER/..
       - name: Write to CSV

==== changed file (path not shown) ====

@@ -83,7 +83,7 @@ jobs:
       - name: Download llm binary
         uses: ./.github/actions/llm/download-llm-binary
-      - name: Install BigDL-LLM
+      - name: Install IPEX-LLM
         uses: ./.github/actions/llm/setup-llm-env
       - name: Download original models & convert

==== changed file (path not shown) ====

@@ -259,7 +259,7 @@ jobs:
       - name: Download fp16.results
         shell: bash
         run: |
-          wget https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/test/benchmark/perplexity/fp16.csv -O $ACC_FOLDER/../fp16.csv
+          wget https://raw.githubusercontent.com/intel-analytics/ipex-llm/main/python/llm/test/benchmark/perplexity/fp16.csv -O $ACC_FOLDER/../fp16.csv
           ls $ACC_FOLDER/..
       - name: Write to CSV

==== changed file (path not shown) ====

@@ -111,7 +111,7 @@ jobs:
           python -m pip install --upgrade librosa
           python -m pip install --upgrade jiwer
-      # please uncomment it and comment the "Install BigDL-LLM from Pypi" part for PR tests
+      # please uncomment it and comment the "Install IPEX-LLM from Pypi" part for PR tests
       - name: Download llm binary
         uses: ./.github/actions/llm/download-llm-binary
@@ -120,10 +120,10 @@ jobs:
         with:
           extra-dependency: "xpu_2.1"
-      # - name: Install BigDL-LLM from Pypi
+      # - name: Install IPEX-LLM from Pypi
       #   shell: bash
       #   run: |
-      #     pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+      #     pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
       # - name: Test installed xpu version
       #   shell: bash

==== changed file (path not shown) ====

@@ -67,7 +67,7 @@ jobs:
           cd python/llm/dev/benchmark/all-in-one
           export http_proxy=${HTTP_PROXY}
           export https_proxy=${HTTPS_PROXY}
-          source bigdl-llm-init -t
+          source ipex-llm-init -t
           export OMP_NUM_THREADS=48
           # hide time info
           sed -i 's/str(end - st)/"xxxxxx"/g' run.py
@@ -125,7 +125,7 @@ jobs:
           cd python/llm/dev/benchmark/all-in-one
           export http_proxy=${HTTP_PROXY}
           export https_proxy=${HTTPS_PROXY}
-          source bigdl-llm-init -t
+          source ipex-llm-init -t
           export OMP_NUM_THREADS=48
           # hide time info
           sed -i 's/str(end - st)/"xxxxxx"/g' run-stress-test.py
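The two hunks above only rename the environment-init script inside the benchmark jobs. For reference, a minimal sketch of running that setup locally follows; it assumes ipex-llm is installed and ships the renamed ipex-llm-init helper (the successor of bigdl-llm-init), and the final python invocation is an assumed entry point that this diff does not show.

#!/usr/bin/env bash
# Sketch: mirror the CI benchmark environment locally (assumptions noted inline).
cd python/llm/dev/benchmark/all-in-one
source ipex-llm-init -t            # renamed from bigdl-llm-init in this commit
export OMP_NUM_THREADS=48          # same thread count the CI job exports
# Hide timing info in the script, exactly as the CI sed step does:
sed -i 's/str(end - st)/"xxxxxx"/g' run.py
python run.py                      # assumed entry point; not part of this diff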

==== changed file (path not shown) ====

@@ -273,7 +273,7 @@ jobs:
       - name: Download llm binary
         uses: ./.github/actions/llm/download-llm-binary
-      - name: Install BigDL-LLM for xpu
+      - name: Install IPEX-LLM for xpu
         uses: ./.github/actions/llm/setup-llm-env
         with:
           extra-dependency: "xpu_${{ matrix.pytorch-version }}"
@@ -392,10 +392,10 @@ jobs:
           pip install llama-index-readers-file llama-index-vector-stores-postgres llama-index-embeddings-huggingface
           # Specific oneapi position on arc ut test machines
           if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
-            pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+            pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
             source /opt/intel/oneapi/setvars.sh
           elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
-            pip install --pre --upgrade bigdl-llm[xpu_2.0] -f https://developer.intel.com/ipex-whl-stable-xpu
+            pip install --pre --upgrade ipex-llm[xpu_2.0] -f https://developer.intel.com/ipex-whl-stable-xpu
             source /home/arda/intel/oneapi/setvars.sh
           fi
           bash python/llm/test/run-llm-llamaindex-tests-gpu.sh
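Since only the wheel name changes at install time, a hedged sanity check for the same migration on a user machine might look like the lines below. The install command is copied from this diff; the uninstall step and the ipex_llm import name are assumptions based on the package rename, not something the diff shows.

pip uninstall -y bigdl-llm          # assumed: remove the old wheel before migrating
pip install --pre --upgrade ipex-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
python -c "import ipex_llm"         # assumed module name for the renamed package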

File diff suppressed because it is too large

File diff suppressed because it is too large