[LLM] Enable more long context in-out pairs for iGPU perf tests (#9765)

* Add test for 1024-128 and enable more tests for 512-64

* Fix date in results csv name to the time when the performance test is triggered

* Small fix

* Small fix

* Further fixes
Yuwen Hu committed 2023-12-22 18:18:23 +08:00 (via GitHub)
parent 7fd7c37e1b
commit 02436c6cce
4 changed files with 135 additions and 6 deletions


@@ -275,7 +275,7 @@ jobs:
call conda deactivate
- name: Set directory envs
- name: Set directory envs & fix generated csv date name
shell: bash
run: |
if [ ${{ github.event_name }} == 'schedule' ]; then
@@ -286,6 +286,8 @@ jobs:
cur_date=$(date +%Y-%m-%d)
echo "LOG_FILE=${cur_date}_output.txt" >> "$GITHUB_ENV"
sed -i "s/date.today()/\"$cur_date\"/g" python/llm/dev/benchmark/all-in-one/run.py
- name: Prepare igpu perf test (32-32)
shell: bash
run: |
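(The sed above is what "fix generated csv date name" refers to: a hedged sketch of the effect, assuming run.py assigns today = date.today() when composing its result csv name; the exact format string is illustrative.)

    from datetime import date

    # Hypothetical line in run.py before the workflow's sed runs: the date is
    # re-evaluated at run time, so passes that cross midnight disagree on it.
    today = date.today()
    # After sed replaces date.today() with the trigger-time literal:
    today = "2023-12-22"
    csv_name = f"{today}_test1.csv"  # every pass now writes under one date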
@@ -489,7 +491,7 @@ jobs:
shell: bash
run: |
sed -i 's/32-512/512-64/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i 's/{today}_test2/{today}/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/512-64.yaml
- name: Test on igpu (512-64)
@@ -511,12 +513,37 @@ jobs:
call conda deactivate
- name: Generate html (512-64)
- name: Prepare igpu perf test for Mistral (512-64)
shell: bash
run: |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/512-64_434.yaml
- name: Test on igpu for Mistral (512-64)
shell: cmd
run: |
call conda activate igpu-perf
pip install transformers==4.34.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
cd python\llm\dev\benchmark\all-in-one
move ..\..\..\test\benchmark\igpu-perf\512-64_434.yaml config.yaml
python run.py >> %CSV_SAVE_PATH%\512-64\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
call conda deactivate
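(A note on the split: Mistral requires transformers >= 4.34.0, while the rest of the matrix stays on 4.31.0, so Mistral gets its own pass with a _434.yaml config and its own _test2 csv. A minimal sketch of the constraint; the guard itself is not code from the repo.)

    import transformers
    from packaging import version

    # Mistral support landed in transformers 4.34.0; the other models in the
    # matrix are pinned to 4.31.0, hence the separate Mistral-only pass.
    if version.parse(transformers.__version__) < version.parse("4.34.0"):
        raise RuntimeError("Mistral-7B-Instruct-v0.1 needs transformers>=4.34.0")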
- name: Concat csv and generate html (512-64)
shell: cmd
run: |
call conda activate html-gen
cd python\llm\dev\benchmark\all-in-one
python ..\..\..\test\benchmark\concat_csv.py
del /q *test*.csv
move *.csv %CSV_SAVE_PATH%\512-64\
cd ..\..\..\test\benchmark
python csv_to_html.py -f %CSV_SAVE_PATH%\512-64\
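(concat_csv.py itself is not part of this diff; a minimal sketch of the merge it presumably performs, with filenames assumed from the surrounding steps.)

    import glob
    import pandas as pd

    # Gather the per-pass results, e.g. 2023-12-22_test1.csv and _test2.csv.
    parts = sorted(glob.glob("*_test*.csv"))
    merged = pd.concat((pd.read_csv(p) for p in parts), ignore_index=True)

    # Write one combined csv without "test" in the name, so it survives the
    # "del /q *test*.csv" cleanup and is moved to %CSV_SAVE_PATH% instead.
    merged.to_csv("2023-12-22.csv", index=False)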
@@ -525,6 +552,72 @@ jobs:
call conda deactivate
# 1024-128
- name: Prepare igpu perf test (1024-128)
shell: bash
run: |
sed -i 's/512-64/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml
- name: Test on igpu (1024-128)
shell: cmd
run: |
call conda activate igpu-perf
pip install transformers==4.31.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
REM for llava
set TRANSFORMERS_OFFLINE=1
cd python\llm\dev\benchmark\all-in-one
move ..\..\..\test\benchmark\igpu-perf\1024-128.yaml config.yaml
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
call conda deactivate
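(TRANSFORMERS_OFFLINE=1 is set only for this pass because of llava: it makes the hub client resolve models from the local cache or a local path and fail fast instead of reaching huggingface.co. A minimal sketch of the behavior; the model id is illustrative.)

    import os

    # Must be set before transformers performs any hub lookups.
    os.environ["TRANSFORMERS_OFFLINE"] = "1"

    from transformers import AutoTokenizer

    # With offline mode on, from_pretrained raises unless the files are
    # already in the local cache / model hub directory.
    tok = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")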
- name: Prepare igpu perf test for Mistral (1024-128)
shell: bash
run: |
sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_434.yaml
- name: Test on igpu for Mistral (1024-128)
shell: cmd
run: |
call conda activate igpu-perf
pip install transformers==4.34.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
cd python\llm\dev\benchmark\all-in-one
move ..\..\..\test\benchmark\igpu-perf\1024-128_434.yaml config.yaml
python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
if %ERRORLEVEL% neq 0 (exit /b 1)
call conda deactivate
- name: Concat csv and generate html (1024-128)
shell: cmd
run: |
call conda activate html-gen
cd python\llm\dev\benchmark\all-in-one
python ..\..\..\test\benchmark\concat_csv.py
del /q *test*.csv
move *.csv %CSV_SAVE_PATH%\1024-128\
cd ..\..\..\test\benchmark
python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128\
if %ERRORLEVEL% neq 0 (exit /b 1)
move %CSV_SAVE_PATH%\1024-128\*.html %CSV_SAVE_PATH%
call conda deactivate
# for test on machine when encountering error
# - name: Remove conda env
# if: ${{ always() }}

python/llm/test/benchmark/igpu-perf/1024-128.yaml (new file)

@@ -0,0 +1,24 @@
repo_id:
- 'THUDM/chatglm2-6b'
- 'THUDM/chatglm3-6b'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'internlm/internlm-chat-7b-8k'
# - 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
- 'meta-llama/Llama-2-7b-chat-hf'
# - 'WisdomShell/CodeShell-7B-Chat'
- 'tiiuae/falcon-7b-instruct-with-patch'
- 'mosaicml/mpt-7b-chat'
- 'liuhaotian/llava-v1.5-7b'
- 'RWKV/rwkv-4-world-7b'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
in_out_pairs:
- '1024-128'
test_api:
- "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
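(An in_out_pair such as '1024-128' reads as 1024 prompt tokens in, 128 generated tokens out; a hedged sketch of how a harness like run.py could parse the pair.)

    # '1024-128' encodes (input tokens, output tokens) for one benchmark run.
    pair = "1024-128"
    in_len, out_len = (int(x) for x in pair.split("-"))
    assert (in_len, out_len) == (1024, 128)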

python/llm/test/benchmark/igpu-perf/1024-128_434.yaml (new file)

@@ -0,0 +1,12 @@
repo_id:
- 'mistralai/Mistral-7B-Instruct-v0.1'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
in_out_pairs:
- '1024-128'
test_api:
- "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
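(cpu_embedding: True keeps the embedding lookup in host memory and moves only the hidden states to the GPU. A minimal sketch of the idea, not the library's implementation; the "xpu" device string assumes an Intel GPU runtime.)

    import torch

    class CPUEmbedding(torch.nn.Module):
        """Run the embedding table on CPU; ship hidden states to the GPU."""

        def __init__(self, embedding: torch.nn.Embedding, device: str = "xpu"):
            super().__init__()
            self.embedding = embedding.cpu()  # keep weights in host memory
            self.device = device

        def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
            hidden = self.embedding(input_ids.cpu())
            return hidden.to(self.device)  # only activations cross over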

python/llm/test/benchmark/igpu-perf/512-64.yaml

@@ -1,13 +1,13 @@
repo_id:
- 'THUDM/chatglm2-6b'
- 'THUDM/chatglm3-6b'
# - 'baichuan-inc/Baichuan2-7B-Chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'internlm/internlm-chat-7b-8k'
# - 'Qwen/Qwen-7B-Chat'
- 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
- 'meta-llama/Llama-2-7b-chat-hf'
# - 'WisdomShell/CodeShell-7B-Chat'
- 'WisdomShell/CodeShell-7B-Chat'
- 'tiiuae/falcon-7b-instruct-with-patch'
- 'mosaicml/mpt-7b-chat'
- 'liuhaotian/llava-v1.5-7b'