[LLM] iGPU long context tests (#9598)
* Temp enable PR
* Enable tests for 256-64
* Try again 128-64
* Empty cache after each iteration for igpu benchmark scripts
* Try tests for 512
* Change order for 512
* Skip chatglm3 and llama2 for now
* Separate tests for 512-64
* Small fix
* Further fixes
* Change back to nightly again
parent 4e70e33934
commit c998f5f2ba

6 changed files with 112 additions and 19 deletions
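For orientation: the commit splits the iGPU benchmark into a (32) phase and a (512) phase, each with its own config file, log folder, and CSV folder. A minimal Python sketch of the resulting output layout; the CSV_SAVE_PATH value here is an assumption, not taken from the workflow, which reads it from the %CSV_SAVE_PATH% environment variable:

from pathlib import Path

# Hypothetical root; the real value comes from the %CSV_SAVE_PATH% env var.
CSV_SAVE_PATH = Path(r"C:\benchmarks")

# Each phase writes under its own subfolder, e.g.
# ...\32\log\<date>_output.txt and ...\512\*.csv.
for phase in ("32", "512"):
    (CSV_SAVE_PATH / phase / "log").mkdir(parents=True, exist_ok=True)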
75  .github/workflows/llm_performance_tests.yml  vendored
@@ -249,6 +249,7 @@ jobs:
           pip install %whl_name%[xpu] -i %INTERNAL_PYPI_URL% --trusted-host %INTERNAL_PYPI_TRUSTED_HOST% -q
           if %ERRORLEVEL% neq 0 (exit /b 1)
+          pip list

           call conda deactivate

@@ -263,15 +264,15 @@ jobs:
           cur_date=$(date +%Y-%m-%d)
           echo "LOG_FILE=${cur_date}_output.txt" >> "$GITHUB_ENV"

-      - name: Prepare igpu perf test
+      - name: Prepare igpu perf test (32)
         shell: bash
         run: |
           # hide time info
           sed -i 's/str(end - st)/"xxxxxx"/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i 's/{today}/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf-test.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test.yaml

-      - name: Test on igpu
+      - name: Test on igpu (32)
         shell: cmd
         run: |
           call conda activate igpu-perf
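The bash "Prepare" steps patch the benchmark script and config in place with sed. A rough Python equivalent of the three substitutions, for readers who don't parse sed; paths follow the repo layout, and "/models" is a stand-in for the $MODEL_HUB_DIR value the workflow supplies:

from pathlib import Path

run_py = Path("python/llm/dev/benchmark/all-in-one/run.py")
text = run_py.read_text()
text = text.replace("str(end - st)", '"xxxxxx"')  # hide time info in results
text = text.replace("{today}", "{today}_test1")   # tag this phase's output files
run_py.write_text(text)

cfg = Path("python/llm/test/benchmark/32-igpu-perf-test.yaml")
cfg.write_text(cfg.read_text().replace("path to your local model hub", "/models"))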
@@ -281,20 +282,20 @@ jobs:
           REM for llava
           set TRANSFORMERS_OFFLINE=1

-          move python\llm\test\benchmark\igpu-perf-test.yaml python\llm\dev\benchmark\all-in-one\config.yaml
           cd python\llm\dev\benchmark\all-in-one
-          python run.py >> %LOG_FILE% 2>&1
+          move ..\..\..\test\benchmark\32-igpu-perf-test.yaml config.yaml
+          python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate

-      - name: Prepare igpu perf test for Mistral
+      - name: Prepare igpu perf test for Mistral (32)
         shell: bash
         run: |
           sed -i 's/test1/test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf-test-434.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test-434.yaml

-      - name: Test on igpu for Mistral
+      - name: Test on igpu for Mistral (32)
         shell: cmd
         run: |
           call conda activate igpu-perf
@@ -304,31 +305,71 @@ jobs:
           set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1

-          move python\llm\test\benchmark\igpu-perf-test-434.yaml python\llm\dev\benchmark\all-in-one\config.yaml
           cd python\llm\dev\benchmark\all-in-one
-          python run.py >> %LOG_FILE% 2>&1
+          move ..\..\..\test\benchmark\32-igpu-perf-test-434.yaml config.yaml
+          python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate

-      - name: Concat csv and generate html
+      - name: Concat csv and generate html (32)
         shell: cmd
         run: |
           call conda activate igpu-perf

           cd python\llm\dev\benchmark\all-in-one
-          move %LOG_FILE% %CSV_SAVE_PATH%\log\
           python ..\..\..\test\benchmark\concat_csv.py
-          copy *.csv %CSV_SAVE_PATH%
+          copy *.csv %CSV_SAVE_PATH%\32\
           del /q *.csv
           cd ..\..\..\test\benchmark
-          python csv_to_html.py -f %CSV_SAVE_PATH%
+          python csv_to_html.py -f %CSV_SAVE_PATH%\32\
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate

-      - name: Remove conda env
-        if: ${{ always() }}
+      # TODO: create a action function here for different input
+      - name: Prepare igpu perf test (512)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/512-igpu-perf-test.yaml
+
+      - name: Test on igpu (512)
         shell: cmd
         run: |
-          call conda env remove -n igpu-perf -y
+          call conda activate igpu-perf
+          pip install transformers==4.31.0
+
+          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
+          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
+          set SYCL_CACHE_PERSISTENT=1
+          REM for llava
+          set TRANSFORMERS_OFFLINE=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\512-igpu-perf-test.yaml config.yaml
+          python run.py >> %CSV_SAVE_PATH%\512\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Generate html (512)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+
+          cd python\llm\dev\benchmark\all-in-one
+          copy *.csv %CSV_SAVE_PATH%\512\
+          del /q *.csv
+          cd ..\..\..\test\benchmark
+          python csv_to_html.py -f %CSV_SAVE_PATH%\512\
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      # for test on machine when encountering error
+      # - name: Remove conda env
+      #   if: ${{ always() }}
+      #   shell: cmd
+      #   run: |
+      #     call conda env remove -n igpu-perf -y

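The "Concat csv and generate html (32)" step above merges the per-run CSVs before rendering HTML. An illustrative stand-in for what the concat_csv.py call does; the repo's actual script may differ:

import glob

import pandas as pd

# Merge every per-model CSV in the working folder into one summary table.
frames = [pd.read_csv(path) for path in sorted(glob.glob("*.csv"))]
pd.concat(frames, ignore_index=True).to_csv("summary.csv", index=False)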
@@ -731,6 +731,7 @@ def run_transformer_int4_gpu_win(repo_id,
             if i >= warm_up:
                 result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
                                        actual_in_len, actual_out_len, gpu_peak_mem])
+            torch.xpu.empty_cache()
     except RuntimeError:
         pass
     model.to('cpu')

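For context, the added torch.xpu.empty_cache() call runs at the end of every trial in run_transformer_int4_gpu_win, releasing cached allocator blocks so the longer 512-64 runs don't exhaust iGPU memory. A simplified sketch of such a loop, assuming intel_extension_for_pytorch's torch.xpu API and a Hugging Face model; the real function measures far more than peak memory:

import torch
import intel_extension_for_pytorch as ipex  # noqa: F401  # enables the torch.xpu backend

def benchmark(model, tokenizer, prompt, warm_up=1, num_trials=3, out_len=64):
    results = []
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to("xpu")
    for i in range(warm_up + num_trials):
        output = model.generate(input_ids, max_new_tokens=out_len)
        torch.xpu.synchronize()
        if i >= warm_up:
            # Peak GPU memory via IPEX's XPU allocator stats (assumed API).
            results.append(torch.xpu.max_memory_allocated())
        # Release cached blocks after every iteration (the change in this diff),
        # so back-to-back long-context trials don't accumulate allocator pressure.
        torch.xpu.empty_cache()
    return results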
@@ -7,7 +7,6 @@ num_beams: 1 # default to greedy search
 low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
 in_out_pairs:
   - '32-32'
-  # - '512-64'
   # - '1024-128'
 test_api:
   # - "transformer_int4"

@@ -18,7 +18,6 @@ num_beams: 1 # default to greedy search
 low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
 in_out_pairs:
   - '32-32'
-  # - '512-64'
   # - '1024-128'
 test_api:
   # - "transformer_int4"

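In these configs an in_out_pairs entry such as '512-64' encodes 512 prompt tokens in and 64 generated tokens out; the two hunks above drop the commented 512-64 pair from the 32-token configs, since it now lives in the dedicated 512 files added below. A tiny illustration of how such a pair splits (hypothetical helper, not necessarily the repo's own parser):

def parse_in_out(pair: str) -> tuple[int, int]:
    """Split an in_out_pairs entry like '512-64' into (input_len, output_len)."""
    in_len, out_len = pair.split("-")
    return int(in_len), int(out_len)

assert parse_in_out("512-64") == (512, 64)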
21  python/llm/test/benchmark/512-igpu-perf-test-434.yaml  Normal file
@@ -0,0 +1,21 @@
+repo_id:
+  - 'mistralai/Mistral-7B-Instruct-v0.1'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+in_out_pairs:
+  - '512-64'
+  # - '1024-128'
+test_api:
+  # - "transformer_int4"
+  # - "native_int4"
+  # - "optimize_model"
+  # - "pytorch_autocast_bf16"
+  # - "ipex_fp16_gpu" # on Intel GPU
+  # - "transformer_int4_gpu" # on Intel GPU
+  # - "optimize_model_gpu" # on Intel GPU
+  # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)

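The all-in-one harness consumes whichever of these YAML files the workflow moved to config.yaml. A minimal sketch of reading such a config, illustrative only; run.py's actual loader may differ:

import yaml  # PyYAML, assumed available in the igpu-perf env

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

for repo_id in conf["repo_id"]:
    for pair in conf["in_out_pairs"]:
        for api in conf["test_api"]:
            print(f"would benchmark {repo_id} with {pair} via {api}")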
32  python/llm/test/benchmark/512-igpu-perf-test.yaml  Normal file
@@ -0,0 +1,32 @@
+repo_id:
+  - 'THUDM/chatglm2-6b'
+  # - 'THUDM/chatglm3-6b'
+  # - 'baichuan-inc/Baichuan2-7B-Chat'
+  # - 'internlm/internlm-chat-7b-8k'
+  # - 'Qwen/Qwen-7B-Chat-10-12'
+  # - 'BAAI/AquilaChat2-7B'
+  - '01-ai/Yi-6B'
+  # - 'meta-llama/Llama-2-7b-chat-hf'
+  # - 'WisdomShell/CodeShell-7B-Chat'
+  - 'tiiuae/falcon-7b-instruct-with-patch'
+  - 'mosaicml/mpt-7b-chat'
+  # - 'liuhaotian/llava-v1.5-7b'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+in_out_pairs:
+  - '512-64'
+  # - '1024-128'
+test_api:
+  # - "transformer_int4"
+  # - "native_int4"
+  # - "optimize_model"
+  # - "pytorch_autocast_bf16"
+  # - "ipex_fp16_gpu" # on Intel GPU
+  # - "transformer_int4_gpu" # on Intel GPU
+  # - "optimize_model_gpu" # on Intel GPU
+  # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)