Add Yi-6B and StableLM to iGPU perf test (#11546)
* Add transformers 4.38.2 test to igpu benchmark (#11529)
  * add transformers 4.38.1 test to igpu benchmark
  * use transformers 4.38.2 & fix csv name error in the 4.38 workflow
  * add model Yi-6B-Chat & temporarily remove most models
* Filter some errorlevel (#11541)
* Restore the temporarily removed models in iGPU-perf (#11544)
  * filter some errorlevel
  * restore the temporarily removed models in iGPU-perf

Co-authored-by: Xu, Shuo <100334393+ATMxsp01@users.noreply.github.com>
Co-authored-by: ATMxsp01 <shou.xu@intel.com>
Parent: 7dc6756d86
Commit: 8982ab73d5
11 changed files with 194 additions and 4 deletions
.github/workflows/llm_performance_tests.yml (vendored, 123 changed lines)
@@ -680,6 +680,29 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (32-32 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (32-32 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (32-32 int4+fp16)
         shell: cmd
         run: |
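Note on the exit-code check in the 32-32 step above (the "filter some errorlevel" part of this commit, from #11541): in cmd, two if statements chained on one line act as a logical AND, and -1073740791 is the signed 32-bit form of 0xC0000409 (STATUS_STACK_BUFFER_OVERRUN), a Windows fail-fast code that can be raised during process teardown even after the benchmark itself has completed. Only this 32-32 stage tolerates it; the 4.38 stages below keep the strict neq 0 check. A minimal standalone sketch (the step name and bare python invocation are illustrative, not copied from the workflow):

    - name: Run benchmark, tolerating a known teardown crash (sketch)
      shell: cmd
      run: |
        python run.py
        rem Chained ifs act as AND: fail only if the exit code is nonzero
        rem and is not -1073740791 (0xC0000409, STATUS_STACK_BUFFER_OVERRUN).
        if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1)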
@@ -703,7 +726,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
 
       - name: Test on igpu (1024-128 int4+fp16)
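The {today}_testN substitutions exist because all-in-one/run.py embeds a date-plus-suffix tag in the name of the CSV it writes; each stage bumps the suffix so its results land in a separate file before the concat-and-html steps merge them. With the new transformers 4.38 stage, each in/out pair now cycles test1 -> test2 -> test3, which is why this prepare step resets {today}_test3 (rather than {today}_test2) back to {today}_test1. A sketch of the rotation around the new stage (the comments are an interpretation, not workflow text):

    - name: Rotate the CSV suffix around the 4.38 stage (sketch)
      shell: bash
      run: |
        # stage 2 -> stage 3: let the transformers 4.38 run write its own CSV
        sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
        # stage 3 -> stage 1: reset the suffix before the next in/out pair starts
        sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py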
@@ -748,6 +771,29 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128 int4+fp16)
         shell: cmd
         run: |
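Every cmd test step pins the same runtime variables: SYCL_CACHE_PERSISTENT=1 keeps the SYCL JIT kernel cache on disk so kernel compilation is not re-paid on every run, BIGDL_LLM_XMX_DISABLED=1 turns off the XMX path (the setting recommended for iGPU), and PYTHONIOENCODING=utf-8 keeps redirected model output from corrupting the log files. A purely illustrative equivalent using step-level env: instead of set (this step is not in the workflow):

    - name: Test on igpu (illustrative variant)
      shell: cmd
      env:
        SYCL_CACHE_PERSISTENT: "1"    # persist the SYCL JIT kernel cache
        BIGDL_LLM_XMX_DISABLED: "1"   # skip XMX instructions on iGPU
        PYTHONIOENCODING: "utf-8"     # keep redirected logs valid UTF-8
      run: |
        cd python\llm\dev\benchmark\all-in-one
        python run.py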
@@ -770,7 +816,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
 
       - name: Test on igpu (2048-256 int4+fp16)
@@ -815,6 +861,29 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (2048-256 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (2048-256 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (2048-256 int4+fp16)
         shell: cmd
         run: |
@@ -837,7 +906,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/2048-256/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
 
       - name: Test on igpu (load_low_bit 1024-128 int4+fp16)
@@ -882,6 +951,29 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (load_low_bit 1024-128 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (load_low_bit 1024-128 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (load_low_bit 1024-128 int4+fp16)
         shell: cmd
         run: |
@@ -903,7 +995,7 @@ jobs:
       - name: Prepare igpu perf test (1024-128)
         shell: bash
         run: |
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml
 
       - name: Test on igpu (1024-128)
@@ -948,6 +1040,29 @@ jobs:
           call conda deactivate
 
+      - name: Prepare igpu perf test for transformers 4.38 (1024-128)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (1024-128)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128)
         shell: cmd
         run: |
python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - 'RWKV/v5-Eagle-7B-HF'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
python/llm/test/benchmark/igpu-perf/1024-128_438.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
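All five new *_438.yaml files share this schema; across them only warm_up/num_trials, in_out_pairs, and test_api vary, and each starts with stablelm-zephyr-3b active and gemma-7b-it commented out. The annotated sketch below spells out the field meanings as inferred from the all-in-one benchmark; treat the comments (and the placeholder hub path) as assumptions rather than documented behavior:

    repo_id:                     # Hugging Face model ids to benchmark
      - 'stabilityai/stablelm-zephyr-3b'
    local_model_hub: '/models'   # placeholder; the workflow seds in $MODEL_HUB_DIR
    warm_up: 1                   # untimed runs before measurement starts
    num_trials: 3                # timed runs aggregated into the CSV
    num_beams: 1                 # 1 = greedy search
    low_bit: 'sym_int4'          # weight quantization scheme
    batch_size: 1
    in_out_pairs:
      - '1024-128'               # 1024 prompt tokens in, 128 generated tokens out
    test_api:
      - 'transformer_int4_fp16_gpu_win'   # which benchmark code path to run
    cpu_embedding: True          # keep the embedding layer on the CPU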
python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -10,6 +10,7 @@ repo_id:
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
+cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
@@ -10,6 +10,7 @@ repo_id:
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_fp16_loadlowbit_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -10,6 +10,7 @@ repo_id:
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -10,6 +10,7 @@ repo_id:
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
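Both 32-32 configs (the modified file above and the new _438 file below) measure with a heavier profile than the longer-sequence configs, presumably because 32-token runs are cheap enough to repeat for steadier numbers (the values below are copied from this diff; the comparison comments are mine):

    # measurement profile of the 32-32 configs
    warm_up: 3      # vs. warm_up: 1 in the 1024-128 and 2048-256 configs
    num_trials: 5   # vs. num_trials: 3 in the 1024-128 and 2048-256 configs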
python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 3
+num_trials: 5
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-32'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)