diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index cc8f4f53..9185e428 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -924,11 +924,203 @@ jobs:
           call conda deactivate
 
+      # 3072-384 int4+fp16
+      - name: Prepare igpu perf test (3072-384 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/2048-256/3072-384/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
+
+      - name: Test on igpu (3072-384 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.36.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+          REM for llava
+          set TRANSFORMERS_OFFLINE=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.37 (3072-384 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
+
+      - name: Test on igpu for transformers 4.37 (3072-384 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.38 (3072-384 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (3072-384 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Concat csv and generate html (3072-384 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate html-gen
+
+          cd python\llm\dev\benchmark\all-in-one
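+          REM concat_csv.py merges the test1/test2/test3 CSVs from the three transformers runs above into one summary CSV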
+          python ..\..\..\test\benchmark\concat_csv.py
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          del /q *test*.csv
+          move *.csv %CSV_SAVE_PATH%\3072-384_int4_fp16\
+          cd ..\..\..\test\benchmark
+          python csv_to_html.py -f %CSV_SAVE_PATH%\3072-384_int4_fp16\
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          move %CSV_SAVE_PATH%\3072-384_int4_fp16\*.html %CSV_SAVE_PATH%
+
+          call conda deactivate
+
+      # 4096-512 int4+fp16
+      - name: Prepare igpu perf test (4096-512 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/3072-384/4096-512/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
+
+      - name: Test on igpu (4096-512 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.36.2
+
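+          REM keep the compiled SYCL kernel cache persistent across runs and disable XMX for these iGPU runs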
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+          REM for llava
+          set TRANSFORMERS_OFFLINE=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.37 (4096-512 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
+
+      - name: Test on igpu for transformers 4.37 (4096-512 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Prepare igpu perf test for transformers 4.38 (4096-512 int4+fp16)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml
+
+      - name: Test on igpu for transformers 4.38 (4096-512 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.38.2
+
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_438.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
+      - name: Concat csv and generate html (4096-512 int4+fp16)
+        shell: cmd
+        run: |
+          call conda activate html-gen
+
+          cd python\llm\dev\benchmark\all-in-one
+          python ..\..\..\test\benchmark\concat_csv.py
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          del /q *test*.csv
+          move *.csv %CSV_SAVE_PATH%\4096-512_int4_fp16\
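+          REM render the merged CSV to an HTML report, then publish it to the top-level %CSV_SAVE_PATH% folder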
+          cd ..\..\..\test\benchmark
+          python csv_to_html.py -f %CSV_SAVE_PATH%\4096-512_int4_fp16\
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+          move %CSV_SAVE_PATH%\4096-512_int4_fp16\*.html %CSV_SAVE_PATH%
+
+          call conda deactivate
+
       # load_low_bit 1024-128 int4+fp16
       - name: Prepare igpu perf test (load_low_bit 1024-128 int4+fp16)
         shell: bash
         run: |
-          sed -i 's/2048-256/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/4096-512/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
new file mode 100644
index 00000000..47b9839a
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
@@ -0,0 +1,24 @@
+repo_id:
+  - 'THUDM/chatglm3-6b'
+  - 'THUDM/glm-4-9b-chat'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
+  - 'meta-llama/Meta-Llama-3-8B-Instruct'
+  - 'mistralai/Mistral-7B-Instruct-v0.2'
+  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '3072-384'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for the non-linear layers
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
new file mode 100644
index 00000000..cfd7cc31
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
@@ -0,0 +1,17 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '3072-384'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_438.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_438.yaml
new file mode 100644
index 00000000..72dc1267
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '3072-384'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
new file mode 100644
index 00000000..26e128a5
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
@@ -0,0 +1,23 @@
+repo_id:
+  - 'THUDM/chatglm3-6b'
+  - 'THUDM/glm-4-9b-chat'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
+  - 'meta-llama/Meta-Llama-3-8B-Instruct'
+  - 'mistralai/Mistral-7B-Instruct-v0.2'
+  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-6B-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '4096-512'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for the non-linear layers
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
new file mode 100644
index 00000000..7c2632d3
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
@@ -0,0 +1,17 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '4096-512'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml
new file mode 100644
index 00000000..c20de4c8
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml
@@ -0,0 +1,14 @@
+repo_id:
+  - 'stabilityai/stablelm-zephyr-3b'
+  #- 'google/gemma-7b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '4096-512'
+test_api:
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)