diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml index 99405ef4..e7f82000 100644 --- a/.github/workflows/llm_performance_tests.yml +++ b/.github/workflows/llm_performance_tests.yml @@ -256,6 +256,17 @@ jobs: call conda deactivate + - name: Create env for html generation + shell: cmd + run: | + call conda create -n html-gen python=3.9 -y + call conda activate html-gen + + pip install pandas==1.5.3 + pip install Jinja2 + + call conda deactivate + - name: Set directory envs shell: bash run: | @@ -267,38 +278,39 @@ jobs: cur_date=$(date +%Y-%m-%d) echo "LOG_FILE=${cur_date}_output.txt" >> "$GITHUB_ENV" - - name: Prepare igpu perf test (32) + - name: Prepare igpu perf test (32-32) shell: bash run: | # hide time info sed -i 's/str(end - st)/"xxxxxx"/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test.yaml + sed -i 's/{api}-results-{today}.csv/32-32-{api}-results-{today}_test1.csv/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32.yaml - - name: Test on igpu (32) + - name: Test on igpu (32-32) shell: cmd run: | call conda activate igpu-perf call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" set SYCL_ENABLE_DEFAULT_CONTEXTS=1 set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 REM for llava set TRANSFORMERS_OFFLINE=1 cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\32-igpu-perf-test.yaml config.yaml - python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1 + move ..\..\..\test\benchmark\igpu-perf\32-32.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32-32\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Prepare igpu perf test for Mistral (32) + - name: Prepare igpu perf test for 
Mistral (32-32) shell: bash run: | - sed -i 's/test1/test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test-434.yaml + sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_434.yaml - - name: Test on igpu for Mistral (32) + - name: Test on igpu for Mistral (32-32) shell: cmd run: | call conda activate igpu-perf @@ -307,37 +319,40 @@ jobs: call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" set SYCL_ENABLE_DEFAULT_CONTEXTS=1 set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\32-igpu-perf-test-434.yaml config.yaml - python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1 + move ..\..\..\test\benchmark\igpu-perf\32-32_434.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32-32\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Concat csv and generate html (32) + - name: Concat csv and generate html (32-32) shell: cmd run: | - call conda activate igpu-perf + call conda activate html-gen cd python\llm\dev\benchmark\all-in-one python ..\..\..\test\benchmark\concat_csv.py - copy *.csv %CSV_SAVE_PATH%\32\ - del /q *.csv - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\32\ - if %ERRORLEVEL% neq 0 (exit /b 1) + move *.csv %CSV_SAVE_PATH%\32-32\ + REM cd ..\..\..\test\benchmark + REM python csv_to_html.py -f %CSV_SAVE_PATH%\32-32\ + REM if %ERRORLEVEL% neq 0 (exit /b 1) + REM move *.csv %CSV_SAVE_PATH% call conda deactivate # TODO: create a action function here for different input - - name: Prepare igpu perf test (512) + - name: Prepare igpu perf test (32-256) shell: bash run: | - sed -i 's/{today}_test2/{today}/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model 
hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/512-igpu-perf-test.yaml + # hide time info + sed -i 's/32-32/32-256/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-256.yaml - - name: Test on igpu (512) + - name: Test on igpu (32-256) shell: cmd run: | call conda activate igpu-perf @@ -346,30 +361,166 @@ jobs: call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" set SYCL_ENABLE_DEFAULT_CONTEXTS=1 set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 REM for llava set TRANSFORMERS_OFFLINE=1 cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\512-igpu-perf-test.yaml config.yaml - python run.py >> %CSV_SAVE_PATH%\512\log\%LOG_FILE% 2>&1 + move ..\..\..\test\benchmark\igpu-perf\32-256.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32-256\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Generate html (512) + - name: Prepare igpu perf test for Mistral (32-256) + shell: bash + run: | + sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-256_434.yaml + + - name: Test on igpu for Mistral (32-256) shell: cmd run: | call conda activate igpu-perf + pip install transformers==4.34.0 + + call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + set SYCL_ENABLE_DEFAULT_CONTEXTS=1 + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 cd python\llm\dev\benchmark\all-in-one - copy *.csv %CSV_SAVE_PATH%\512\ - del /q *.csv - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\512\ + move ..\..\..\test\benchmark\igpu-perf\32-256_434.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32-256\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate + - name: Concat csv and 
generate html (32-256) + shell: cmd + run: | + call conda activate html-gen + + cd python\llm\dev\benchmark\all-in-one + python ..\..\..\test\benchmark\concat_csv.py + move *.csv %CSV_SAVE_PATH%\32-256\ + REM cd ..\..\..\test\benchmark + REM python csv_to_html.py -f %CSV_SAVE_PATH%\32-256\ + REM if %ERRORLEVEL% neq 0 (exit /b 1) + REM move *.csv %CSV_SAVE_PATH% + + call conda deactivate + + # 32-512 + - name: Prepare igpu perf test (32-512) + shell: bash + run: | + # hide time info + sed -i 's/32-256/32-512/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-512.yaml + + - name: Test on igpu (32-512) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.31.0 + + call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + set SYCL_ENABLE_DEFAULT_CONTEXTS=1 + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + REM for llava + set TRANSFORMERS_OFFLINE=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\32-512.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32-512\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + + call conda deactivate + + - name: Prepare igpu perf test for Mistral (32-512) + shell: bash + run: | + sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-512_434.yaml + + - name: Test on igpu for Mistral (32-512) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.34.0 + + call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + set SYCL_ENABLE_DEFAULT_CONTEXTS=1 + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\32-512_434.yaml config.yaml + python 
run.py >> %CSV_SAVE_PATH%\32-512\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + + call conda deactivate + + - name: Concat csv and generate html (32-512) + shell: cmd + run: | + call conda activate html-gen + + cd python\llm\dev\benchmark\all-in-one + python ..\..\..\test\benchmark\concat_csv.py + move *.csv %CSV_SAVE_PATH%\32-512\ + REM cd ..\..\..\test\benchmark + REM python csv_to_html.py -f %CSV_SAVE_PATH%\32-512\ + REM if %ERRORLEVEL% neq 0 (exit /b 1) + REM move *.csv %CSV_SAVE_PATH% + + call conda deactivate + + # 512-64 + - name: Prepare igpu perf test (512-64) + shell: bash + run: | + sed -i 's/32-512/512-64/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test2/{today}/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/512-64.yaml + + - name: Test on igpu (512-64) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.31.0 + + call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + set SYCL_ENABLE_DEFAULT_CONTEXTS=1 + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + REM for llava + set TRANSFORMERS_OFFLINE=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\512-64.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\512-64\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + + call conda deactivate + + - name: Generate html (512-64) + shell: cmd + run: | + call conda activate html-gen + + cd python\llm\dev\benchmark\all-in-one + move *.csv %CSV_SAVE_PATH%\512-64\ + REM cd ..\..\..\test\benchmark + REM python csv_to_html.py -f %CSV_SAVE_PATH%\512-64\ + REM if %ERRORLEVEL% neq 0 (exit /b 1) + REM move *.csv %CSV_SAVE_PATH% + + call conda deactivate + # for test on machine when encountering error # - name: Remove conda env # if: ${{ always() }} diff --git a/python/llm/test/benchmark/32-igpu-perf-test.yaml b/python/llm/test/benchmark/igpu-perf/32-256.yaml 
similarity index 71% rename from python/llm/test/benchmark/32-igpu-perf-test.yaml rename to python/llm/test/benchmark/igpu-perf/32-256.yaml index 81ec620c..faa2a153 100644 --- a/python/llm/test/benchmark/32-igpu-perf-test.yaml +++ b/python/llm/test/benchmark/igpu-perf/32-256.yaml @@ -18,18 +18,7 @@ num_trials: 3 num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) in_out_pairs: - - '32-32' - '32-256' - - '32-512' - # - '1024-128' test_api: - # - "transformer_int4" - # - "native_int4" - # - "optimize_model" - # - "pytorch_autocast_bf16" - # - "ipex_fp16_gpu" # on Intel GPU - # - "transformer_int4_gpu" # on Intel GPU - # - "optimize_model_gpu" # on Intel GPU - # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api) diff --git a/python/llm/test/benchmark/32-igpu-perf-test-434.yaml b/python/llm/test/benchmark/igpu-perf/32-256_434.yaml similarity index 58% rename from python/llm/test/benchmark/32-igpu-perf-test-434.yaml rename to python/llm/test/benchmark/igpu-perf/32-256_434.yaml index d485ece0..b37cd9c7 100644 --- a/python/llm/test/benchmark/32-igpu-perf-test-434.yaml +++ b/python/llm/test/benchmark/igpu-perf/32-256_434.yaml @@ -6,18 +6,7 @@ num_trials: 3 num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) in_out_pairs: - - '32-32' - '32-256' - - '32-512' - # - '1024-128' test_api: - # - "transformer_int4" - # - "native_int4" - # - "optimize_model" - # - "pytorch_autocast_bf16" - # - "ipex_fp16_gpu" # on Intel GPU - # - "transformer_int4_gpu" # on Intel GPU - # - "optimize_model_gpu" # on Intel GPU - # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api) diff --git a/python/llm/test/benchmark/igpu-perf/32-32.yaml b/python/llm/test/benchmark/igpu-perf/32-32.yaml new file mode 100644 index 00000000..2108899e --- /dev/null +++ b/python/llm/test/benchmark/igpu-perf/32-32.yaml @@ -0,0 +1,24 @@ +repo_id: + - 'THUDM/chatglm2-6b' + - 'THUDM/chatglm3-6b' + - 'baichuan-inc/Baichuan2-7B-Chat' + - 'internlm/internlm-chat-7b-8k' + - 'Qwen/Qwen-7B-Chat-10-12' + - 'BAAI/AquilaChat2-7B' + - '01-ai/Yi-6B' + - 'meta-llama/Llama-2-7b-chat-hf' + - 'WisdomShell/CodeShell-7B-Chat' + - 'tiiuae/falcon-7b-instruct-with-patch' + - 'mosaicml/mpt-7b-chat' + - 'liuhaotian/llava-v1.5-7b' + - 'RWKV/rwkv-4-world-7b' +local_model_hub: 'path to your local model hub' +warm_up: 3 +num_trials: 5 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) +in_out_pairs: + - '32-32' +test_api: + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api) diff --git a/python/llm/test/benchmark/igpu-perf/32-32_434.yaml b/python/llm/test/benchmark/igpu-perf/32-32_434.yaml new file mode 100644 index 00000000..8e23f29a --- /dev/null +++ b/python/llm/test/benchmark/igpu-perf/32-32_434.yaml @@ -0,0 +1,12 @@ +repo_id: + - 'mistralai/Mistral-7B-Instruct-v0.1' +local_model_hub: 'path to your local model hub' +warm_up: 3 +num_trials: 5 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) +in_out_pairs: + - '32-32' +test_api: + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api) diff --git a/python/llm/test/benchmark/igpu-perf/32-512.yaml b/python/llm/test/benchmark/igpu-perf/32-512.yaml new file mode 100644 index 00000000..640ec66d --- /dev/null +++ b/python/llm/test/benchmark/igpu-perf/32-512.yaml @@ -0,0 +1,24 @@ +repo_id: + - 'THUDM/chatglm2-6b' + - 'THUDM/chatglm3-6b' + - 'baichuan-inc/Baichuan2-7B-Chat' + - 'internlm/internlm-chat-7b-8k' + - 'Qwen/Qwen-7B-Chat-10-12' + - 'BAAI/AquilaChat2-7B' + - '01-ai/Yi-6B' + - 'meta-llama/Llama-2-7b-chat-hf' + - 'WisdomShell/CodeShell-7B-Chat' + - 'tiiuae/falcon-7b-instruct-with-patch' + - 'mosaicml/mpt-7b-chat' + - 'liuhaotian/llava-v1.5-7b' + - 'RWKV/rwkv-4-world-7b' +local_model_hub: 'path to your local model hub' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) +in_out_pairs: + - '32-512' +test_api: + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api) diff --git a/python/llm/test/benchmark/igpu-perf/32-512_434.yaml b/python/llm/test/benchmark/igpu-perf/32-512_434.yaml new file mode 100644 index 00000000..4a5d316d --- /dev/null +++ b/python/llm/test/benchmark/igpu-perf/32-512_434.yaml @@ -0,0 +1,12 @@ +repo_id: + - 'mistralai/Mistral-7B-Instruct-v0.1' +local_model_hub: 'path to your local model hub' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) +in_out_pairs: + - '32-512' +test_api: + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api) diff --git a/python/llm/test/benchmark/512-igpu-perf-test.yaml b/python/llm/test/benchmark/igpu-perf/512-64.yaml similarity index 73% rename from python/llm/test/benchmark/512-igpu-perf-test.yaml rename to python/llm/test/benchmark/igpu-perf/512-64.yaml index 91ae96b2..5f9c6a41 100644 --- a/python/llm/test/benchmark/512-igpu-perf-test.yaml +++ b/python/llm/test/benchmark/igpu-perf/512-64.yaml @@ -19,15 +19,6 @@ num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) in_out_pairs: - '512-64' - # - '1024-128' test_api: - # - "transformer_int4" - # - "native_int4" - # - "optimize_model" - # - "pytorch_autocast_bf16" - # - "ipex_fp16_gpu" # on Intel GPU - # - "transformer_int4_gpu" # on Intel GPU - # - "optimize_model_gpu" # on Intel GPU - # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api) diff --git a/python/llm/test/benchmark/512-igpu-perf-test-434.yaml b/python/llm/test/benchmark/igpu-perf/512-64_434.yaml similarity index 60% rename from python/llm/test/benchmark/512-igpu-perf-test-434.yaml rename to python/llm/test/benchmark/igpu-perf/512-64_434.yaml index f9c292aa..41afe76b 100644 --- a/python/llm/test/benchmark/512-igpu-perf-test-434.yaml +++ b/python/llm/test/benchmark/igpu-perf/512-64_434.yaml @@ -7,15 +7,6 @@ num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) in_out_pairs: - '512-64' - # - '1024-128' test_api: - # - "transformer_int4" - # - "native_int4" - # - "optimize_model" - # - "pytorch_autocast_bf16" - # - "ipex_fp16_gpu" # on Intel GPU - # - "transformer_int4_gpu" # on Intel GPU - # - "optimize_model_gpu" # on Intel GPU - # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)