From c998f5f2ba749f69159abdc6ac7f60eb653f5582 Mon Sep 17 00:00:00 2001 From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:19:20 +0800 Subject: [PATCH] [LLM] iGPU long context tests (#9598) * Temp enable PR * Enable tests for 256-64 * Try again 128-64 * Empty cache after each iteration for igpu benchmark scripts * Try tests for 512 * change order for 512 * Skip chatglm3 and llama2 for now * Separate tests for 512-64 * Small fix * Further fixes * Change back to nightly again --- .github/workflows/llm_performance_tests.yml | 75 ++++++++++++++----- python/llm/dev/benchmark/all-in-one/run.py | 1 + ...st-434.yaml => 32-igpu-perf-test-434.yaml} | 1 - ...-perf-test.yaml => 32-igpu-perf-test.yaml} | 1 - .../benchmark/512-igpu-perf-test-434.yaml | 21 ++++++ .../test/benchmark/512-igpu-perf-test.yaml | 32 ++++++++ 6 files changed, 112 insertions(+), 19 deletions(-) rename python/llm/test/benchmark/{igpu-perf-test-434.yaml => 32-igpu-perf-test-434.yaml} (98%) rename python/llm/test/benchmark/{igpu-perf-test.yaml => 32-igpu-perf-test.yaml} (98%) create mode 100644 python/llm/test/benchmark/512-igpu-perf-test-434.yaml create mode 100644 python/llm/test/benchmark/512-igpu-perf-test.yaml diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml index 9e372e45..e576f638 100644 --- a/.github/workflows/llm_performance_tests.yml +++ b/.github/workflows/llm_performance_tests.yml @@ -249,6 +249,7 @@ jobs: pip install %whl_name%[xpu] -i %INTERNAL_PYPI_URL% --trusted-host %INTERNAL_PYPI_TRUSTED_HOST% -q if %ERRORLEVEL% neq 0 (exit /b 1) + pip list call conda deactivate @@ -263,15 +264,15 @@ jobs: cur_date=$(date +%Y-%m-%d) echo "LOG_FILE=${cur_date}_output.txt" >> "$GITHUB_ENV" - - name: Prepare igpu perf test + - name: Prepare igpu perf test (32) shell: bash run: | # hide time info sed -i 's/str(end - st)/"xxxxxx"/g' python/llm/dev/benchmark/all-in-one/run.py sed -i 's/{today}/{today}_test1/g' 
python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf-test.yaml + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test.yaml - - name: Test on igpu + - name: Test on igpu (32) shell: cmd run: | call conda activate igpu-perf @@ -281,20 +282,20 @@ jobs: REM for llava set TRANSFORMERS_OFFLINE=1 - move python\llm\test\benchmark\igpu-perf-test.yaml python\llm\dev\benchmark\all-in-one\config.yaml cd python\llm\dev\benchmark\all-in-one - python run.py >> %LOG_FILE% 2>&1 + move ..\..\..\test\benchmark\32-igpu-perf-test.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Prepare igpu perf test for Mistral + - name: Prepare igpu perf test for Mistral (32) shell: bash run: | sed -i 's/test1/test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf-test-434.yaml + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/32-igpu-perf-test-434.yaml - - name: Test on igpu for Mistral + - name: Test on igpu for Mistral (32) shell: cmd run: | call conda activate igpu-perf @@ -304,31 +305,71 @@ jobs: set SYCL_ENABLE_DEFAULT_CONTEXTS=1 set SYCL_CACHE_PERSISTENT=1 - move python\llm\test\benchmark\igpu-perf-test-434.yaml python\llm\dev\benchmark\all-in-one\config.yaml cd python\llm\dev\benchmark\all-in-one - python run.py >> %LOG_FILE% 2>&1 + move ..\..\..\test\benchmark\32-igpu-perf-test-434.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\32\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Concat csv and generate html + - name: Concat csv and generate html (32) shell: cmd run: | call conda activate igpu-perf cd python\llm\dev\benchmark\all-in-one - move %LOG_FILE% %CSV_SAVE_PATH%\log\ python 
..\..\..\test\benchmark\concat_csv.py - copy *.csv %CSV_SAVE_PATH% + copy *.csv %CSV_SAVE_PATH%\32\ del /q *.csv cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH% + python csv_to_html.py -f %CSV_SAVE_PATH%\32\ if %ERRORLEVEL% neq 0 (exit /b 1) call conda deactivate - - name: Remove conda env - if: ${{ always() }} + # TODO: create a action function here for different input + - name: Prepare igpu perf test (512) + shell: bash + run: | + sed -i 's/{today}_test2/{today}/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/512-igpu-perf-test.yaml + + - name: Test on igpu (512) shell: cmd run: | - call conda env remove -n igpu-perf -y + call conda activate igpu-perf + pip install transformers==4.31.0 + + call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + set SYCL_ENABLE_DEFAULT_CONTEXTS=1 + set SYCL_CACHE_PERSISTENT=1 + REM for llava + set TRANSFORMERS_OFFLINE=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\512-igpu-perf-test.yaml config.yaml + python run.py >> %CSV_SAVE_PATH%\512\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + + call conda deactivate + + - name: Generate html (512) + shell: cmd + run: | + call conda activate igpu-perf + + cd python\llm\dev\benchmark\all-in-one + copy *.csv %CSV_SAVE_PATH%\512\ + del /q *.csv + cd ..\..\..\test\benchmark + python csv_to_html.py -f %CSV_SAVE_PATH%\512\ + if %ERRORLEVEL% neq 0 (exit /b 1) + + call conda deactivate + + # for test on machine when encountering error + # - name: Remove conda env + # if: ${{ always() }} + # shell: cmd + # run: | + # call conda env remove -n igpu-perf -y diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py index e85f1c9d..a5cf0878 100644 --- a/python/llm/dev/benchmark/all-in-one/run.py +++ b/python/llm/dev/benchmark/all-in-one/run.py @@ -731,6 +731,7 @@ def run_transformer_int4_gpu_win(repo_id, if i >= 
warm_up: result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time, actual_in_len, actual_out_len, gpu_peak_mem]) + torch.xpu.empty_cache() except RuntimeError: pass model.to('cpu') diff --git a/python/llm/test/benchmark/igpu-perf-test-434.yaml b/python/llm/test/benchmark/32-igpu-perf-test-434.yaml similarity index 98% rename from python/llm/test/benchmark/igpu-perf-test-434.yaml rename to python/llm/test/benchmark/32-igpu-perf-test-434.yaml index 05ce879a..28ff8ba8 100644 --- a/python/llm/test/benchmark/igpu-perf-test-434.yaml +++ b/python/llm/test/benchmark/32-igpu-perf-test-434.yaml @@ -7,7 +7,6 @@ num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) in_out_pairs: - '32-32' - # - '512-64' # - '1024-128' test_api: # - "transformer_int4" diff --git a/python/llm/test/benchmark/igpu-perf-test.yaml b/python/llm/test/benchmark/32-igpu-perf-test.yaml similarity index 98% rename from python/llm/test/benchmark/igpu-perf-test.yaml rename to python/llm/test/benchmark/32-igpu-perf-test.yaml index 92720d26..420d5b56 100644 --- a/python/llm/test/benchmark/igpu-perf-test.yaml +++ b/python/llm/test/benchmark/32-igpu-perf-test.yaml @@ -18,7 +18,6 @@ num_beams: 1 # default to greedy search low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) in_out_pairs: - '32-32' - # - '512-64' # - '1024-128' test_api: # - "transformer_int4" diff --git a/python/llm/test/benchmark/512-igpu-perf-test-434.yaml b/python/llm/test/benchmark/512-igpu-perf-test-434.yaml new file mode 100644 index 00000000..f9c292aa --- /dev/null +++ b/python/llm/test/benchmark/512-igpu-perf-test-434.yaml @@ -0,0 +1,21 @@ +repo_id: + - 'mistralai/Mistral-7B-Instruct-v0.1' +local_model_hub: 'path to your local model hub' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) +in_out_pairs: + - '512-64' + # - '1024-128' +test_api: + # - "transformer_int4" + # - "native_int4" + # - "optimize_model" + # - "pytorch_autocast_bf16" + # - "ipex_fp16_gpu" # on Intel GPU + # - "transformer_int4_gpu" # on Intel GPU + # - "optimize_model_gpu" # on Intel GPU + # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether to put embedding to CPU (only available now for gpu win related test_api) diff --git a/python/llm/test/benchmark/512-igpu-perf-test.yaml b/python/llm/test/benchmark/512-igpu-perf-test.yaml new file mode 100644 index 00000000..0cede39f --- /dev/null +++ b/python/llm/test/benchmark/512-igpu-perf-test.yaml @@ -0,0 +1,32 @@ +repo_id: + - 'THUDM/chatglm2-6b' + # - 'THUDM/chatglm3-6b' + # - 'baichuan-inc/Baichuan2-7B-Chat' + # - 'internlm/internlm-chat-7b-8k' + # - 'Qwen/Qwen-7B-Chat-10-12' + # - 'BAAI/AquilaChat2-7B' + - '01-ai/Yi-6B' + # - 'meta-llama/Llama-2-7b-chat-hf' + # - 'WisdomShell/CodeShell-7B-Chat' + - 'tiiuae/falcon-7b-instruct-with-patch' + - 'mosaicml/mpt-7b-chat' + # - 'liuhaotian/llava-v1.5-7b' +local_model_hub: 'path to your local model hub' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) +in_out_pairs: + - '512-64' + # - '1024-128' +test_api: + # - "transformer_int4" + # - "native_int4" + # - "optimize_model" + # - "pytorch_autocast_bf16" + # - "ipex_fp16_gpu" # on Intel GPU + # - "transformer_int4_gpu" # on Intel GPU + # - "optimize_model_gpu" # on Intel GPU + # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server + - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: True # whether to put embedding to CPU (only available now for gpu win related test_api)