diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 36b31f23..736b1dd4 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -153,7 +153,8 @@ jobs:
           source /opt/intel/oneapi/setvars.sh
           export USE_XETLA=OFF
           export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
-          cp python/llm/test/benchmark/arc-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
+          pip install transformers==4.36.2
+          cp python/llm/test/benchmark/arc-perf-transformers-436.yaml python/llm/dev/benchmark/all-in-one/config.yaml
           cd python/llm/dev/benchmark/all-in-one
           mkdir test_batch1
           mkdir test_batch2
@@ -167,7 +168,7 @@ jobs:
           mv *.csv test_batch1
           # batch_size 2
           cd ../../../../../
-          cp python/llm/test/benchmark/arc-perf-test-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml
+          cp python/llm/test/benchmark/arc-perf-transformers-436-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml
           cd python/llm/dev/benchmark/all-in-one
           # change csv name
           sed -i 's/batch1/batch2/g' run.py
@@ -175,7 +176,7 @@ jobs:
           mv *.csv test_batch2
           # batch_size 4
           cd ../../../../../
-          cp python/llm/test/benchmark/arc-perf-test-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml
+          cp python/llm/test/benchmark/arc-perf-transformers-436-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml
           cd python/llm/dev/benchmark/all-in-one
           # change csv name
           sed -i 's/batch2/batch4/g' run.py
@@ -188,7 +189,7 @@ jobs:
           source /opt/intel/oneapi/setvars.sh
           export USE_XETLA=OFF
           export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
-          # upgrade transformers for model Qwen/Qwen1.5-7B-Chat
+          # upgrade for default transformers version
           python -m pip install transformers==4.37.0
           # batch_size 1
           cp python/llm/test/benchmark/arc-perf-transformers-437.yaml python/llm/dev/benchmark/all-in-one/config.yaml
@@ -314,7 +315,7 @@ jobs:
         run: |
           # batch_size 1
           cd python/llm/dev/benchmark/all-in-one/test_batch1
-          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test.yaml
+          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436.yaml
           python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437.yaml
           python ../../../../test/benchmark/check_results.py -c test3 -y ../../../../test/benchmark/arc-perf-transformers-440.yaml
           find . -name "*test*.csv" -delete
@@ -327,7 +328,7 @@ jobs:
           rm -r test_batch1
           # batch_size 2
           cd test_batch2
-          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test-batch2.yaml
+          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436-batch2.yaml
           python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch2.yaml
           find . -name "*test*.csv" -delete
           if [[ ${{ github.event_name }} == "schedule" ]]; then
@@ -339,7 +340,7 @@ jobs:
           rm -r test_batch2
           # batch_size 4
           cd test_batch4
-          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-test-batch4.yaml
+          python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436-batch4.yaml
           python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch4.yaml
           find . -name "*test*.csv" -delete
           if [[ ${{ github.event_name }} == "schedule" ]]; then
@@ -384,7 +385,6 @@ jobs:
           python -m pip install --upgrade einops
           python -m pip install --upgrade tiktoken
           python -m pip install --upgrade transformers_stream_generator
-          # specific for test on certain commits

       - name: Download llm binary
         if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }}
@@ -653,6 +653,7 @@ jobs:
           set BIGDL_LLM_XMX_DISABLED=1
           REM for llava
           set TRANSFORMERS_OFFLINE=1
+          pip install transformers==4.37.0

           cd python\llm\dev\benchmark\all-in-one
           move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16.yaml config.yaml
@@ -664,23 +665,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (32-32 int4+fp16)
+      - name: Prepare igpu perf test for transformers 4.36 (32-32 int4+fp16)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_436.yaml

-      - name: Test on igpu for transformers 4.37 (32-32 int4+fp16)
+      - name: Test on igpu for transformers 4.36 (32-32 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -771,7 +772,7 @@ jobs:
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2
+          pip install transformers==4.37.0

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -788,23 +789,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (1024-128 int4+fp16)
+      - name: Prepare igpu perf test for transformers 4.36 (1024-128 int4+fp16)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_436.yaml

-      - name: Test on igpu for transformers 4.37 (1024-128 int4+fp16)
+      - name: Test on igpu for transformers 4.36 (1024-128 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -812,7 +813,7 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate
-    
+
       - name: Prepare igpu perf test for transformers 4.38 (1024-128 int4+fp16)
         shell: bash
         run: |
@@ -894,7 +895,6 @@ jobs:
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -911,23 +911,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (2048-256 int4+fp16)
+      - name: Prepare igpu perf test for transformers 4.36 (2048-256 int4+fp16)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_436.yaml

-      - name: Test on igpu for transformers 4.37 (2048-256 int4+fp16)
+      - name: Test on igpu for transformers 4.36 (2048-256 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -935,7 +935,7 @@ jobs:
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate
-    
+
       - name: Prepare igpu perf test for transformers 4.38 (2048-256 int4+fp16)
         shell: bash
         run: |
@@ -1017,7 +1017,7 @@ jobs:
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2
+          pip install transformers==4.37.0

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -1034,23 +1034,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (3072-384 int4+fp16)
+      - name: Prepare igpu perf test for transformers 4.36 (3072-384 int4+fp16)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_436.yaml

-      - name: Test on igpu for transformers 4.37 (3072-384 int4+fp16)
+      - name: Test on igpu for transformers 4.36 (3072-384 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -1140,7 +1140,7 @@ jobs:
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2
+          pip install transformers==4.37.0

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -1157,35 +1157,10 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (4096-512 int4+fp16)
-        shell: bash
-        run: |
-          sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
-
-      - name: Test on igpu for transformers 4.37 (4096-512 int4+fp16)
-        shell: cmd
-        run: |
-          call conda activate igpu-perf
-          pip install transformers==4.37.0
-
-          set SYCL_CACHE_PERSISTENT=1
-          set BIGDL_LLM_XMX_DISABLED=1
-
-          cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_437.yaml config.yaml
-          set PYTHONIOENCODING=utf-8
-          python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
-          if %ERRORLEVEL% neq 0 (exit /b 1)
-          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
-          if %ERRORLEVEL% neq 0 (exit /b 1)
-
-          call conda deactivate
-
       - name: Prepare igpu perf test for transformers 4.38 (4096-512 int4+fp16)
         shell: bash
         run: |
-          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml

       - name: Test on igpu for transformers 4.38 (4096-512 int4+fp16)
@@ -1202,7 +1177,7 @@ jobs:
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
-          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2
           if %ERRORLEVEL% neq 0 (exit /b 1)

           call conda deactivate
@@ -1210,7 +1185,7 @@ jobs:
       - name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16)
         shell: bash
         run: |
-          sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml

       - name: Test on igpu for transformers 4.43 (4096-512 int4+fp16)
@@ -1228,7 +1203,7 @@ jobs:
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
-          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
+          python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
           if %ERRORLEVEL% neq 0 (exit /b 1)

           pip uninstall trl -y
@@ -1256,14 +1231,14 @@ jobs:
         shell: bash
         run: |
           sed -i 's/4096-512/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml

       - name: Test on igpu (load_low_bit 1024-128 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2
+          pip install transformers==4.37.0

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -1280,23 +1255,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (load_low_bit 1024-128 int4+fp16)
+      - name: Prepare igpu perf test for transformers 4.36 (load_low_bit 1024-128 int4+fp16)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_436.yaml

-      - name: Test on igpu for transformers 4.37 (load_low_bit 1024-128 int4+fp16)
+      - name: Test on igpu for transformers 4.36 (load_low_bit 1024-128 int4+fp16)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -1385,7 +1360,7 @@ jobs:
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.36.2
+          pip install transformers==4.37.0

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
@@ -1402,23 +1377,23 @@ jobs:

           call conda deactivate

-      - name: Prepare igpu perf test for transformers 4.37 (1024-128)
+      - name: Prepare igpu perf test for transformers 4.36 (1024-128)
         shell: bash
         run: |
           sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_436.yaml

-      - name: Test on igpu for transformers 4.37 (1024-128)
+      - name: Test on igpu for transformers 4.36 (1024-128)
         shell: cmd
         run: |
           call conda activate igpu-perf
-          pip install transformers==4.37.0
+          pip install transformers==4.36.2

           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1

           cd python\llm\dev\benchmark\all-in-one
-          move ..\..\..\test\benchmark\igpu-perf\1024-128_437.yaml config.yaml
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_436.yaml config.yaml
           set PYTHONIOENCODING=utf-8
           python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
           if %ERRORLEVEL% neq 0 (exit /b 1)
@@ -1520,4 +1495,3 @@ jobs:
 #      shell: cmd
 #      run: |
 #        call conda env remove -n igpu-perf -y
-
diff --git a/python/llm/test/benchmark/arc-perf-test-batch2.yaml b/python/llm/test/benchmark/arc-perf-test-batch2.yaml
deleted file mode 100644
index 70447fd7..00000000
--- a/python/llm/test/benchmark/arc-perf-test-batch2.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-repo_id:
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'THUDM/chatglm3-6b-4bit'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-  - 'THUDM/glm-4-9b-chat'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'Qwen/Qwen-VL-Chat'
-  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
-  - '01-ai/Yi-6B-Chat'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-1.5-6B-Chat'
-local_model_hub: '/mnt/disk1/models'
-warm_up: 1
-num_trials: 3
-num_beams: 1 # default to greedy search
-low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
-batch_size: 2 # default to 1
-in_out_pairs:
-  - '32-32'
-  - '1024-128'
-  - '2048-256'
-test_api:
-  - "transformer_int4_fp16_gpu" # on Intel GPU
-cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
-exclude:
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
-task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-test-batch4.yaml b/python/llm/test/benchmark/arc-perf-test-batch4.yaml
deleted file mode 100644
index 3bfd4796..00000000
--- a/python/llm/test/benchmark/arc-perf-test-batch4.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-repo_id:
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'THUDM/chatglm3-6b-4bit'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-  - 'THUDM/glm-4-9b-chat'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'Qwen/Qwen-VL-Chat'
-  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
-  - '01-ai/Yi-6B-Chat'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-1.5-6B-Chat'
-local_model_hub: '/mnt/disk1/models'
-warm_up: 1
-num_trials: 3
-num_beams: 1 # default to greedy search
-low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
-batch_size: 4 # default to 1
-in_out_pairs:
-  - '32-32'
-  - '1024-128'
-  - '2048-256'
-test_api:
-  - "transformer_int4_fp16_gpu" # on Intel GPU
-cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
-exclude:
-  - 'meta-llama/Llama-2-13b-chat-hf:2048'
-  - 'baichuan-inc/Baichuan2-7B-Chat:2048'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:1024'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
-  - 'Qwen/Qwen-VL-Chat:2048'
-# - 'fnlp/moss-moon-003-sft-4bit:1024'
-# - 'fnlp/moss-moon-003-sft-4bit:2048'
-task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-test.yaml b/python/llm/test/benchmark/arc-perf-test.yaml
deleted file mode 100644
index 890b8dbf..00000000
--- a/python/llm/test/benchmark/arc-perf-test.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-repo_id:
-  - 'meta-llama/Llama-2-7b-chat-hf'
-  - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'THUDM/chatglm3-6b-4bit'
-  - 'baichuan-inc/Baichuan2-7B-Chat'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-  - 'THUDM/glm-4-9b-chat'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
-  - 'Qwen/Qwen-VL-Chat'
-  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
-  - '01-ai/Yi-6B-Chat'
-  - 'mistralai/Mistral-7B-Instruct-v0.2'
-  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
-  - '01-ai/Yi-1.5-6B-Chat'
-local_model_hub: '/mnt/disk1/models'
-warm_up: 1
-num_trials: 3
-num_beams: 1 # default to greedy search
-low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
-batch_size: 1 # default to 1
-in_out_pairs:
-  - '32-32'
-  - '1024-128'
-  - '2048-256'
-test_api:
-  - "transformer_int4_fp16_gpu" # on Intel GPU
-cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
-exclude:
-# - 'fnlp/moss-moon-003-sft-4bit:1024'
-# - 'fnlp/moss-moon-003-sft-4bit:2048'
-  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
-task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-436-batch2.yaml b/python/llm/test/benchmark/arc-perf-transformers-436-batch2.yaml
new file mode 100644
index 00000000..42ef79f3
--- /dev/null
+++ b/python/llm/test/benchmark/arc-perf-transformers-436-batch2.yaml
@@ -0,0 +1,16 @@
+repo_id:
+  - 'Qwen/Qwen-VL-Chat'
+local_model_hub: '/mnt/disk1/models'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 2 # default to 1
+in_out_pairs:
+  - '32-32'
+  - '1024-128'
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu" # on Intel GPU
+cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-436-batch4.yaml b/python/llm/test/benchmark/arc-perf-transformers-436-batch4.yaml
new file mode 100644
index 00000000..606b9c6c
--- /dev/null
+++ b/python/llm/test/benchmark/arc-perf-transformers-436-batch4.yaml
@@ -0,0 +1,18 @@
+repo_id:
+  - 'Qwen/Qwen-VL-Chat'
+local_model_hub: '/mnt/disk1/models'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 4 # default to 1
+in_out_pairs:
+  - '32-32'
+  - '1024-128'
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu" # on Intel GPU
+cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+exclude:
+  - 'Qwen/Qwen-VL-Chat:2048'
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-436.yaml b/python/llm/test/benchmark/arc-perf-transformers-436.yaml
new file mode 100644
index 00000000..efdf1419
--- /dev/null
+++ b/python/llm/test/benchmark/arc-perf-transformers-436.yaml
@@ -0,0 +1,16 @@
+repo_id:
+  - 'Qwen/Qwen-VL-Chat'
+local_model_hub: '/mnt/disk1/models'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-32'
+  - '1024-128'
+  - '2048-256'
+test_api:
+  - "transformer_int4_fp16_gpu" # on Intel GPU
+cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml b/python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml
index d675d506..9b9ab1f1 100644
--- a/python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml
+++ b/python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml
@@ -6,6 +6,18 @@ repo_id:
   - 'microsoft/phi-3-vision-128k-instruct'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
+  - 'THUDM/chatglm3-6b-4bit'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
+  - 'mistralai/Mistral-7B-Instruct-v0.2'
+  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-1.5-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
@@ -19,4 +31,6 @@ in_out_pairs:
 test_api:
   - "transformer_int4_fp16_gpu" # on Intel GPU
 cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+exclude:
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
 task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml b/python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml
index f3d55c83..368a8c63 100644
--- a/python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml
+++ b/python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml
@@ -6,6 +6,18 @@ repo_id:
   - 'microsoft/phi-3-vision-128k-instruct'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
+  - 'THUDM/chatglm3-6b-4bit'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
+  - 'mistralai/Mistral-7B-Instruct-v0.2'
+  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-1.5-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
@@ -22,4 +34,8 @@ cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu w
 exclude:
   - 'Qwen/Qwen1.5-7B-Chat:2048'
   - 'meta-llama/Meta-Llama-3-8B-Instruct:2048'
-task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
\ No newline at end of file
+  - 'meta-llama/Llama-2-13b-chat-hf:2048'
+  - 'baichuan-inc/Baichuan2-7B-Chat:2048'
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:1024'
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/arc-perf-transformers-437.yaml b/python/llm/test/benchmark/arc-perf-transformers-437.yaml
index 1c775344..bca87891 100644
--- a/python/llm/test/benchmark/arc-perf-transformers-437.yaml
+++ b/python/llm/test/benchmark/arc-perf-transformers-437.yaml
@@ -6,6 +6,18 @@ repo_id:
   - 'microsoft/phi-3-vision-128k-instruct'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
+  - 'THUDM/chatglm3-6b-4bit'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'SmerkyG/rwkv-5-world-7b' #this model only fp32 is supported for now, fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
+  - 'mistralai/Mistral-7B-Instruct-v0.2'
+  - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
+  - '01-ai/Yi-1.5-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
@@ -19,4 +31,6 @@ in_out_pairs:
 test_api:
   - "transformer_int4_fp16_gpu" # on Intel GPU
 cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+exclude:
+  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
 task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/core-perf-test.yaml b/python/llm/test/benchmark/core-perf-test.yaml
index 55f738de..2def68c1 100644
--- a/python/llm/test/benchmark/core-perf-test.yaml
+++ b/python/llm/test/benchmark/core-perf-test.yaml
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
+  # - 'Qwen/Qwen-7B-Chat' # requires transformers < 4.37.0
   - 'BAAI/AquilaChat2-7B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'WisdomShell/CodeShell-7B'
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index b0bd5f30..759a7566 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -10,9 +10,15 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - 'RWKV/v5-Eagle-7B-HF'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_436.yaml
similarity index 65%
rename from python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
rename to python/llm/test/benchmark/igpu-perf/1024-128_436.yaml
index c6850389..c967f66a 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_436.yaml
@@ -1,11 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
index 39d57568..f66172d9 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -9,9 +9,15 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_436.yaml
similarity index 65%
rename from python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
rename to python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_436.yaml
index 68cbaf2a..c224b65e 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_436.yaml
@@ -1,11 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
index 2730e465..76c35d4d 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
@@ -9,9 +9,14 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_436.yaml
similarity index 68%
rename from python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
rename to python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_436.yaml
index 3839d0d2..917e6d0f 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_436.yaml
@@ -1,10 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
index c53e6283..bf5fc1e9 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -9,9 +9,15 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_436.yaml
similarity index 65%
rename from python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
rename to python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_436.yaml
index 0eddd403..e9566c13 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_436.yaml
@@ -1,11 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
index 47b9839a..60202594 100644
--- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml
@@ -8,9 +8,15 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_436.yaml
similarity index 52%
rename from python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
rename to python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_436.yaml
index 087da977..6448a358 100644
--- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_436.yaml
@@ -1,11 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
@@ -15,5 +9,5 @@ batch_size: 1 # default to 1
 in_out_pairs:
   - '3072-384'
 test_api:
-  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
 cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
index 39115e02..e7017874 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -9,9 +9,15 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
-  - 'Qwen/Qwen-VL-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_436.yaml
similarity index 65%
rename from python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
rename to python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_436.yaml
index 1f0d11a2..8faf43ae 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_436.yaml
@@ -1,11 +1,5 @@
 repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
index 26e128a5..514037a7 100644
--- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
@@ -10,6 +10,13 @@ repo_id:
   - '01-ai/Yi-6B-Chat'
   - 'openbmb/MiniCPM-1B-sft-bf16'
   - 'openbmb/MiniCPM-2B-sft-bf16'
+  - 'Qwen/Qwen1.5-7B-Chat'
+  - 'Qwen/Qwen2-1.5B-Instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
+  - 'microsoft/Phi-3-mini-4k-instruct'
+  - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
deleted file mode 100644
index 4472b5da..00000000
--- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-repo_id:
-  - 'Qwen/Qwen1.5-7B-Chat'
-  - 'Qwen/Qwen2-1.5B-Instruct'
-  - 'Qwen/Qwen2-7B-Instruct'
-  - 'microsoft/Phi-3-mini-4k-instruct'
-  - 'microsoft/Phi-3-mini-128k-instruct'
-  - 'microsoft/phi-3-vision-128k-instruct'
-  - 'openbmb/MiniCPM-V-2_6'
-local_model_hub: 'path to your local model hub'
-warm_up: 1
-num_trials: 3
-num_beams: 1 # default to greedy search
-low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
-batch_size: 1 # default to 1
-in_out_pairs:
-  - '4096-512'
-test_api:
-  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
-cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)