LLM: add remaining models to the arc perf test (#9384)

* add remaining models

* modify the file path that stores the test results on the FTP server

* resolve review comments
WeiguangHan authored on 2023-11-09 14:28:42 +08:00 (committed by GitHub)
parent d4b248fcd4
commit 34449cb4bb
2 changed files with 4 additions and 5 deletions


@@ -108,7 +108,6 @@ jobs:
python -m pip install --upgrade einops
python -m pip install --upgrade transformers_stream_generator
python -m pip install --upgrade tiktoken
python -m pip install transformers==4.34.0
- name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary
@@ -132,12 +131,12 @@ jobs:
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
mv python/llm/test/benchmark/arc-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
cd python/llm/dev/benchmark/all-in-one
export http_proxy=${HTTP_PROXY}
export https_proxy=${HTTPS_PROXY}
python run.py
cp ./*.csv /mnt/disk1/nightly_perf_gpu/
cd ../../../test/benchmark
python csv_to_html.py -f /mnt/disk1/nightly_perf_gpu/
cd ../../dev/benchmark/all-in-one/
curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/
llm-performance-test-on-spr:
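
For context on the steps above: run.py writes per-model CSV results, csv_to_html.py renders the CSVs in the folder passed via -f into an HTML report, and the final curl uploads the CSVs to the FTP server under the updated llm/nightly_perf/gpu/ path. The sketch below only illustrates the CSV-to-HTML step using pandas; it is not the actual csv_to_html.py, and the -f/--folder argument merely mirrors how the workflow invokes that script.

    # Illustrative sketch only: convert benchmark CSVs in a folder to HTML tables.
    # This is NOT the real csv_to_html.py used by the workflow.
    import argparse
    import glob
    import os

    import pandas as pd

    parser = argparse.ArgumentParser(description="Render benchmark CSVs as HTML")
    parser.add_argument("-f", "--folder", required=True,
                        help="folder containing the benchmark CSV files")
    args = parser.parse_args()

    for csv_path in glob.glob(os.path.join(args.folder, "*.csv")):
        df = pd.read_csv(csv_path)
        # Write an .html file next to each .csv, e.g. results.csv -> results.html
        df.to_html(os.path.splitext(csv_path)[0] + ".html", index=False)
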


@@ -4,7 +4,6 @@ repo_id:
- 'THUDM/chatglm2-6b'
- 'tiiuae/falcon-7b-instruct-with-patch'
- 'mosaicml/mpt-7b-chat'
# - 'bigscience/bloomz-7b1' # temporarily removed
- 'redpajama/gptneox-7b-redpajama-bf16'
- 'bigcode/starcoder-15.5b'
- 'databricks/dolly-v1-6b'
@@ -16,7 +15,8 @@ repo_id:
- 'Qwen/Qwen-7B-Chat-10-12'
- 'BAAI/AquilaChat-7B'
- 'baichuan-inc/Baichuan2-7B-Chat'
# - 'mistralai/Mistral-7B-v0.1' # temporarily removed
# - 'mistralai/Mistral-7B-v0.1'
- 'bigscience/bloomz-7b1'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3
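
For reference, the all-in-one benchmark's run.py consumes this config.yaml. The sketch below shows how such a config could be loaded and iterated with PyYAML; the run_model helper and the local-path convention are illustrative assumptions, not the actual run.py implementation.

    # Minimal sketch, assuming a PyYAML-style loader: iterate the models declared
    # in config.yaml. run_model and the local path layout are hypothetical
    # placeholders, not the real all-in-one run.py logic.
    import os
    import yaml

    def run_model(repo_id: str, model_path: str, warm_up: int, num_trials: int) -> None:
        # Placeholder for the actual benchmark (load model, time generation, write CSV).
        print(f"benchmarking {repo_id} from {model_path} "
              f"({warm_up} warm-up run(s), {num_trials} trial(s))")

    if __name__ == "__main__":
        with open("config.yaml") as f:
            conf = yaml.safe_load(f)

        for repo_id in conf["repo_id"]:
            # Assumed convention: models live under local_model_hub by their short name.
            local_path = os.path.join(conf["local_model_hub"], repo_id.split("/")[-1])
            run_model(repo_id, local_path, conf["warm_up"], conf["num_trials"])
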