LLM: add more models and skip runtime error (#9349)

* add more models and skip runtime error

* upgrade transformers

* temporarily removed Mistral-7B-v0.1

* temporarily disable the upload of the Arc perf results
This commit is contained in:
WeiguangHan 2023-11-08 09:45:53 +08:00 committed by GitHub
parent fae6db3ddc
commit 84ab614aab
3 changed files with 43 additions and 34 deletions

View file

@ -108,6 +108,7 @@ jobs:
python -m pip install --upgrade einops
python -m pip install --upgrade transformers_stream_generator
python -m pip install --upgrade tiktoken
python -m pip install transformers==4.34.0
- name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary
@ -134,7 +135,6 @@ jobs:
export http_proxy=${HTTP_PROXY}
export https_proxy=${HTTPS_PROXY}
python run.py
curl -T ./*.csv ${LLM_FTP_URL}/llm/ggml-actions/perf/
cp ./*.csv /mnt/disk1/nightly_perf_gpu/
cd ../../../test/benchmark
python csv_to_html.py -f /mnt/disk1/nightly_perf_gpu/

View file

@ -59,7 +59,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
result = run_deepspeed_transformer_int4_cpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit)
for in_out_pair in in_out_pairs:
if result:
if result and result[in_out_pair]:
results.append([repo_id,
round(np.mean(result[in_out_pair], axis=0)[0]*1000.0, 2),
round(np.mean(result[in_out_pair], axis=0)[1]*1000.0, 2),
@ -357,6 +357,7 @@ def run_transformer_int4_gpu(repo_id,
result = {}
with torch.inference_mode():
for in_out in in_out_pairs:
try:
in_out_len = in_out.split("-")
in_len = int(in_out_len[0])
out_len = int(in_out_len[1])
@ -389,6 +390,8 @@ def run_transformer_int4_gpu(repo_id,
if i >= warm_up:
result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
actual_in_len, actual_out_len])
except RuntimeError:
pass
torch.xpu.empty_cache()
return result

View file

@ -4,13 +4,19 @@ repo_id:
- 'THUDM/chatglm2-6b'
- 'tiiuae/falcon-7b-instruct-with-patch'
- 'mosaicml/mpt-7b-chat'
# - 'bigscience/bloomz-7b1' # temporarily removed
- 'redpajama/gptneox-7b-redpajama-bf16'
- 'bigcode/starcoder-15.5b'
- 'databricks/dolly-v1-6b'
- 'databricks/dolly-v2-7b'
- 'databricks/dolly-v2-12b'
- 'internlm/internlm-chat-7b-8k'
- 'baichuan-inc/Baichuan-13B-Chat'
- 'fnlp/moss-moon-003-sft'
- 'Qwen/Qwen-7B-Chat-10-12'
- 'BAAI/AquilaChat-7B'
- 'baichuan-inc/Baichuan2-7B-Chat'
# - 'mistralai/Mistral-7B-v0.1' # temporarily removed
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3