LLM: add more models to the arc perf test (#9297)
* LLM: add more models to the arc perf test
* remove some old models
* install some dependencies
This commit is contained in:

parent 6a128aee32
commit 9722e811be

2 changed files with 12 additions and 1 deletion
.github/workflows/llm_performance_tests.yml (vendored): 4 changes

@@ -98,12 +98,16 @@ jobs:
      - name: Install dependencies
        shell: bash
        # pip install transformers_stream_generator for model internlm-chat-7b-8k
        # pip install tiktoken for model Qwen-7B-Chat-10-12
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          python -m pip install --upgrade omegaconf
          python -m pip install --upgrade pandas
          python -m pip install --upgrade einops
          python -m pip install --upgrade transformers_stream_generator
          python -m pip install --upgrade tiktoken

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary
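
The comments in the step above tie the two new packages to specific models in the test list: tiktoken for Qwen-7B-Chat and transformers_stream_generator for internlm-chat-7b-8k. Both models ship custom code alongside their checkpoints, so loading them requires trust_remote_code=True, and that remote code imports the extra packages at load time. Below is a minimal sketch of that load path, assuming the standard transformers API; the hub id Qwen/Qwen-7B-Chat stands in for the local "-10-12" snapshot named in the config, and the benchmark harness itself is not part of this commit.

```python
# Sketch only, not part of this commit: loading the two models that motivate
# the new dependencies. Without tiktoken (Qwen) or transformers_stream_generator
# (internlm, per the workflow comment), the models' remote code fails to import.
from transformers import AutoModelForCausalLM, AutoTokenizer

for repo_id in ("internlm/internlm-chat-7b-8k", "Qwen/Qwen-7B-Chat"):
    tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
```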

Second changed file:

@@ -1,9 +1,16 @@
repo_id:
  - 'THUDM/chatglm2-6b'
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'meta-llama/Llama-2-13b-chat-hf'
  - 'THUDM/chatglm2-6b'
  - 'tiiuae/falcon-7b-instruct-with-patch'
  - 'mosaicml/mpt-7b-chat'
  - 'redpajama/gptneox-7b-redpajama-bf16'
  - 'databricks/dolly-v1-6b'
  - 'databricks/dolly-v2-7b'
  - 'databricks/dolly-v2-12b'
  - 'internlm/internlm-chat-7b-8k'
  - 'Qwen/Qwen-7B-Chat-10-12'
  - 'BAAI/AquilaChat-7B'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3
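
For context on how this configuration is likely consumed: the workflow installs omegaconf and pandas, which suggests the benchmark script loads this YAML, runs each listed model warm_up times before timing, and records num_trials measured runs. The sketch below is a rough illustration under those assumptions; the file name arc-perf-test.yaml, the run_one_trial helper, and the latency column are illustrative names, not taken from this commit.

```python
# Hypothetical sketch of consuming the perf-test config above with OmegaConf.
from omegaconf import OmegaConf
import pandas as pd

def run_one_trial(model_path: str) -> float:
    """Illustrative stand-in for one timed generation run; returns seconds."""
    return 0.0  # a real harness would load the model and time generation

conf = OmegaConf.load("arc-perf-test.yaml")   # illustrative file name
records = []
for repo_id in conf.repo_id:
    # Presumably resolved against the shared local hub rather than downloaded.
    model_path = f"{conf.local_model_hub}/{repo_id.split('/')[-1]}"
    for _ in range(conf.warm_up):             # warm-up runs, not recorded
        run_one_trial(model_path)
    for _ in range(conf.num_trials):          # measured trials
        records.append({"repo_id": repo_id, "latency_s": run_one_trial(model_path)})

df = pd.DataFrame(records)
```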