[LLM] Small fixes for win igpu test for ipex 2.1 (#9686)
* Fixes to the install steps for the iGPU performance tests
* Small update to the core performance test model lists
parent 3e8d198b57
commit 82ac2dbf55
2 changed files with 3 additions and 2 deletions
.github/workflows/llm_performance_tests.yml (3 changes)
@@ -232,7 +232,8 @@ jobs:
      - name: Prepare for install bigdl-llm from source
        shell: bash
        run: |
          sed -i 's/"bigdl-core-xe==" + VERSION + "/"bigdl-core-xe/g' python/llm/setup.py
          sed -i 's/"bigdl-core-xe-21==" + VERSION + "/"bigdl-core-xe-21/g' python/llm/setup.py
          sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py

      - name: Install bigdl-llm and other related packages
        shell: cmd
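The added sed line covers the case where the bigdl-core-xe-21 requirement in setup.py ends right after the VERSION pin: the earlier pattern expects a trailing ' + "' after VERSION and therefore never matches such an entry, while the new pattern drops the pin and closes the string literal itself. A minimal sketch of the effect, using an illustrative sample file and variable name rather than the real contents of python/llm/setup.py:

# Stand-in for the kind of requirement entry setup.py is assumed to contain.
cat > /tmp/setup_snippet.py <<'EOF'
xpu_21_requires += ["bigdl-core-xe-21==" + VERSION]
EOF

# The updated sed line from the workflow step above: remove the VERSION pin
# and close the string, so the source build can install any available wheel.
sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' /tmp/setup_snippet.py

cat /tmp/setup_snippet.py
# expected output: xpu_21_requires += ["bigdl-core-xe-21"]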
Second changed file (core performance test model list):

@@ -3,7 +3,7 @@ repo_id:
  - 'THUDM/chatglm3-6b'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'internlm/internlm-chat-7b-8k'
  - 'Qwen/Qwen-7B-Chat-10-12'
  - 'Qwen/Qwen-7B-Chat'
  - 'BAAI/AquilaChat2-7B'
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'WisdomShell/CodeShell-7B'
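The repo_id entries above form a YAML list that the performance-test harness presumably reads to decide which models to benchmark. A minimal sketch of that shape, using a stand-in path since the config file's name is not part of this diff (requires PyYAML):

# Hypothetical stand-in for the model-list config edited above; the real path is not shown here.
cat > /tmp/perf_models.yaml <<'EOF'
repo_id:
  - 'THUDM/chatglm3-6b'
  - 'Qwen/Qwen-7B-Chat'
  - 'meta-llama/Llama-2-7b-chat-hf'
EOF

# Parse the list and print the model ids, as a harness reading this config might.
python -c "import yaml; print(yaml.safe_load(open('/tmp/perf_models.yaml'))['repo_id'])"
# expected output: ['THUDM/chatglm3-6b', 'Qwen/Qwen-7B-Chat', 'meta-llama/Llama-2-7b-chat-hf']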