# ipex-llm/python/llm/test/benchmark/stable-version-arc-perf-test-sym_int4.yaml
repo_id:
- 'meta-llama/Llama-2-7b-chat-hf'
- 'THUDM/chatglm2-6b'
- 'THUDM/chatglm3-6b'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'Qwen/Qwen-7B-Chat'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
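# each in_out_pair below is 'input_tokens-output_tokens', i.e. prompt length and generation length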
in_out_pairs:
- '32-32'
- '512-256'
- '1024-128'
- '2048-256'
test_api:
- "transformer_int4_gpu" # on Intel GPU
cpu_embedding: False # whether to put the embedding layer on CPU (currently only available for GPU Windows related test_api)
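# combinations to skip; each entry appears to follow 'repo_id:input_tokens:batch_size'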
exclude:
- 'meta-llama/Llama-2-7b-chat-hf:2048:4'
- 'meta-llama/Llama-2-7b-chat-hf:1024:8'
- 'meta-llama/Llama-2-7b-chat-hf:2048:8'
- 'THUDM/chatglm2-6b:2048:8'
- 'THUDM/chatglm3-6b:2048:8'
- 'baichuan-inc/Baichuan2-7B-Chat:2048:2'
- 'baichuan-inc/Baichuan2-7B-Chat:1024:4'
- 'baichuan-inc/Baichuan2-7B-Chat:2048:4'
- 'baichuan-inc/Baichuan2-7B-Chat:512:8'
- 'baichuan-inc/Baichuan2-7B-Chat:1024:8'
- 'baichuan-inc/Baichuan2-7B-Chat:2048:8'
- 'Qwen/Qwen-7B-Chat:2048:2'
- 'Qwen/Qwen-7B-Chat:1024:4'
- 'Qwen/Qwen-7B-Chat:2048:4'
- 'Qwen/Qwen-7B-Chat:512:8'
- 'Qwen/Qwen-7B-Chat:1024:8'
- 'Qwen/Qwen-7B-Chat:2048:8'