ipex-llm/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml
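
# Config for the iGPU perf benchmark: each listed model is run with a
# 4096-token prompt and 512-token generation, using sym_int4 weights and
# fp16 for non-linear layers on Intel GPU under Windows (see the fields below).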
repo_id:
- 'THUDM/chatglm3-6b'
- 'THUDM/glm-4-9b-chat'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-13b-chat-hf'
- 'meta-llama/Meta-Llama-3-8B-Instruct'
- 'mistralai/Mistral-7B-Instruct-v0.2'
- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
- '01-ai/Yi-6B-Chat'
- 'openbmb/MiniCPM-1B-sft-bf16'
- 'openbmb/MiniCPM-2B-sft-bf16'
- 'Qwen/Qwen1.5-7B-Chat'
- 'Qwen/Qwen2-1.5B-Instruct'
- 'Qwen/Qwen2-7B-Instruct'
- 'microsoft/Phi-3-mini-4k-instruct'
- 'microsoft/Phi-3-mini-128k-instruct'
- 'microsoft/phi-3-vision-128k-instruct'
- 'openbmb/MiniCPM-V-2_6'
- 'dummy/dummy-1.5B'
- 'dummy/dummy-4B'
local_model_hub: 'path to your local model hub'
warm_up: 1 # number of warm-up runs before measurement
num_trials: 3 # number of measured runs per model
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs:
- '4096-512' # 4096 input tokens, 512 output tokens
test_api:
- "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
cpu_embedding: True # whether put embedding to CPU (only avaiable now for gpu win related test_api)
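
# A minimal sketch (illustration only, not this repo's actual runner) of how a
# script could consume the fields above; the print stands in for the real
# benchmark call:
#
#   import yaml
#
#   with open("4096-512_int4_fp16.yaml") as f:
#       conf = yaml.safe_load(f)
#
#   for repo_id in conf["repo_id"]:
#       for pair in conf["in_out_pairs"]:
#           in_len, out_len = (int(x) for x in pair.split("-"))
#           # warm_up runs are discarded; num_trials runs are measured
#           for trial in range(conf["warm_up"] + conf["num_trials"]):
#               print(f"{repo_id}: {in_len}->{out_len} tokens, trial {trial}, "
#                     f"low_bit={conf['low_bit']}, batch_size={conf['batch_size']}")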