* add config and default value
* add config in yaml
* remove lookahead and max_matching_ngram_size in config
* remove streaming and use_fp16_torch_dtype in test yaml
* update task in readme
* update commit of task
repo_id:
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'meta-llama/Llama-2-13b-chat-hf'
  - 'THUDM/chatglm2-6b'
  - 'THUDM/chatglm3-6b-4bit'
  - 'tiiuae/falcon-7b-instruct-with-patch'
  - 'mosaicml/mpt-7b-chat'
  - 'redpajama/gptneox-7b-redpajama-bf16'
  - 'bigcode/starcoder-15.5b-4bit'
  - 'databricks/dolly-v1-6b'
  - 'databricks/dolly-v2-7b'
  # - 'databricks/dolly-v2-12b'
  - 'internlm/internlm-chat-7b'
  - 'Qwen/Qwen-7B-Chat'
  - 'BAAI/AquilaChat-7B'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
  - 'bigscience/bloomz-7b1'
  # - 'fnlp/moss-moon-003-sft-4bit' # moss-moon-003-sft cannot work on transformers 4.34+
  - 'mistralai/Mistral-7B-v0.1'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
batch_size: 2 # default to 1
in_out_pairs:
  - '32-32'
  - '1024-128'
  - '2048-256'
test_api:
  - "transformer_int4_fp16_gpu" # on Intel GPU
cpu_embedding: False # whether to put embedding on CPU (currently only available for gpu win related test_api)
exclude:
  - 'bigcode/starcoder-15.5b-4bit:2048'
  # - 'databricks/dolly-v2-12b:2048'
  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
  - 'bigscience/bloomz-7b1:2048'
task: 'continuation' # task can be 'continuation', 'QA' or 'summarize'
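
For reference, a minimal sketch of how a harness might consume this config. Assumptions: PyYAML is available, run_model is a hypothetical stand-in for the real benchmark entry point, and exclude entries are matched as 'repo_id:input_length' (inferred from the entries above); this is an illustration, not the actual runner.

# sketch.py -- hypothetical consumer of the config above
import yaml

def run_model(repo_id, api, in_out, conf):
    # Hypothetical placeholder: the real harness would load the model
    # with the requested low_bit precision and time generation here.
    print(f"benchmarking {repo_id} ({api}, {in_out}, task={conf['task']})")

with open('config.yaml') as f:
    conf = yaml.safe_load(f)

excluded = set(conf.get('exclude') or [])

for api in conf['test_api']:
    for repo_id in conf['repo_id']:
        for in_out in conf['in_out_pairs']:
            in_len = in_out.split('-')[0]
            # Skip combinations listed in `exclude` as 'repo_id:input_length'
            if f"{repo_id}:{in_len}" in excluded:
                continue
            run_model(repo_id, api, in_out, conf)

With the config as written, this would run each remaining model against the '32-32', '1024-128', and '2048-256' input/output pairs, skipping the three 2048-token combinations listed under exclude.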