repo_id:
  - 'THUDM/chatglm3-6b'
  - 'THUDM/chatglm2-6b'
  - 'THUDM/glm-4-9b-chat'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'baichuan-inc/Baichuan2-13B-Chat'
  - 'internlm/internlm-chat-7b'
  - 'Qwen/Qwen-7B-Chat'
  - 'BAAI/AquilaChat2-7B'
  # - '01-ai/Yi-6B'
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'meta-llama/Llama-2-13b-chat-hf'
  - 'WisdomShell/CodeShell-7B-Chat'
  - 'tiiuae/falcon-7b-instruct-with-patch'
  - 'mosaicml/mpt-7b-chat'
  # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
  - 'RWKV/rwkv-4-world-7b'
  - 'RWKV/rwkv-5-world-7b'
  - 'IEITYuan/Yuan2-2B-hf'
  - 'mistralai/Mistral-7B-Instruct-v0.1'
local_model_hub: 'path to your local model hub'
warm_up: 1 # number of warm-up runs before timing
num_trials: 3 # number of timed runs per model
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs: # each entry is '<input tokens>-<output tokens>'
  - '2048-256'
test_api:
  - "transformer_int4_gpu_win" # on Intel GPU for Windows (captures GPU peak memory)
cpu_embedding: True # whether to put embedding on CPU (currently only available for the gpu win related test_api)
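# --- Usage sketch (illustrative, not part of the config) ---
# A minimal example of how a runner script might consume this file with
# PyYAML. The filename 'config.yaml' and the loop below are assumptions
# for illustration, not the benchmark's actual implementation:
#
#   import itertools
#   import yaml
#
#   with open("config.yaml") as f:
#       conf = yaml.safe_load(f)
#
#   # Expand the list-valued fields into one run per combination.
#   for repo_id, pair, api in itertools.product(
#           conf["repo_id"], conf["in_out_pairs"], conf["test_api"]):
#       in_len, out_len = (int(x) for x in pair.split("-"))
#       print(f"{repo_id}: {in_len} in / {out_len} out via {api}")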