Support fp8_e4m3 scale search (#11339)

* fp8e4m3 switch off

* fix style
This commit is contained in:
Yina Chen 2024-06-18 11:47:43 +08:00 committed by GitHub
parent e50c890e1f
commit 5dad33e5af
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -290,6 +290,13 @@ def convert_gptq(module, awq=False, llm_awq=False, act_order=False):
def use_scale_search(model_config, qtype):
    """Decide whether scale search should be used for a quantization type.

    Enabled for fp6 on everything except qwen2, and for fp8_e4m3 on
    everything except qwen2, baichuan, and Llama-3-instruct-style models
    (llama with a 128256-token vocab and "instruct" in its name).
    Returns False for all other qtypes.
    """
    mtype = model_config.model_type
    if qtype == ggml_tensor_qtype["fp6"]:
        return mtype != "qwen2"
    if qtype == ggml_tensor_qtype["fp8_e4m3"] and mtype not in ("qwen2", "baichuan"):
        # Llama-3-instruct is carved out of fp8_e4m3 scale search.
        is_llama3_instruct = (
            mtype == "llama"
            and model_config.vocab_size == 128256
            and "instruct" in model_config._name_or_path.lower()
        )
        return not is_llama3_instruct
    return False