diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py
index 168a83ab..b89320f8 100644
--- a/python/llm/src/ipex_llm/transformers/convert.py
+++ b/python/llm/src/ipex_llm/transformers/convert.py
@@ -822,7 +822,7 @@ def ggml_convert_low_bit(model, qtype, optimize_model=True,
     if optimize_model:
         model = _optimize_post(model, lightweight_bmm)
 
-    if hasattr(model, "config") and \
+    if hasattr(model, "config") and hasattr(model.config, "model_type") and \
             model.config.model_type == "qwen" and hasattr(model.config, "visual"):
         # for Qwen-VL-Chat
         # Due to issue https://github.com/intel/intel-extension-for-pytorch/issues/454,