From f6d5c6af78468b8c2458f5e857edfdef95e48022 Mon Sep 17 00:00:00 2001
From: Guoqiong Song
Date: Wed, 5 Jun 2024 13:35:57 -0700
Subject: [PATCH] fix issue 1407 (#11171)

---
 python/llm/src/ipex_llm/transformers/convert.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py
index 168a83ab..b89320f8 100644
--- a/python/llm/src/ipex_llm/transformers/convert.py
+++ b/python/llm/src/ipex_llm/transformers/convert.py
@@ -822,7 +822,7 @@ def ggml_convert_low_bit(model, qtype, optimize_model=True,
     if optimize_model:
         model = _optimize_post(model, lightweight_bmm)
 
-    if hasattr(model, "config") and \
+    if hasattr(model, "config") and hasattr(model.config, "model_type") and \
             model.config.model_type == "qwen" and hasattr(model.config, "visual"):
         # for Qwen-VL-Chat
         # Due to issue https://github.com/intel/intel-extension-for-pytorch/issues/454,
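
Note on the change: the added hasattr(model.config, "model_type") guard short-circuits the
Qwen-VL check for models whose config objects do not define model_type, which previously
raised AttributeError at this line. A minimal standalone sketch of that failure mode follows;
DummyConfig is a hypothetical stand-in for such a custom config and is not part of ipex-llm.

# Sketch of the failure mode the patched condition guards against.
# `DummyConfig` is a hypothetical config with neither `model_type` nor `visual`.
class DummyConfig:
    pass


config = DummyConfig()

# Pre-patch condition: dereferencing `config.model_type` raises AttributeError
# because the attribute does not exist on this config.
try:
    is_qwen_vl = config.model_type == "qwen" and hasattr(config, "visual")
except AttributeError as e:
    print("old check fails:", e)

# Patched condition: the extra hasattr() check short-circuits, so the missing
# attribute is never accessed and the check simply evaluates to False.
is_qwen_vl = hasattr(config, "model_type") and \
    config.model_type == "qwen" and hasattr(config, "visual")
print("patched check:", is_qwen_vl)  # False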