From 77afb8796bcab78adb78aa9af411521ac42edb2a Mon Sep 17 00:00:00 2001
From: Ruonan Wang <105281011+rnwang04@users.noreply.github.com>
Date: Tue, 17 Oct 2023 10:48:13 +0800
Subject: [PATCH] LLM: fix convert of chatglm (#9190)

---
 python/llm/src/bigdl/llm/transformers/convert.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/bigdl/llm/transformers/convert.py b/python/llm/src/bigdl/llm/transformers/convert.py
index 8529a7f0..6d03a55d 100644
--- a/python/llm/src/bigdl/llm/transformers/convert.py
+++ b/python/llm/src/bigdl/llm/transformers/convert.py
@@ -181,7 +181,7 @@ def optimize(model):
         # todo implement 4.28.0 ~ 4.30.2
         pass
 
-    if model.config.architectures[0] == "ChatGLMModel":
+    if model.config.architectures is not None and model.config.architectures[0] == "ChatGLMModel":
         if model.config.num_layers == 28 and hasattr(model.config, 'rope_ratio'):
             # chatglm2-6b-32k
             modeling_module_name = model.__class__.__module__
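
Context for the one-line fix (a hedged sketch, not part of the patch): in
Hugging Face transformers, `PretrainedConfig.architectures` defaults to None,
so subscripting it unguarded raises a TypeError for any model whose config
omits the field. The model name and prints below are illustrative only.

    from transformers import PretrainedConfig

    config = PretrainedConfig()  # architectures defaults to None

    # Before the fix, the unguarded subscript would raise:
    #     config.architectures[0]
    # TypeError: 'NoneType' object is not subscriptable

    # After the fix, the None check short-circuits the comparison safely:
    if config.architectures is not None and config.architectures[0] == "ChatGLMModel":
        print("apply ChatGLM-specific optimization")
    else:
        print("skip ChatGLM path")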