LLM: fix convert of chatglm (#9190)

Ruonan Wang 2023-10-17 10:48:13 +08:00 committed by GitHub
parent af3b575c7e
commit 77afb8796b


@@ -181,7 +181,7 @@ def optimize(model):
         # todo implement 4.28.0 ~ 4.30.2
         pass
-    if model.config.architectures[0] == "ChatGLMModel":
+    if model.config.architectures is not None and model.config.architectures[0] == "ChatGLMModel":
         if model.config.num_layers == 28 and hasattr(model.config, 'rope_ratio'):
             # chatglm2-6b-32k
             modeling_module_name = model.__class__.__module__
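
Context for the one-line change above, as a minimal standalone sketch (hypothetical, not code from this repo): with Hugging Face transformers, config.architectures can be None for some checkpoints, so indexing it directly raises a TypeError during conversion. The added None check guards against that case.

from transformers import PretrainedConfig

# Hypothetical repro of the failure the guard prevents: `architectures`
# defaults to None on a bare PretrainedConfig, so subscripting it fails.
cfg = PretrainedConfig()
assert cfg.architectures is None

# Unguarded: cfg.architectures[0] would raise
# "TypeError: 'NoneType' object is not subscriptable".

# Guarded, mirroring the pattern in the diff:
if cfg.architectures is not None and cfg.architectures[0] == "ChatGLMModel":
    print("apply ChatGLM-specific optimizations")
else:
    print("skip: architectures missing or not ChatGLMModel")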