Small optimization to glm4 models (#12351)

This commit is contained in:
Yuwen Hu 2024-11-06 19:16:58 +08:00 committed by GitHub
parent c267355b35
commit 872a74481a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 2 additions and 1 deletion

View file

@@ -1458,7 +1458,7 @@ def _optimize_post(model, lightweight_bmm=False):
from ipex_llm.transformers.models.chatglm4v import vision_model_forward
convert_forward(model, vision_module.VisionModel, vision_model_forward)
-    elif model.config.num_layers == 40:
+    elif model.config.num_layers in [40, 28]:
# glm-4-9b
from ipex_llm.transformers.models.chatglm4 import chatglm4_attention_forward
from ipex_llm.transformers.models.chatglm4 import chatglm4_model_forward

View file

@@ -44,6 +44,7 @@ def chatglm4_model_forward(
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else