Small optimization to glm4 models (#12351)
parent c267355b35
commit 872a74481a

2 changed files with 2 additions and 1 deletion
```diff
@@ -1458,7 +1458,7 @@ def _optimize_post(model, lightweight_bmm=False):
                     from ipex_llm.transformers.models.chatglm4v import vision_model_forward
                     convert_forward(model, vision_module.VisionModel, vision_model_forward)
 
-            elif model.config.num_layers == 40:
+            elif model.config.num_layers in [40, 28]:
                 # glm-4-9b
                 from ipex_llm.transformers.models.chatglm4 import chatglm4_attention_forward
                 from ipex_llm.transformers.models.chatglm4 import chatglm4_model_forward
```
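This hunk widens the dispatch so that 28-layer glm4 checkpoints go through the same optimized chatglm4 attention and model forwards that were previously applied only to the 40-layer glm-4-9b. The dispatch works by rebinding an optimized forward onto matching modules; the snippet below is a minimal sketch of that `convert_forward` pattern, with an illustrative name and body that are assumptions rather than the actual ipex_llm implementation:

```python
import types
import torch.nn as nn

def convert_forward_sketch(model: nn.Module, module_class: type, new_forward) -> None:
    """Hypothetical sketch of forward replacement by module class."""
    for module in model.modules():
        if isinstance(module, module_class):
            # Bind new_forward as a bound method so `self` resolves
            # to the module instance being patched.
            module.forward = types.MethodType(new_forward, module)
```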
```diff
@@ -44,6 +44,7 @@ def chatglm4_model_forward(
     use_cache: Optional[bool] = None,
     output_hidden_states: Optional[bool] = None,
     return_dict: Optional[bool] = None,
+    **kwargs,
 ) -> Union[Tuple, BaseModelOutputWithPast]:
     output_hidden_states = (
         output_hidden_states if output_hidden_states is not None else
```
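The second hunk adds a trailing `**kwargs` to the patched `chatglm4_model_forward` signature, making the replacement tolerant of extra keyword arguments a caller may pass, for example arguments introduced by newer transformers releases that the optimized forward does not use. A minimal, self-contained sketch of the failure mode being avoided (the function names and the `cache_position` keyword here are hypothetical):

```python
# Hypothetical stand-ins for a patched forward, not ipex_llm code.
def forward_strict(input_ids, use_cache=None):
    return input_ids

def forward_tolerant(input_ids, use_cache=None, **kwargs):
    # Unexpected extras (e.g. a hypothetical cache_position) are absorbed.
    return input_ids

forward_tolerant([1, 2, 3], cache_position=0)    # OK: extra kwarg ignored
# forward_strict([1, 2, 3], cache_position=0)    # TypeError: unexpected keyword argument
```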