diff --git a/python/llm/src/ipex_llm/transformers/models/llama.py b/python/llm/src/ipex_llm/transformers/models/llama.py
index 2c9c17e7..dfbbaf00 100644
--- a/python/llm/src/ipex_llm/transformers/models/llama.py
+++ b/python/llm/src/ipex_llm/transformers/models/llama.py
@@ -305,6 +305,16 @@ def llama_mlp_forward(
         )
         hidden_states = attn_output.view(x.shape)
         return hidden_states
+    elif x.device.type == "xpu" and not self.training:
+        import xe_addons
+        gate = self.gate_proj(x)
+        up = self.up_proj(x)
+        xe_addons.mlp_silu_mul_inplaced(gate, up)
+        out = self.down_proj(gate)
+        if residual is not None:
+            return out + residual
+        else:
+            return out
     else:
         a = self.act_fn(self.gate_proj(x))
         b = self.up_proj(x)
diff --git a/python/llm/test/inference_gpu/test_transformers_api_mlp.py b/python/llm/test/inference_gpu/test_transformers_api_mlp.py
index c6229d73..d46d939a 100644
--- a/python/llm/test/inference_gpu/test_transformers_api_mlp.py
+++ b/python/llm/test/inference_gpu/test_transformers_api_mlp.py
@@ -134,7 +134,7 @@ class Test_Optimize_Gpu_Model:
         # currently only compare the output of the last mlp layer.
         layer_before_MLP = "model.layers.31.post_attention_layernorm"
         MLP_layer = "model.layers.31.mlp"
-        lower_bound = 0
+        lower_bound = 1e-3
         self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, MLP_layer, layer_before_MLP, lower_bound)
 
     def Llama2_7B_gpu_model(self, Name, Model, Tokenizer, model_path):
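
Note on the fused path: the new XPU branch is assumed to compute the same thing as the unfused `else` branch, where `act_fn` is SiLU for Llama, i.e. `xe_addons.mlp_silu_mul_inplaced(gate, up)` is taken to write `silu(gate) * up` back into `gate` before `down_proj`. The sketch below is a plain-PyTorch reference of that assumed semantics (the function name `mlp_silu_mul_reference` is hypothetical, not part of the patch), which presumably explains why the test tolerance `lower_bound` is relaxed from `0` to `1e-3`.

```python
import torch
import torch.nn.functional as F

def mlp_silu_mul_reference(gate: torch.Tensor, up: torch.Tensor) -> None:
    # Assumed in-place semantics of the fused kernel: gate <- silu(gate) * up
    gate.copy_(F.silu(gate) * up)

# Usage sketch: the fused path and the unfused else branch should agree
# within the test's relaxed tolerance.
gate = torch.randn(2, 11008)
up = torch.randn(2, 11008)
expected = F.silu(gate) * up          # unfused reference (else branch)
mlp_silu_mul_reference(gate, up)      # stand-in for the fused XPU kernel
assert torch.allclose(gate, expected)
```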