From 3685622f295587829a52ef02181f198ee1aee540 Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Wed, 31 Jan 2024 10:31:10 +0800
Subject: [PATCH] LLM: fix llama 4.36 forward(#10047)

---
 python/llm/src/bigdl/llm/transformers/models/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/bigdl/llm/transformers/models/llama.py b/python/llm/src/bigdl/llm/transformers/models/llama.py
index b773c424..f1e160a1 100644
--- a/python/llm/src/bigdl/llm/transformers/models/llama.py
+++ b/python/llm/src/bigdl/llm/transformers/models/llama.py
@@ -638,7 +638,7 @@ def llama_attention_forward_4_36(
             "Please make sure use `attention_mask` instead.`"
         )
 
-    bsz, q_len, _ = hidden_states.size()
+    bsz, q_len, hidden_size = hidden_states.size()
     device = hidden_states.device
     # for flash attention
     original_dtype = hidden_states.dtype
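For readers skimming the patch, a minimal sketch of what the one-line change enables: instead of discarding the last dimension of hidden_states.size(), the 4.36 attention forward binds it to hidden_size so the value can be reused later in the function (for example when reshaping the attention output) rather than being re-derived from the module config. The toy dimensions, head split, and reshape step below are assumptions for illustration, not the actual BigDL implementation.

# Illustrative sketch only -- not the BigDL/transformers code itself.
# Shows why unpacking hidden_size from hidden_states.size() is handy:
# the same value can drive later reshapes in the forward pass (assumption).
import torch

bsz, q_len, hidden_size = 2, 8, 64          # toy dimensions (assumed)
hidden_states = torch.randn(bsz, q_len, hidden_size)

# Before the fix: the last dimension was thrown away.
_bsz, _q_len, _ = hidden_states.size()

# After the fix: keep it, so downstream code can do, e.g.:
bsz, q_len, hidden_size = hidden_states.size()
num_heads = 8                                # assumed head count
head_dim = hidden_size // num_heads
attn_output = hidden_states.view(bsz, q_len, num_heads, head_dim)
attn_output = attn_output.reshape(bsz, q_len, hidden_size)  # reuses the unpacked value
print(attn_output.shape)  # torch.Size([2, 8, 64])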