From 054952f82f13994a94fd8b3d96df5afdb64f1a16 Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Thu, 18 Jan 2024 09:28:10 +0800
Subject: [PATCH] LLM: Fix rope of chatglm3 to support speculative decoding on
 CPU (#9926)

---
 python/llm/src/bigdl/llm/transformers/models/chatglm2.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/llm/src/bigdl/llm/transformers/models/chatglm2.py b/python/llm/src/bigdl/llm/transformers/models/chatglm2.py
index bef388d8..4118f6bd 100644
--- a/python/llm/src/bigdl/llm/transformers/models/chatglm2.py
+++ b/python/llm/src/bigdl/llm/transformers/models/chatglm2.py
@@ -218,7 +218,8 @@ def chatglm2_attention_forward_8eb45c(
 
     # apply relative positional encoding (rotary embedding)
     if rotary_pos_emb is not None:
-        if len(rotary_pos_emb) == 2:  # use_fuse_rope, see chatglm2_model_forward
+        if len(rotary_pos_emb) == 2 and isinstance(rotary_pos_emb, tuple):
+            # use_fuse_rope, see chatglm2_model_forward
             cos, sin = rotary_pos_emb
             rot_dim = cos.shape[-1]
             query_layer = query_layer.transpose(0, 1)
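
For context, a minimal sketch of the bug this patch guards against (the helper
names and tensor shapes below are illustrative assumptions, not BigDL code;
only PyTorch is assumed). During speculative decoding, several draft tokens are
verified in a single forward pass, so `rotary_pos_emb` can arrive as a plain
rotary-embedding tensor whose leading dimension happens to be 2. The old
`len(rotary_pos_emb) == 2` test then misreads it as the fused-rope
`(cos, sin)` tuple that `chatglm2_model_forward` passes on the fused path;
the patched test additionally requires an actual tuple.

    import torch

    def is_fused_rope_old(rotary_pos_emb):
        # Old check: any length-2 value passes, including a 2-row tensor.
        return len(rotary_pos_emb) == 2

    def is_fused_rope_new(rotary_pos_emb):
        # Patched check: also require an actual (cos, sin) tuple.
        return len(rotary_pos_emb) == 2 and isinstance(rotary_pos_emb, tuple)

    # Fused-rope path: a (cos, sin) tuple (shapes illustrative).
    fused = (torch.randn(1, 1, 32), torch.randn(1, 1, 32))

    # Non-fused path: a rotary-embedding tensor; verifying 2 draft tokens
    # makes its leading (sequence) dimension, and hence len(), equal 2.
    draft_emb = torch.randn(2, 1, 16, 2)

    assert is_fused_rope_old(fused)            # correct
    assert is_fused_rope_old(draft_emb)        # wrong: tensor taken for a tuple
    assert is_fused_rope_new(fused)            # still correct
    assert not is_fused_rope_new(draft_emb)    # fixed: non-fused rope path

With the extra `isinstance` check, a 2-token verification batch no longer
falls into the `cos, sin = rotary_pos_emb` unpacking meant for the fused path.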