From 942d6418e7b391f07dcc90fa6651c2103c4bf71b Mon Sep 17 00:00:00 2001
From: Ruonan Wang <105281011+rnwang04@users.noreply.github.com>
Date: Wed, 18 Oct 2023 19:09:53 +0800
Subject: [PATCH] LLM: fix chatglm kv cache (#9215)

---
 python/llm/src/bigdl/llm/transformers/models/chatglm.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/llm/src/bigdl/llm/transformers/models/chatglm.py b/python/llm/src/bigdl/llm/transformers/models/chatglm.py
index 4f773772..9285df44 100644
--- a/python/llm/src/bigdl/llm/transformers/models/chatglm.py
+++ b/python/llm/src/bigdl/llm/transformers/models/chatglm.py
@@ -77,6 +77,8 @@ def attention_fn(
                                                        device=device)
             new_cache_k[:] = cache_k
             new_cache_v[:] = cache_v
+            cache_k = new_cache_k
+            cache_v = new_cache_v
         key_layer, value_layer = append_kv_cache(cache_k, cache_v,
                                                  key_layer, value_layer)
     elif use_cache:
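
Note: the two added lines rebind the local cache_k/cache_v names to the freshly
extended buffers; without them, append_kv_cache keeps targeting the old,
exhausted tensors. Below is a minimal, self-contained sketch of this pattern.
The helper name, tensor shapes, and the ALLOC_BLOCK constant are illustrative
assumptions, not BigDL's actual extend_kv_cache/append_kv_cache implementations.

import torch

ALLOC_BLOCK = 256  # assumed extra capacity per extension (illustrative only)

def append_with_extend(cache_k, cache_v, cur_len, key_layer, value_layer):
    # cache_*: [batch, heads, capacity, head_dim] buffers holding cur_len
    # valid positions; key_layer/value_layer: [batch, heads, n_new, head_dim].
    new_len = cur_len + key_layer.size(2)
    if new_len > cache_k.size(2):
        # Capacity exhausted: allocate larger buffers and copy the old cache.
        shape = (cache_k.size(0), cache_k.size(1),
                 new_len + ALLOC_BLOCK, cache_k.size(3))
        new_cache_k = torch.empty(shape, dtype=cache_k.dtype,
                                  device=cache_k.device)
        new_cache_v = torch.empty(shape, dtype=cache_v.dtype,
                                  device=cache_v.device)
        new_cache_k[:, :, :cur_len] = cache_k[:, :, :cur_len]
        new_cache_v[:, :, :cur_len] = cache_v[:, :, :cur_len]
        # The two lines the patch adds: rebind the local names so the writes
        # below land in the extended buffers, not the old full ones.
        cache_k = new_cache_k
        cache_v = new_cache_v
    # Append the new tokens and return views over the valid region.
    cache_k[:, :, cur_len:new_len] = key_layer
    cache_v[:, :, cur_len:new_len] = value_layer
    return cache_k[:, :, :new_len], cache_v[:, :, :new_len]

# Usage: a cache at full capacity (4 of 4 slots used) receiving 3 new tokens.
# With the rebinding removed, the slice assignment above fails because the
# old buffer has no room, which is consistent with the bug this patch fixes.
k, v = torch.zeros(1, 2, 4, 8), torch.zeros(1, 2, 4, 8)
new_k, new_v = torch.randn(1, 2, 3, 8), torch.randn(1, 2, 3, 8)
k, v = append_with_extend(k, v, cur_len=4, key_layer=new_k, value_layer=new_v)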