From 3d24823cda0db012e828a53a924962f19283f44c Mon Sep 17 00:00:00 2001
From: SONG Ge <38711238+sgwhat@users.noreply.github.com>
Date: Fri, 24 Nov 2023 14:33:04 +0800
Subject: [PATCH] hot-fix mistral kv_cache (#9528)

---
 python/llm/src/bigdl/llm/transformers/models/mistral.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/python/llm/src/bigdl/llm/transformers/models/mistral.py b/python/llm/src/bigdl/llm/transformers/models/mistral.py
index 847f43b9..9a9618bf 100644
--- a/python/llm/src/bigdl/llm/transformers/models/mistral.py
+++ b/python/llm/src/bigdl/llm/transformers/models/mistral.py
@@ -114,6 +114,11 @@ def mistral_attention_forward(
                                                        dtype=cache_k.dtype,
                                                        device=device)
+            new_cache_k[:] = cache_k
+            new_cache_v[:] = cache_v
+            cache_k = new_cache_k
+            cache_v = new_cache_v
+
             key_states, value_states = append_kv_cache(cache_k, cache_v,
                                                        key_states, value_states)
         elif use_cache: