From deee65785cc90edcd307ea2f4723d3152086f8ca Mon Sep 17 00:00:00 2001
From: Xiangyu Tian <109123695+xiangyuT@users.noreply.github.com>
Date: Thu, 7 Dec 2023 11:32:33 +0800
Subject: [PATCH] [LLM] vLLM: Delete last_kv_cache before prefilling (#9619)

Remove last_kv_cache before prefilling to reduce peak memory usage.
---
 .../llm/src/bigdl/llm/vllm/model_executor/models/bigdl_llama.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/llm/src/bigdl/llm/vllm/model_executor/models/bigdl_llama.py b/python/llm/src/bigdl/llm/vllm/model_executor/models/bigdl_llama.py
index 331e740e..cecf4df6 100644
--- a/python/llm/src/bigdl/llm/vllm/model_executor/models/bigdl_llama.py
+++ b/python/llm/src/bigdl/llm/vllm/model_executor/models/bigdl_llama.py
@@ -186,6 +186,8 @@ class BigDLLlamaForCausalLM(BigDLModelForCausalLM):
             "use_cache": True,
             # "return_dict": True,
         }
+        if self.last_kv_cache:
+            del self.last_kv_cache
         # pdb.set_trace()
         if self.device.type == 'xpu':
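
The sketch below illustrates the idea behind the change: releasing the previous decoding step's KV cache before allocating the prefill tensors keeps the old and new caches from coexisting, which is what lowers peak memory. It is not the BigDL-LLM implementation; the KVCacheHolder class, tensor shapes, and decode_step/prefill names are illustrative assumptions, and only the last_kv_cache attribute and the if/del pattern mirror the patch.

import torch


class KVCacheHolder:
    """Hypothetical stand-in for a model wrapper that keeps the last step's KV cache."""

    def __init__(self):
        self.last_kv_cache = None

    def decode_step(self):
        # Pretend a decoding step produced a (key, value) cache that we hold on to.
        self.last_kv_cache = [torch.zeros(1, 32, 1024, 128) for _ in range(2)]

    def prefill(self, prompt_len: int):
        # Drop the stale cache *before* building the prefill cache, mirroring the
        # patch's `if self.last_kv_cache: del self.last_kv_cache`, so both caches
        # are never resident at the same time.
        if self.last_kv_cache:
            del self.last_kv_cache
            self.last_kv_cache = None  # keep the attribute defined for later checks
        new_cache = [torch.zeros(1, 32, prompt_len, 128) for _ in range(2)]
        self.last_kv_cache = new_cache
        return new_cache


if __name__ == "__main__":
    m = KVCacheHolder()
    m.decode_step()
    m.prefill(prompt_len=512)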