From 1b3c7a69283ac5a327a61bed2faaf6a56aa2bb10 Mon Sep 17 00:00:00 2001
From: Yishuo Wang
Date: Mon, 13 May 2024 14:09:55 +0800
Subject: [PATCH] remove phi3 empty cache (#10997)

---
 python/llm/src/ipex_llm/transformers/convert.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py
index e6e0e009..fc0766ae 100644
--- a/python/llm/src/ipex_llm/transformers/convert.py
+++ b/python/llm/src/ipex_llm/transformers/convert.py
@@ -188,13 +188,6 @@ def is_linear_module(module):
     return result, (in_features, out_features, mp_group)
 
 
-def empty_cache_post(module, input, output):
-    try:
-        torch.xpu.empty_cache()
-    except:  # cpu
-        pass
-
-
 def convert_gptq(module, awq=False, llm_awq=False, act_order=False):
     from ipex_llm.transformers.low_bit_linear import get_block_size
     Q4_1 = get_block_size("asym_int4")
@@ -1524,8 +1517,6 @@ def _optimize_post(model, lightweight_bmm=False):
         from ipex_llm.transformers.models.phi3 import model_forward_wrapper
         model_forward = model_forward_wrapper(module.Phi3Model.forward)
         convert_forward(model, module.Phi3Model, model_forward)
-        # Empty cache after the first attention to run long context.
-        model.model.layers[0].self_attn.register_forward_hook(empty_cache_post)
     elif model.config.model_type == 'yuan':
         modeling_module_name = model.__class__.__module__
         module = importlib.import_module(modeling_module_name)
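
Note: the hook removed above follows the standard PyTorch forward-hook pattern.
A minimal self-contained sketch of that pattern, using a plain nn.Linear as a
stand-in for the phi-3 self_attn module; the stand-in module and the explicit
exception types are assumptions for illustration (the original hook used a
bare except to cover CPU-only runs):

import torch
import torch.nn as nn


def empty_cache_post(module, input, output):
    # Release cached XPU memory after this module's forward pass.
    # torch.xpu is only usable on XPU builds/devices; fall back to a
    # no-op elsewhere, as the original bare-except version did.
    try:
        torch.xpu.empty_cache()
    except (AttributeError, RuntimeError):  # cpu-only build or no XPU device
        pass


layer = nn.Linear(8, 8)  # stand-in for model.model.layers[0].self_attn
handle = layer.register_forward_hook(empty_cache_post)
layer(torch.randn(1, 8))  # the hook fires after each forward pass
handle.remove()           # detach the hook when it is no longer needed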