From 6a5ca17afc9d3bbcc92522e9d599ffcbbe084378 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 22 Aug 2024 11:09:58 +0800
Subject: [PATCH] fix typos

---
 python/llm/src/ipex_llm/transformers/models/baichuan.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/llm/src/ipex_llm/transformers/models/baichuan.py b/python/llm/src/ipex_llm/transformers/models/baichuan.py
index 111dc1e5..d91eb1e7 100644
--- a/python/llm/src/ipex_llm/transformers/models/baichuan.py
+++ b/python/llm/src/ipex_llm/transformers/models/baichuan.py
@@ -283,7 +283,7 @@ def baichuan_attention_forward_7b(
                 key_states, value_states, self.layer_idx,
                 query_states, attention_mask, 1,
                 self.config, enough_kv_room, KV_CACHE_ALLOC_BLOCK_LENGTH)
-
+
     if self.training:
         warnings.warn("xops is not supported on Intel GPU, so just use normal implementation")