From b3f5dd5b5d0536394925a13555b371f89fcae811 Mon Sep 17 00:00:00 2001
From: Ruonan Wang <105281011+rnwang04@users.noreply.github.com>
Date: Fri, 8 Sep 2023 16:01:17 +0800
Subject: [PATCH] LLM: update q8 convert xpu&cpu (#8930)

---
 python/llm/src/bigdl/llm/transformers/low_bit_linear.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
index 51231275..27e1e44c 100644
--- a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
+++ b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
@@ -60,6 +60,7 @@ IS_SERVER = is_server()
 IS_SPR = is_spr()
 TORCH_LINEAR_THRESHOLD = 96
 SYM_INT4 = ggml_tensor_qtype["sym_int4"]
+SYM_INT8 = ggml_tensor_qtype["sym_int8"]


 def ggml_convert_qtype(tensor: torch.Tensor, qtype: int, device=None):
@@ -98,7 +99,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int

     src = ctypes.c_void_p(tensor.data.data_ptr())

-    if qtype == ggml_tensor_qtype["sym_int4"]:
+    if qtype in [SYM_INT4, SYM_INT8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -123,7 +124,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int

     src = ctypes.c_void_p(tensor.data.data_ptr())

-    if qtype == ggml_tensor_qtype["sym_int4"]:
+    if qtype in [SYM_INT4, SYM_INT8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
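
Note on the change: in both conversion directions (cpu2xpu and xpu2cpu), the patch widens the destination-buffer branch from an exact sym_int4 check to a membership test that also covers sym_int8. For both qtypes the converted data occupies the same number of bytes as the source, so torch.empty_like suffices; only formats like sym_int5 need a recomputed size via ggml_qk_size. A minimal sketch of that dispatch pattern follows; the ggml_tensor_qtype values and the helper name are placeholders for illustration, not the real bigdl.llm ggml bindings.

import torch

# Stand-in for the ggml_tensor_qtype mapping imported in low_bit_linear.py.
# The numeric values below are placeholders, not the real ggml enum values.
ggml_tensor_qtype = {"sym_int4": 2, "sym_int8": 8}

SYM_INT4 = ggml_tensor_qtype["sym_int4"]
SYM_INT8 = ggml_tensor_qtype["sym_int8"]


def alloc_dst_tensor(tensor: torch.Tensor, qtype: int) -> torch.Tensor:
    """Hypothetical helper mirroring the patched branch: for sym_int4 and
    sym_int8 the converted buffer is byte-for-byte the same size as the
    source, so an empty tensor of identical shape/dtype is enough."""
    if qtype in [SYM_INT4, SYM_INT8]:
        return torch.empty_like(tensor)
    raise NotImplementedError(f"qtype {qtype} needs a recomputed buffer size")


# Example: a packed uint8 buffer for a q8-quantized weight keeps its size.
packed = torch.empty(4096, dtype=torch.uint8)
dst = alloc_dst_tensor(packed, SYM_INT8)
assert dst.shape == packed.shape and dst.dtype == packed.dtype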