From 33d75adadff020de8bbcb3f166f53ee564c2f82b Mon Sep 17 00:00:00 2001
From: Yina Chen <33650826+cyita@users.noreply.github.com>
Date: Fri, 8 Sep 2023 15:52:36 +0800
Subject: [PATCH] [LLM]Support q5_0 on arc (#8926)

* support q5_0

* delete

* fix style
---
 .../llm/src/bigdl/llm/transformers/low_bit_linear.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
index 2a040bf0..51231275 100644
--- a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
+++ b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
@@ -100,6 +100,12 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
 
     if qtype == ggml_tensor_qtype["sym_int4"]:
         dst_tensor = torch.empty_like(tensor)
+    elif qtype == ggml_tensor_qtype["sym_int5"]:
+        QK = ggml.ggml_qk_size(qtype)
+        block_size_in_bytes = ggml.ggml_type_size(ggml_tensor_qtype["asym_int5"])
+        dst_size = (num_elem // QK) * block_size_in_bytes
+        dst_tensor = torch.empty(dst_size, dtype=torch.uint8,
+                                 device=torch.device('cpu'))
     else:
         return tensor
     dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
@@ -119,6 +125,12 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
 
     if qtype == ggml_tensor_qtype["sym_int4"]:
         dst_tensor = torch.empty_like(tensor)
+    elif qtype == ggml_tensor_qtype["sym_int5"]:
+        QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
+        block_size_in_bytes = ggml.ggml_type_size(qtype)
+        dst_size = (num_elem // QK) * block_size_in_bytes
+        dst_tensor = torch.empty(dst_size, dtype=torch.uint8,
+                                 device=torch.device('cpu'))
     else:
         return tensor
     dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
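
For reference, below is a minimal sketch of the buffer-size arithmetic behind the two new sym_int5 branches. The helper name q5_0_dst_buffer and the hard-coded block constants (32 elements per block, 22 bytes per q5_0/sym_int5 block, 24 bytes per q5_1/asym_int5 block) are assumptions made for illustration only; the patch itself obtains these values at runtime through ggml.ggml_qk_size() and ggml.ggml_type_size().

import torch

# Assumed block-layout constants (standard ggml q5_0 / q5_1 layouts);
# the patch queries them via ggml.ggml_qk_size() / ggml.ggml_type_size().
QK5 = 32                           # elements per 5-bit quantization block (assumed)
Q5_0_BLOCK_BYTES = 2 + 4 + 16      # fp16 scale + 32 packed high bits + 32 x 4-bit low nibbles
Q5_1_BLOCK_BYTES = 2 + 2 + 4 + 16  # q5_1 additionally stores an fp16 min


def q5_0_dst_buffer(num_elem: int, to_xpu: bool) -> torch.Tensor:
    """Allocate the raw byte buffer that receives the converted q5_0 weights.

    Hypothetical helper mirroring the patch: the cpu->xpu direction sizes the
    buffer with the asym_int5 (q5_1) block size, while the xpu->cpu direction
    uses the sym_int5 (q5_0) block size.
    """
    block_size_in_bytes = Q5_1_BLOCK_BYTES if to_xpu else Q5_0_BLOCK_BYTES
    dst_size = (num_elem // QK5) * block_size_in_bytes
    return torch.empty(dst_size, dtype=torch.uint8, device=torch.device('cpu'))


# Example: sizing the destination for a 4096 x 4096 q5_0 weight converted to the xpu-side layout
buf = q5_0_dst_buffer(4096 * 4096, to_xpu=True)
print(buf.numel())  # (4096 * 4096 // 32) * 24 bytes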