From 0136fad1d4575465cb24f313595119da0c9f76f8 Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Fri, 29 Mar 2024 09:43:55 +0800
Subject: [PATCH] LLM: support iq1_s (#10564)

* init version

* update utils

* remove unused code
---
 python/llm/src/ipex_llm/ggml/quantize.py         |  4 +++-
 .../src/ipex_llm/transformers/low_bit_linear.py  |  4 ++--
 python/llm/src/ipex_llm/transformers/model.py    | 15 ++++++++-------
 python/llm/src/ipex_llm/transformers/utils.py    |  5 ++++-
 4 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/python/llm/src/ipex_llm/ggml/quantize.py b/python/llm/src/ipex_llm/ggml/quantize.py
index 15d36202..8dab4aaf 100644
--- a/python/llm/src/ipex_llm/ggml/quantize.py
+++ b/python/llm/src/ipex_llm/ggml/quantize.py
@@ -42,7 +42,9 @@ ggml_tensor_qtype = {"sym_int4": 2,   # q4_0 in ggml
                      "bf16": 20,
                      "gguf_iq2_xxs": 21,
                      "gguf_iq2_xs": 22,
-                     "q2_k": 23}
+                     "q2_k": 23,
+                     "gguf_iq1_s": 24,
+                     "gguf_iq1_m": 25}
 
 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,
diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index d5e35d3e..797abcb3 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -73,7 +73,7 @@ FP8E5 = ggml_tensor_qtype["fp8_e5m2"]
 IQ2_XXS = ggml_tensor_qtype["gguf_iq2_xxs"]
 IQ2_XS = ggml_tensor_qtype["gguf_iq2_xs"]
 Q2_K = ggml_tensor_qtype["q2_k"]
-
+IQ1_S = ggml_tensor_qtype["gguf_iq1_s"]
 
 # The ggml_weight is col major and packs two rows at a stride of Q4_0//2.
 #
@@ -156,7 +156,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
     if not convert_shape_only and device != 'meta':
         dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
         hist = (ctypes.c_int64 * 16)()
-        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K]:
+        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S]:
             ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist)
         else:
             if imatrix is not None:
diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py
index f4c5bd25..c4410d4c 100644
--- a/python/llm/src/ipex_llm/transformers/model.py
+++ b/python/llm/src/ipex_llm/transformers/model.py
@@ -118,7 +118,8 @@ class _BaseAutoModelClass:
         :param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``,
                                 ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``,
                                 ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
-                                ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, ``'fp16'`` or ``'bf16'``,
+                                ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, ``'gguf_iq1_s'``,
+                                ``'fp16'`` or ``'bf16'``,
                                 ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
                                 asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
                                 Relevant low bit optimizations will be applied to the model.
@@ -304,14 +305,14 @@ class _BaseAutoModelClass:
             kwargs["pretraining_tp"] = 1
         q_k = load_in_low_bit if load_in_low_bit else "sym_int4"
         imatrix_file = kwargs.pop("imatrix", None)
-        if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs"]:
+        if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s"]:
             invalidInputError(imatrix_file is not None,
-                              "For gguf_iq2_xxs and gguf_iq2_xs quantization,"
+                              "For gguf_iq2 and gguf_iq1 quantization, "
                               "imatrix is needed.")
         cpu_embedding = kwargs.get("cpu_embedding", False)
         # for 2bit, default use embedding_quantization
-        if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "q2_k"] and not cpu_embedding and \
-                embedding_qtype is None:
+        if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \
+                not cpu_embedding and embedding_qtype is None:
             embedding_qtype = "q2_k"
         if imatrix_file is not None:
             imatrix_data = load_imatrix_data(imatrix_file)
@@ -361,7 +362,7 @@ class _BaseAutoModelClass:
                               f"Unknown load_in_low_bit value: {q_k}, expected:"
                               f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
                               f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, "
-                              f"gguf_iq2_xs, mixed_fp4 or mixed_fp8.")
+                              f"gguf_iq2_xs, gguf_iq1_s, mixed_fp4 or mixed_fp8.")
 
         qtype = ggml_tensor_qtype[q_k]
         # In case it needs a second try,
@@ -535,7 +536,7 @@ class _BaseAutoModelClass:
         optimize_model = kwargs.pop("optimize_model", True)
 
         qtype = ggml_tensor_qtype[bigdl_transformers_low_bit]
-        if bigdl_transformers_low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "q2_k"] and \
+        if bigdl_transformers_low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \
                 not cpu_embedding:
             embedding_qtype = "q2_k"
         if embedding_qtype is not None:
diff --git a/python/llm/src/ipex_llm/transformers/utils.py b/python/llm/src/ipex_llm/transformers/utils.py
index 39211fd4..1450cd0c 100644
--- a/python/llm/src/ipex_llm/transformers/utils.py
+++ b/python/llm/src/ipex_llm/transformers/utils.py
@@ -269,7 +269,8 @@ def module_name_process(full_module_name):
 
 def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_type=None):
     cur_qtype = qtype
-    if qtype in [ggml_tensor_qtype["gguf_iq2_xxs"], ggml_tensor_qtype["gguf_iq2_xs"]]:
+    if qtype in [ggml_tensor_qtype["gguf_iq2_xxs"], ggml_tensor_qtype["gguf_iq2_xs"],
+                 ggml_tensor_qtype["gguf_iq1_s"]]:
         # For quantization which needs importance matrix
         new_module_name, layer, cur_module = module_name_process(full_module_name)
         # custom mixed quantization strategy
@@ -282,6 +283,8 @@ def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_type=
         else:
             if cur_module == 'v' or (cur_module == 'down' and int(layer) in [0, 1, 10, 11]):
                 cur_qtype = ggml_tensor_qtype['q2_k']
+            if qtype == ggml_tensor_qtype["gguf_iq1_s"] and cur_module == 'o':
+                cur_qtype = ggml_tensor_qtype['gguf_iq2_xxs']
         if imatrix_data is not None and new_module_name in imatrix_data:
             cur_imatrix = imatrix_data[new_module_name]
         else:
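
Usage note: the new quantization type is exercised through the existing `from_pretrained` entry point documented in the model.py hunk above. Passing `load_in_low_bit="gguf_iq1_s"` requires an importance-matrix file via the `imatrix` keyword, mirroring the gguf_iq2 path; embeddings then default to q2_k quantization unless `cpu_embedding` is set, and the 'o' projection falls back to gguf_iq2_xxs per the utils.py change. A minimal sketch of intended usage follows; the model id and imatrix file name are placeholders, not part of this patch.

    # Hypothetical usage sketch for the gguf_iq1_s path added in this patch.
    # The model id and imatrix path below are placeholder values.
    from ipex_llm.transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-chat-hf",      # placeholder model id
        load_in_low_bit="gguf_iq1_s",         # 1-bit IQ quantization enabled by this patch
        imatrix="llama-v2-7b.imatrix",        # required: gguf_iq1/iq2 need an importance matrix
    )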