diff --git a/python/llm/src/ipex_llm/ggml/quantize.py b/python/llm/src/ipex_llm/ggml/quantize.py
index a5e2cb91..bdaeccaf 100644
--- a/python/llm/src/ipex_llm/ggml/quantize.py
+++ b/python/llm/src/ipex_llm/ggml/quantize.py
@@ -46,7 +46,8 @@ ggml_tensor_qtype = {"sym_int4": 2,  # q4_0 in ggml
                      "gguf_iq1_s": 24,
                      "gguf_iq1_m": 25,
                      "q6_k": 26,
-                     "q4_k": 27}
+                     "q4_k": 27,
+                     "fp6": 29}
 
 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,
diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index 91fcbd62..6b27546a 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -72,6 +72,7 @@ FP4 = ggml_tensor_qtype["fp4"]
 MOFQ4 = ggml_tensor_qtype["mixed_fp4"]
 MOFQ8 = ggml_tensor_qtype["mixed_fp8"]
 FP8E5 = ggml_tensor_qtype["fp8_e5m2"]
+FP6 = ggml_tensor_qtype["fp6"]
 IQ2_XXS = ggml_tensor_qtype["gguf_iq2_xxs"]
 IQ2_XS = ggml_tensor_qtype["gguf_iq2_xs"]
 Q2_K = ggml_tensor_qtype["q2_k"]
@@ -242,7 +243,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
 
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
-    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8E4, FP8E5]:
+    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py
index fcc4558c..e3c5448c 100644
--- a/python/llm/src/ipex_llm/transformers/model.py
+++ b/python/llm/src/ipex_llm/transformers/model.py
@@ -117,11 +117,12 @@ class _BaseAutoModelClass:
                              Default to be ``False``.
        :param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``,
                                ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``,
-                               ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
-                               ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, gguf_iq1_s'``,
-                               ``'fp16'``, ``'bf16'``, ``'q4_k'`` or ``'q6_k'``,
-                               ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
-                               asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
+                               ``'nf4'``, ``'fp4'``, ``'fp6'``, ``'fp8'``, ``'fp8_e4m3'``,
+                               ``'fp8_e5m2'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``,
+                               ``'gguf_iq1_s'``, ``'fp16'``, ``'bf16'``, ``'q4_k'`` or
+                               ``'q6_k'``, ``'sym_int4'`` means symmetric int 4,
+                               ``'asym_int4'`` means asymmetric int 4,
+                               ``'nf4'`` means 4-bit NormalFloat, etc.
                                Relevant low bit optimizations will be applied to the model.
        :param optimize_model: boolean value, Whether to further optimize the low_bit llm model.
                               Default to be ``True``.
@@ -378,7 +379,7 @@ class _BaseAutoModelClass:
         invalidInputError(q_k in ggml_tensor_qtype,
                           f"Unknown load_in_low_bit value: {q_k}, expected:"
                           f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
-                          f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, "
+                          f"fp4, fp6, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, "
                           f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, mixed_fp4 or mixed_fp8.")
         qtype = ggml_tensor_qtype[q_k]
 
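
With the new ``"fp6"`` qtype registered, it can be requested through the same ``load_in_low_bit`` path documented in the docstring change above. A minimal usage sketch follows; it assumes the usual ``ipex_llm.transformers.AutoModelForCausalLM`` entry point, and the model id shown is only a placeholder, not something this diff depends on.

    # Sketch only: exercising the new "fp6" load_in_low_bit option added by this change.
    from ipex_llm.transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-chat-hf",  # placeholder model id
        load_in_low_bit="fp6",            # new qtype; maps to ggml_tensor_qtype["fp6"] == 29
        optimize_model=True,
        trust_remote_code=True,
    )

An unknown string would still be rejected by the ``invalidInputError`` check shown in the last hunk, whose expected-values message now lists ``fp6`` as well.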