add q6k precision in ipex-llm (#10792)

* add q6k

* add initial 16k

* update

* fix style
Ruonan Wang 2024-04-18 16:52:09 +08:00 committed by GitHub
parent e90e31719f
commit 0e8aac19e3
3 changed files with 6 additions and 4 deletions

@@ -44,7 +44,8 @@ ggml_tensor_qtype = {"sym_int4": 2, # q4_0 in ggml
                      "gguf_iq2_xs": 22,
                      "q2_k": 23,
                      "gguf_iq1_s": 24,
-                     "gguf_iq1_m": 25}
+                     "gguf_iq1_m": 25,
+                     "q6_k": 26}
 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,

@@ -74,6 +74,7 @@ IQ2_XXS = ggml_tensor_qtype["gguf_iq2_xxs"]
 IQ2_XS = ggml_tensor_qtype["gguf_iq2_xs"]
 Q2_K = ggml_tensor_qtype["q2_k"]
 IQ1_S = ggml_tensor_qtype["gguf_iq1_s"]
+Q6_K = ggml_tensor_qtype["q6_k"]
 # For sym_int4
@@ -214,7 +215,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
     if not convert_shape_only and device != 'meta':
         dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
         hist = (ctypes.c_int64 * 16)()
-        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S]:
+        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q6_K]:
             ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist)
         else:
             if imatrix is not None:
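
Note on this hunk: with Q6_K added to the exclusion list, q6_k tensors now skip the plain ggml_quantize_tensor call and fall through to the else branch, which (per the surrounding context) handles the qtypes that can consume an importance matrix (imatrix), matching how the other k-/i-quant types are routed.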

@@ -119,7 +119,7 @@ class _BaseAutoModelClass:
                          ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``,
                          ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
                          ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, ``'gguf_iq1_s'``,
-                         ``'fp16'`` or ``'bf16'``,
+                         ``'fp16'``, ``'bf16'``, or ``'q6_k'``,
                          ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
                          asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
                          Relevant low bit optimizations will be applied to the model.
@@ -378,7 +378,7 @@ class _BaseAutoModelClass:
                     f"Unknown load_in_low_bit value: {q_k}, expected:"
                     f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
                     f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, "
-                    f"gguf_iq2_xs, gguf_iq1_s, mixed_fp4 or mixed_fp8.")
+                    f"gguf_iq2_xs, gguf_iq1_s, q2_k, q6_k, mixed_fp4 or mixed_fp8.")
                 qtype = ggml_tensor_qtype[q_k]
                 # In case it needs a second try,
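
End to end, the new precision is selected through load_in_low_bit, the parameter validated in the hunk above. A minimal usage sketch, assuming the standard ipex-llm Hugging Face-style loader (the model id below is a placeholder, not part of this commit):

    from ipex_llm.transformers import AutoModelForCausalLM

    # Placeholder model id; any causal LM supported by ipex-llm should work.
    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf",
        load_in_low_bit="q6_k",  # the qtype added by this commit
    )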