Support fp6_k in ipex-llm (#11222)

* support fp6_k

* support fp6_k

* remove

* fix style
Yina Chen 2024-06-05 17:34:36 +08:00 committed by GitHub
parent a6674f5bce
commit ed67435491
3 changed files with 11 additions and 6 deletions


@@ -48,7 +48,9 @@ ggml_tensor_qtype = {"sym_int4": 2, # q4_0 in ggml
                      "q6_k": 26,
                      "q4_k": 27,
                      "q5_k": 28,
-                     "fp6": 29}
+                     "fp6": 29,
+                     "fp6_k": 30,
+                     }
 # mixed precison from llama.cpp
 gguf_mixed_qtype = {"gguf_q4k_s": 101,
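
With this entry, `fp6_k` resolves by name like every other qtype in the table. A minimal lookup sketch, assuming the table's usual import path (the file name is not visible in this diff):

```python
# Minimal sketch; ipex_llm.ggml.quantize as the module path is an
# assumption, since the diff does not show the file name.
from ipex_llm.ggml.quantize import ggml_tensor_qtype

assert ggml_tensor_qtype["fp6_k"] == 30  # id registered by this commit
assert ggml_tensor_qtype["fp6"] == 29    # existing entry, unchanged
```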


@@ -80,6 +80,7 @@ IQ1_S = ggml_tensor_qtype["gguf_iq1_s"]
 Q4_K = ggml_tensor_qtype["q4_k"]
 Q6_K = ggml_tensor_qtype["q6_k"]
 Q5_K = ggml_tensor_qtype["q5_k"]
+FP6_K = ggml_tensor_qtype["fp6_k"]
 # For sym_int4
@@ -220,7 +221,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
     if not convert_shape_only and device != 'meta':
         dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
         hist = (ctypes.c_int64 * 16)()
-        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K, Q5_K]:
+        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K, Q5_K, FP6_K]:
             ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist)
         else:
             if imatrix is not None:
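
This hunk routes `FP6_K` away from the generic `ggml.ggml_quantize_tensor` call and into the k-quant branch, which can additionally consume an importance matrix (`imatrix`). A hedged sketch of that dispatch, with names local to the sketch rather than calls into ggml:

```python
# Illustrative only: the set mirrors the membership test in the hunk above,
# and the returned labels merely name the two branches.
K_QUANT_PATH = {"IQ2_XXS", "IQ2_XS", "Q2_K", "IQ1_S", "Q4_K", "Q6_K", "Q5_K", "FP6_K"}

def quantize_path(qtype: str, imatrix=None) -> str:
    if qtype not in K_QUANT_PATH:
        return "generic ggml_quantize_tensor"
    # fp6_k now joins the k-quant-style types, whose branch can weight the
    # quantization with an importance matrix when one is provided
    return "k-quant path (imatrix-aware)" if imatrix is not None else "k-quant path"
```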
@@ -244,7 +245,8 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
-    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5, Q4_K, Q6_K]:
+    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
+                 Q4_K, Q6_K, FP6_K]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -269,7 +271,8 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
-    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5, Q4_K, Q6_K]:
+    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
+                 Q4_K, Q6_K, FP6_K]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
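
Both converters treat the new type the same way: for these qtypes the CPU-side and XPU-side layouts occupy the same number of bytes, so the destination buffer can simply mirror the source. A hedged sketch of that shared allocation rule (sym_int5, which recomputes its size from the ggml block size, is left out):

```python
# Sketch of the allocation rule shared by the two hunks above; the string
# set stands in for the module-level constants (SYM_INT4 ... FP6_K).
import torch

SAME_SIZE_QTYPES = frozenset({
    "sym_int4", "asym_int4", "sym_int8", "nf4", "nf3", "fp4", "fp6",
    "fp8_e4m3", "fp8_e5m2", "q4_k", "q6_k", "fp6_k",
})

def alloc_dst(tensor: torch.Tensor, qtype: str) -> torch.Tensor:
    if qtype in SAME_SIZE_QTYPES:
        return torch.empty_like(tensor)  # same byte count on both sides
    raise NotImplementedError("sym_int5-style qtypes recompute their size")
```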


@@ -154,7 +154,7 @@ class _BaseAutoModelClass:
             ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
             ``'fp6'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``,
             ``'gguf_iq1_s'``, ``'gguf_q4k_m'``, ``'gguf_q4k_s'``,
-            ``'fp16'``, ``'bf16'``,
+            ``'fp16'``, ``'bf16'``, ``'fp6_k'``,
             ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
             asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
             Relevant low bit optimizations will be applied to the model.
@@ -428,7 +428,7 @@ class _BaseAutoModelClass:
                 f"Unknown load_in_low_bit value: {q_k}, expected:"
                 f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
                 f"fp4, fp6, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, "
-                f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q5_k, q6_k, "
+                f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q5_k, q6_k, fp6_k, "
                 f"gguf_q4k_s, gguf_q4k_m, mixed_fp4 or mixed_fp8.")
         if q_k in ggml_tensor_qtype:
             qtype = ggml_tensor_qtype[q_k]
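
End to end, the new format is selected like any other `load_in_low_bit` value. A hedged usage sketch; the checkpoint id is a placeholder:

```python
# Usage sketch: the model id is a placeholder, and moving to "xpu" assumes
# an Intel GPU environment with IPEX set up.
from ipex_llm.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",   # placeholder checkpoint
    load_in_low_bit="fp6_k",      # new option registered by this commit
    trust_remote_code=True,
)
model = model.to("xpu")           # low-bit weights typically run on Intel GPU
```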