LLM: add new qtype woq_int4 to temporarily support gemm int4. (#12706)
This PR adds a temporary qtype, woq_int4, to avoid affecting other qtypes and models.
Co-authored-by: leonardozcm <leonardo1997zcm@gmail.com>
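A hedged sketch of how the new qtype is expected to be selected from user code once it is wired up. This assumes "woq_int4" is accepted by load_in_low_bit in the same way as the existing ggml_tensor_qtype names (e.g. "sym_int4"); the model id is only a placeholder.

# Sketch only, not part of this diff. Assumes "woq_int4" is exposed through
# load_in_low_bit like the other qtype names; the model id is a placeholder.
from ipex_llm.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",   # placeholder model id
    load_in_low_bit="woq_int4",        # new qtype added by this PR (assumed string)
    trust_remote_code=True,
)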
This commit is contained in:
parent 6d03d06ebb
commit 9930351112

3 changed files with 8 additions and 6 deletions
@@ -53,6 +53,7 @@ ggml_tensor_qtype = {"sym_int4": 2, # q4_0 in ggml
                      "sym_int4_rtn": 31,
                      "sym_int8_rtn": 32,
                      "asym_int4_rtn": 33,
+                     "woq_int4": 34,
                      }

 # mixed precison from llama.cpp

@@ -84,6 +84,7 @@ FP6_K = ggml_tensor_qtype["fp6_k"]
 SYM_INT4_RTN = ggml_tensor_qtype["sym_int4_rtn"]
 SYM_INT8_RTN = ggml_tensor_qtype["sym_int8_rtn"]
 ASYM_INT4_RTN = ggml_tensor_qtype["asym_int4_rtn"]
+WOQ_INT4 = ggml_tensor_qtype["woq_int4"]
 RTN_DTYPE = {
     SYM_INT4_RTN: torch.uint8,
     ASYM_INT4_RTN: torch.uint8,

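The two hunks above register the new id in ggml_tensor_qtype and mirror it as the WOQ_INT4 constant; note that RTN_DTYPE is left untouched, so woq_int4 is not treated as an RTN format. A minimal sketch of how that registry/constant pair resolves, using the import path shown in the third file further down; the is_woq_int4 helper is hypothetical and exists only for illustration.

# Minimal sketch: resolving the new qtype id from the registry, mirroring the
# WOQ_INT4 constant defined above. The import path is taken from the import
# hunk further down; is_woq_int4 is a hypothetical helper, not real API.
from ipex_llm.ggml.quantize import ggml_tensor_qtype

WOQ_INT4 = ggml_tensor_qtype["woq_int4"]   # resolves to 34 after this change

def is_woq_int4(qtype: int) -> bool:
    return qtype == WOQ_INT4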
@@ -187,7 +188,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())

     if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)

@@ -213,7 +214,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())

     if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])

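Both conversion hunks route woq_int4 through the branch that allocates the destination with torch.empty_like, i.e. the converted tensor keeps the source's shape and dtype rather than being sized from a block count. A self-contained sketch of that allocation rule; the constants mirror the diff, while the helper name and the qtype subset are illustrative.

# Self-contained sketch of the allocation rule changed above: qtypes in the
# same-size list (now including WOQ_INT4) get a destination buffer allocated
# with empty_like. alloc_conversion_dst is a made-up name; only this branch
# of the real function is reproduced.
import torch

SYM_INT4 = 2    # from the ggml_tensor_qtype header above
WOQ_INT4 = 34   # added by this PR
SAME_SIZE_QTYPES = {SYM_INT4, WOQ_INT4}   # illustrative subset of the full list

def alloc_conversion_dst(src: torch.Tensor, qtype: int) -> torch.Tensor:
    if qtype in SAME_SIZE_QTYPES:
        return torch.empty_like(src)      # same shape/dtype as the source
    raise NotImplementedError("other qtypes size the buffer from ggml_qk_size")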
@@ -21,7 +21,7 @@ from ipex_llm.utils.common import invalidInputError
 from ipex_llm.ggml.quantize import ggml_tensor_qtype
 from ipex_llm.transformers.utils import get_xpu_device_name
 from ipex_llm.transformers.low_bit_linear import SYM_INT4, SYM_INT8, FP8E5, IQ2_XXS, FP4, FP8E4,\
-    FP6, ASYM_INT4
+    FP6, ASYM_INT4, WOQ_INT4

 FP8_KV_ALLOC_LENGTH = 512
 KV_CACHE_ALLOC_BLOCK_LENGTH = int(os.environ.get("KV_CACHE_ALLOC_BLOCK_LENGTH", 256))

@@ -33,7 +33,7 @@ GELU = 1

 def decoding_fast_path_qtype_check(proj):
     qtype = getattr(proj, "qtype", None)
-    return qtype in [SYM_INT4, FP8E5, FP4]
+    return qtype in [SYM_INT4, FP8E5, FP4, WOQ_INT4]


 def init_kv_cache(batch_size, num_heads, head_dim, current_length, max_length, dtype, device):

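decoding_fast_path_qtype_check is small enough to exercise in isolation. A runnable sketch against a stand-in projection object; real callers pass a quantized linear layer whose qtype attribute holds one of the ggml_tensor_qtype ids, and the FP8E5/FP4 values below are placeholders, not the real registry entries.

# Runnable sketch of the check changed above, using a stand-in projection.
# SYM_INT4 and WOQ_INT4 use the ids visible in this diff; FP8E5 and FP4 are
# placeholder values, not the real registry entries.
from types import SimpleNamespace

SYM_INT4, WOQ_INT4 = 2, 34
FP8E5, FP4 = -1, -2   # placeholders

def decoding_fast_path_qtype_check(proj):
    qtype = getattr(proj, "qtype", None)
    return qtype in [SYM_INT4, FP8E5, FP4, WOQ_INT4]

assert decoding_fast_path_qtype_check(SimpleNamespace(qtype=WOQ_INT4))
assert not decoding_fast_path_qtype_check(SimpleNamespace())   # no qtype attribute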
@@ -248,7 +248,7 @@ def mlp_fusion_check(x, qtype, training):
         return False
     if x.device.type != 'xpu':
         return False
-    if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6]:
+    if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6, WOQ_INT4]:
         return False
     if training or x.requires_grad:
         return False

@@ -263,7 +263,7 @@ def use_xmx(x: torch.Tensor, qtype: int):
     device = get_xpu_device_name(x.device)
     return (
         device in ["arc", "pvc"]
-        and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5]
+        and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5, WOQ_INT4]
         and (
             (device == "pvc" and 1 < x.size(0) <= 16)
             or