enable new q4_1 (#9479)
commit 50b01058f1
parent 3dac21ac7b
1 changed file with 3 additions and 2 deletions
@@ -61,6 +61,7 @@ IS_SERVER = is_server()
 IS_SPR = is_spr()
 TORCH_LINEAR_THRESHOLD = 96
 SYM_INT4 = ggml_tensor_qtype["sym_int4"]
+ASYM_INT4 = ggml_tensor_qtype["asym_int4"]
 SYM_INT8 = ggml_tensor_qtype["sym_int8"]
 NF4 = ggml_tensor_qtype["nf4"]
 NF3 = ggml_tensor_qtype["nf3"]
@@ -111,7 +112,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
-    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8]:
+    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -136,7 +137,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
-    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8]:
+    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
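The change is symmetric: both conversion helpers now accept asym_int4 (ggml's q4_1 layout) alongside the other block-quantized formats when allocating the destination buffer. Below is a minimal usage sketch, not part of the commit, assuming the import paths shown (the diff does not name the file), that both helpers return the converted tensor, and that `packed` would normally come from an earlier quantization step; the sizes are illustrative (a q4_1 block packs 32 elements into 20 bytes: two fp16 values, scale and min, plus 16 bytes of 4-bit quants).

# Minimal usage sketch for the newly enabled asym_int4 (q4_1) path.
# Hypothetical assumptions, not shown in the diff: the import paths below,
# that both convet helpers return the converted tensor, and that `packed`
# was produced by an earlier quantization step (it is only allocated here).
import torch
from bigdl.llm.ggml.quantize import ggml_tensor_qtype   # assumed path
from bigdl.llm.transformers.low_bit_linear import (     # assumed path
    ggml_q_format_convet_cpu2xpu,
    ggml_q_format_convet_xpu2cpu,
)

ASYM_INT4 = ggml_tensor_qtype["asym_int4"]

num_elem = 4096                            # illustrative; multiple of the 32-element q4_1 block
packed = torch.empty(num_elem // 32 * 20,  # q4_1 block: 2 fp16 values + 16 bytes of 4-bit quants
                     dtype=torch.uint8)

# With this commit, asym_int4 takes the same destination-allocation branch as
# sym_int4/sym_int8/nf4/nf3/fp4/fp8, so q4_1 weights can round-trip CPU <-> XPU.
xpu_layout = ggml_q_format_convet_cpu2xpu(packed, num_elem, ASYM_INT4)
cpu_layout = ggml_q_format_convet_xpu2cpu(xpu_layout, num_elem, ASYM_INT4)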