parent ab40607b87
commit e2264e8845
3 changed files with 15 additions and 10 deletions
@@ -33,7 +33,8 @@ ggml_tensor_qtype = {"sym_int4": 2, # q4_0 in ggml
                      "nf4": 10,
                      "nf3": 11,
                      "fp16": 12,
-                     "fp8": 15}
+                     "fp8": 15,
+                     "fp4": 16}

 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,
@@ -65,6 +65,7 @@ SYM_INT8 = ggml_tensor_qtype["sym_int8"]
 NF4 = ggml_tensor_qtype["nf4"]
 NF3 = ggml_tensor_qtype["nf3"]
 FP8 = ggml_tensor_qtype["fp8"]
+FP4 = ggml_tensor_qtype["fp4"]


 def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
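Taken together, the two hunks above mean a user-facing "fp4" string now resolves to the integer id 16, which the rest of the module compares against the new FP4 constant. A minimal sketch using only the entries visible in this diff (the real mapping contains more types):

    # Only the entries shown in the hunks above; the full mapping is longer.
    ggml_tensor_qtype = {"sym_int4": 2, "nf4": 10, "nf3": 11,
                         "fp16": 12, "fp8": 15, "fp4": 16}
    FP4 = ggml_tensor_qtype["fp4"]

    qtype = ggml_tensor_qtype["fp4"]   # what load_in_low_bit="fp4" resolves to
    assert qtype == FP4                # downstream branches key off this integer id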
@@ -108,7 +109,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int

     src = ctypes.c_void_p(tensor.data.data_ptr())

-    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3]:
+    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3, FP4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -133,7 +134,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int

     src = ctypes.c_void_p(tensor.data.data_ptr())

-    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3]:
+    if qtype in [SYM_INT4, SYM_INT8, NF4, NF3, FP4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
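A note on the branch change in the two conversion hunks above (my reading of the code, not text from the repo): for SYM_INT4, SYM_INT8, NF4, NF3 and now FP4 the packed byte layout has the same size on CPU and XPU, so the destination buffer can simply mirror the source tensor, whereas sym_int5 recomputes the element count from its QK block size. A tiny sketch of that allocation choice:

    import torch

    # FP4 joins the "same packed size on both sides" group: the converted
    # buffer mirrors the source tensor instead of being resized from QK.
    src = torch.empty(4096, dtype=torch.uint8)   # packed low-bit weight bytes
    dst = torch.empty_like(src)                  # same shape and dtype as src
    assert dst.shape == src.shape and dst.dtype == src.dtype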
@@ -387,8 +388,10 @@ class LowBitLinear(nn.Linear):
         else:
             # CPU logic
             # todo may need to set a different number on different platforms
-            invalidInputError(self.qtype != NF3 and self.qtype != NF4 and self.qtype != FP8,
-                              "NF3, NF4 and FP8 quantization are currently not supported on CPU")
+            invalidInputError(self.qtype != NF3 and self.qtype != NF4 and self.qtype != FP8
+                              and self.qtype != FP4,
+                              "NF3, NF4, FP4 and FP8 quantization are currently not"
+                              " supported on CPU")
             if IS_SERVER and (not IS_SPR) and \
                     self.qtype == SYM_INT4 and x_2d.shape[0] >= TORCH_LINEAR_THRESHOLD:
                 x0_fp32 = ggml_int4_convert_fp32(x0, self.weight_shape, self.weight_length)
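One detail in the reworked guard above: the message is now split across two adjacent string literals, which Python concatenates at compile time, so the user still sees a single sentence. A quick check of that behaviour:

    # Adjacent string literals are joined by the parser.
    msg = ("NF3, NF4, FP4 and FP8 quantization are currently not"
           " supported on CPU")
    assert msg == "NF3, NF4, FP4 and FP8 quantization are currently not supported on CPU"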
@@ -60,9 +60,10 @@ class _BaseAutoModelClass:
         :param load_in_4bit: boolean value, True means load linear's weight to symmetric int 4.
                              Default to be False.
         :param load_in_low_bit: str value, options are sym_int4, asym_int4, sym_int5, asym_int5
-                                , sym_int8, nf3, nf4 or fp16. sym_int4 means symmetric int 4,
-                                asym_int4 means asymmetric int 4, nf4 means 4-bit NormalFloat, etc.
-                                Relevant low bit optimizations will be applied to the model.
+                                , sym_int8, nf3, nf4, fp4, fp8 or fp16. sym_int4 means symmetric
+                                int 4, asym_int4 means asymmetric int 4, nf4 means 4-bit
+                                NormalFloat, etc. Relevant low bit optimizations will be applied
+                                to the model.
         :param optimize_model: boolean value, Whether to further optimize the low_bit llm model.
                                Default to be True.
         :param modules_to_not_convert: list of str value, modules (nn.Module) that are skipped when
@@ -106,8 +107,8 @@ class _BaseAutoModelClass:
         from .convert import ggml_convert_low_bit
         invalidInputError(q_k in ggml_tensor_qtype,
                           f"Unknown load_in_low_bit value: {q_k}, expected:"
-                          f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4 "
-                          "or fp16.")
+                          f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
+                          "fp4, fp8 or fp16.")
         qtype = ggml_tensor_qtype[q_k]

         # In case it needs a second try,
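With "fp4" accepted by the check above, loading a model in FP4 would look roughly like the sketch below. This is an assumption about how the surrounding package is used; the import path (bigdl.llm.transformers) and the model path are placeholders, not something shown in this diff.

    # Hedged usage sketch; package path and model path are assumptions.
    from bigdl.llm.transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("path/to/model",
                                                 load_in_low_bit="fp4")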