LLM: support iq1_s (#10564)

* init version
* update utils
* remove unused code

parent f4537798c1
commit 0136fad1d4

4 changed files with 17 additions and 11 deletions

@@ -42,7 +42,9 @@ ggml_tensor_qtype = {"sym_int4": 2,   # q4_0 in ggml
                      "bf16": 20,
                      "gguf_iq2_xxs": 21,
                      "gguf_iq2_xs": 22,
-                     "q2_k": 23}
+                     "q2_k": 23,
+                     "gguf_iq1_s": 24,
+                     "gguf_iq1_m": 25}
 
 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,
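
Both new 1-bit types are registered here, but only gguf_iq1_s is wired through the rest of this commit; gguf_iq1_m just reserves id 25. A minimal sketch of the lookup this table serves (excerpted entries; the standalone snippet mirrors the `qtype = ggml_tensor_qtype[q_k]` line changed further down):

    # Excerpt of the table above; only the entries relevant to this commit.
    ggml_tensor_qtype = {"q2_k": 23, "gguf_iq1_s": 24, "gguf_iq1_m": 25}
    qtype = ggml_tensor_qtype["gguf_iq1_s"]
    assert qtype == 24
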
@@ -73,7 +73,7 @@ FP8E5 = ggml_tensor_qtype["fp8_e5m2"]
 IQ2_XXS = ggml_tensor_qtype["gguf_iq2_xxs"]
 IQ2_XS = ggml_tensor_qtype["gguf_iq2_xs"]
 Q2_K = ggml_tensor_qtype["q2_k"]
-
+IQ1_S = ggml_tensor_qtype["gguf_iq1_s"]
 
 # The ggml_weight is col major and packs two rows at a stride of Q4_0//2.
 #
@@ -156,7 +156,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
     if not convert_shape_only and device != 'meta':
         dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
         hist = (ctypes.c_int64 * 16)()
-        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K]:
+        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S]:
             ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist)
         else:
             if imatrix is not None:
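
The change above routes the new 1-bit type away from plain round-to-nearest quantization and into the importance-matrix branch. A standalone sketch of that dispatch, with stub functions standing in for the real ctypes bindings (the stubs and their return strings are illustrative, not the library API):

    IQ2_XXS, IQ2_XS, Q2_K, IQ1_S = 21, 22, 23, 24  # ids from the qtype table

    def quantize_plain(qtype):
        # stands in for ggml.ggml_quantize_tensor: no calibration data needed
        return "plain round-to-nearest, qtype=%d" % qtype

    def quantize_with_imatrix(qtype, imatrix):
        # 1- and 2-bit gguf types need per-channel importance weights
        if imatrix is None:
            raise ValueError("imatrix is needed")
        return "imatrix-guided, qtype=%d" % qtype

    def convert_qtype(qtype, imatrix=None):
        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S]:  # IQ1_S newly listed
            return quantize_plain(qtype)
        return quantize_with_imatrix(qtype, imatrix)

    print(convert_qtype(2))                  # q4_0 takes the plain path
    print(convert_qtype(24, imatrix=[1.0]))  # gguf_iq1_s takes the imatrix path
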
@@ -118,7 +118,8 @@ class _BaseAutoModelClass:
         :param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``,
                                 ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``,
                                 ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
-                                ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, ``'fp16'`` or ``'bf16'``,
+                                ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, ``'gguf_iq1_s'``,
+                                ``'fp16'`` or ``'bf16'``,
                                 ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
                                 asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
                                 Relevant low bit optimizations will be applied to the model.
@@ -304,14 +305,14 @@ class _BaseAutoModelClass:
                     kwargs["pretraining_tp"] = 1
             q_k = load_in_low_bit if load_in_low_bit else "sym_int4"
             imatrix_file = kwargs.pop("imatrix", None)
-            if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs"]:
+            if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s"]:
                 invalidInputError(imatrix_file is not None,
-                                  "For gguf_iq2_xxs and gguf_iq2_xs quantization,"
+                                  "For gguf_iq2 and gguf_iq1 quantization,"
                                   "imatrix is needed.")
             cpu_embedding = kwargs.get("cpu_embedding", False)
             # for 2bit, default use embedding_quantization
-            if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "q2_k"] and not cpu_embedding and \
-                    embedding_qtype is None:
+            if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \
+                    not cpu_embedding and embedding_qtype is None:
                 embedding_qtype = "q2_k"
             if imatrix_file is not None:
                 imatrix_data = load_imatrix_data(imatrix_file)
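
A minimal usage sketch of this loading path, assuming the ipex-llm AutoModelForCausalLM wrapper; the model id and imatrix file name are placeholders, not part of the commit:

    from ipex_llm.transformers import AutoModelForCausalLM

    # iq1/iq2 qtypes raise via invalidInputError when no imatrix is supplied
    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf",    # placeholder model id
        load_in_low_bit="gguf_iq1_s",
        imatrix="llama-2-7b.imatrix",  # placeholder importance-matrix file
    )
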
@@ -361,7 +362,7 @@ class _BaseAutoModelClass:
                           f"Unknown load_in_low_bit value: {q_k}, expected:"
                           f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
                           f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16,  bf16, gguf_iq2_xxs, "
-                          f"gguf_iq2_xs, mixed_fp4 or mixed_fp8.")
+                          f"gguf_iq2_xs, gguf_iq1_s, mixed_fp4 or mixed_fp8.")
         qtype = ggml_tensor_qtype[q_k]
 
         # In case it needs a second try,
@@ -535,7 +536,7 @@ class _BaseAutoModelClass:
         optimize_model = kwargs.pop("optimize_model", True)
 
         qtype = ggml_tensor_qtype[bigdl_transformers_low_bit]
-        if bigdl_transformers_low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "q2_k"] and \
+        if bigdl_transformers_low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \
                 not cpu_embedding:
             embedding_qtype = "q2_k"
         if embedding_qtype is not None:
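
The same embedding default applies on this reload path: sub-2-bit weight qtypes pair with a q2_k-quantized embedding unless embeddings stay on CPU. A standalone sketch of the rule (the function name is illustrative, not from the codebase):

    def default_embedding_qtype(low_bit, cpu_embedding):
        # mirrors the branch above: 1/2-bit weights get a q2_k embedding
        if low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \
                not cpu_embedding:
            return "q2_k"
        return None

    assert default_embedding_qtype("gguf_iq1_s", cpu_embedding=False) == "q2_k"
    assert default_embedding_qtype("sym_int4", cpu_embedding=False) is None
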
@@ -269,7 +269,8 @@ def module_name_process(full_module_name):
 
 def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_type=None):
     cur_qtype = qtype
-    if qtype in [ggml_tensor_qtype["gguf_iq2_xxs"], ggml_tensor_qtype["gguf_iq2_xs"]]:
+    if qtype in [ggml_tensor_qtype["gguf_iq2_xxs"], ggml_tensor_qtype["gguf_iq2_xs"],
+                 ggml_tensor_qtype["gguf_iq1_s"]]:
         # For quantization which needs importance matrix
         new_module_name, layer, cur_module = module_name_process(full_module_name)
         # custom mixed quantization strategy
@@ -282,6 +283,8 @@ def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_type=
         else:
             if cur_module == 'v' or (cur_module == 'down' and int(layer) in [0, 1, 10, 11]):
                 cur_qtype = ggml_tensor_qtype['q2_k']
+            if qtype == ggml_tensor_qtype["gguf_iq1_s"] and cur_module == 'o':
+                cur_qtype = ggml_tensor_qtype['gguf_iq2_xxs']
         if imatrix_data is not None and new_module_name in imatrix_data:
             cur_imatrix = imatrix_data[new_module_name]
         else:
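
The effect of the new branch: under gguf_iq1_s, value projections and a few down projections are bumped to q2_k as before, and the attention output projection is now bumped to gguf_iq2_xxs rather than left at 1 bit. A standalone sketch with string qtypes standing in for the integer ids:

    def pick_qtype(qtype, cur_module, layer):
        # simplified mirror of get_cur_qtype_and_imatrix's mixed strategy
        cur_qtype = qtype
        if cur_module == 'v' or (cur_module == 'down' and layer in [0, 1, 10, 11]):
            cur_qtype = 'q2_k'
        if qtype == 'gguf_iq1_s' and cur_module == 'o':
            cur_qtype = 'gguf_iq2_xxs'  # o_proj kept at 2 bits under iq1_s
        return cur_qtype

    assert pick_qtype('gguf_iq1_s', 'o', 5) == 'gguf_iq2_xxs'
    assert pick_qtype('gguf_iq1_s', 'down', 0) == 'q2_k'
    assert pick_qtype('gguf_iq1_s', 'q', 5) == 'gguf_iq1_s'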