parent 88463cbf47
commit 8796401b08

3 changed files with 6 additions and 4 deletions
@@ -45,7 +45,8 @@ ggml_tensor_qtype = {"sym_int4": 2,   # q4_0 in ggml
                      "q2_k": 23,
                      "gguf_iq1_s": 24,
                      "gguf_iq1_m": 25,
-                     "q6_k": 26}
+                     "q6_k": 26,
+                     "q4_k": 27}
 
 _llama_quantize_type = {"q4_0": 2,
                         "q4_1": 3,
@@ -74,6 +74,7 @@ IQ2_XXS = ggml_tensor_qtype["gguf_iq2_xxs"]
 IQ2_XS = ggml_tensor_qtype["gguf_iq2_xs"]
 Q2_K = ggml_tensor_qtype["q2_k"]
 IQ1_S = ggml_tensor_qtype["gguf_iq1_s"]
+Q4_K = ggml_tensor_qtype["q4_k"]
 Q6_K = ggml_tensor_qtype["q6_k"]
 
 
@@ -215,7 +216,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int,
     if not convert_shape_only and device != 'meta':
         dst = ctypes.c_void_p(dst_tensor.data.data_ptr())
         hist = (ctypes.c_int64 * 16)()
-        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q6_K]:
+        if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K]:
             ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist)
         else:
             if imatrix is not None:
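For orientation, a minimal sketch of the routing this hunk extends, using only the integer codes visible in the first hunk. The set and function names below are illustrative only, not the repository's own; the exclusion list mirrors the `if qtype not in [...]` test above.

# Sketch only: most qtypes go through the plain ggml_quantize_tensor call,
# while the listed k-/i-quants (now including q4_k) take the
# importance-matrix branch. Names here are made up for illustration.
ggml_tensor_qtype = {"q2_k": 23, "gguf_iq1_s": 24, "gguf_iq1_m": 25,
                     "q6_k": 26, "q4_k": 27}

IMATRIX_AWARE = {ggml_tensor_qtype[name]
                 for name in ("q2_k", "gguf_iq1_s", "q6_k", "q4_k")}

def quantize_path(qtype: int) -> str:
    # Mirrors `if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K]`.
    return "imatrix" if qtype in IMATRIX_AWARE else "plain"

assert quantize_path(ggml_tensor_qtype["q4_k"]) == "imatrix"
assert quantize_path(ggml_tensor_qtype["q6_k"]) == "imatrix"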
@@ -119,7 +119,7 @@ class _BaseAutoModelClass:
                                 ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``,
                                 ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``,
                                 ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, gguf_iq1_s'``,
-                                ``'fp16'``, ``'bf16'``, or ``'q6_k'``,
+                                ``'fp16'``, ``'bf16'``, ``'q4_k'`` or ``'q6_k'``,
                                 ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means
                                 asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc.
                                 Relevant low bit optimizations will be applied to the model.
@@ -378,7 +378,7 @@ class _BaseAutoModelClass:
                           f"Unknown load_in_low_bit value: {q_k}, expected:"
                           f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, "
                           f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16,  bf16, gguf_iq2_xxs, "
-                          f"gguf_iq2_xs, gguf_iq1_s, q2_k, q6_k, mixed_fp4 or mixed_fp8.")
+                          f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, mixed_fp4 or mixed_fp8.")
         qtype = ggml_tensor_qtype[q_k]
 
         # In case it needs a second try,
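Since the new value is surfaced through load_in_low_bit, a minimal usage sketch follows. The import path and model path are assumptions (only the parameter name and the "q4_k" string come from the hunks above); adjust them to the package and model you actually use.

# Minimal sketch, assuming an ipex-llm style import path and a placeholder
# model path; only load_in_low_bit="q4_k" is taken from this commit.
from ipex_llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "path/to/your/model"  # placeholder
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_low_bit="q4_k",
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)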