Support fp6 save & load (#11034)
This commit is contained in:
		
							parent
							
								
									ac384e0f45
								
							
						
					
					
						commit
						686f6038a8
					
				
					 1 changed file with 1 addition and 1 deletion
				
			
		| 
						 | 
				
			
			@ -268,7 +268,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
 | 
			
		|||
 | 
			
		||||
    src = ctypes.c_void_p(tensor.data.data_ptr())
 | 
			
		||||
 | 
			
		||||
    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP8E4, FP8E5]:
 | 
			
		||||
    if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5]:
 | 
			
		||||
        dst_tensor = torch.empty_like(tensor)
 | 
			
		||||
    elif qtype == ggml_tensor_qtype["sym_int5"]:
 | 
			
		||||
        QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue