LLM: update q8 convert xpu&cpu (#8930)
commit b3f5dd5b5d
parent 33d75adadf

1 changed file with 3 additions and 2 deletions
@@ -60,6 +60,7 @@ IS_SERVER = is_server()
 IS_SPR = is_spr()
 TORCH_LINEAR_THRESHOLD = 96
 SYM_INT4 = ggml_tensor_qtype["sym_int4"]
+SYM_INT8 = ggml_tensor_qtype["sym_int8"]
 
 
 def ggml_convert_qtype(tensor: torch.Tensor, qtype: int, device=None):
@@ -98,7 +99,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
 
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
-    if qtype == ggml_tensor_qtype["sym_int4"]:
+    if qtype in [SYM_INT4, SYM_INT8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -123,7 +124,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
 
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
-    if qtype == ggml_tensor_qtype["sym_int4"]:
+    if qtype in [SYM_INT4, SYM_INT8]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
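
For readers skimming the change: below is a minimal standalone sketch of the dispatch that both hunks extend, using illustrative stand-in qtype ids rather than the library's real values, and a hypothetical helper name. It only shows that sym_int8 now takes the same destination-allocation path as sym_int4 when converting between CPU and XPU formats; the other branches of the real functions are omitted.

import torch

# Stand-in qtype ids for illustration only -- the real values come from
# the ggml_tensor_qtype mapping in the modified module.
ggml_tensor_qtype = {"sym_int4": 2, "sym_int5": 6, "sym_int8": 8}
SYM_INT4 = ggml_tensor_qtype["sym_int4"]
SYM_INT8 = ggml_tensor_qtype["sym_int8"]


def allocate_convert_dst(tensor: torch.Tensor, qtype: int) -> torch.Tensor:
    # Mirrors the branch touched in both hunks: sym_int4 and (after this
    # commit) sym_int8 buffers keep the same size when converted between
    # CPU and XPU layouts, so the destination is simply empty_like(src).
    if qtype in [SYM_INT4, SYM_INT8]:
        return torch.empty_like(tensor)
    # Other formats (e.g. sym_int5) size the destination from the ggml
    # block layout; that path is not sketched here.
    raise NotImplementedError("only the sym_int4 / sym_int8 path is shown")


# Usage: a fake packed sym_int8 weight buffer on CPU.
packed = torch.empty(4096, dtype=torch.uint8)
dst = allocate_convert_dst(packed, ggml_tensor_qtype["sym_int8"])
assert dst.shape == packed.shape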