change mixed_precision to q6_k (#11706)

Ruonan Wang 2024-08-02 10:55:16 +03:00 committed by GitHub
parent 1baa3efe0e
commit aa98ef96fe


@@ -394,7 +394,7 @@ def _replace_with_low_bit_linear(model, qtype, modules_to_not_convert=None,
         if mixed_precision and is_lm_head(name, model_config, out_features):
             if cur_qtype in [ggml_tensor_qtype["sym_int4"],
                              ggml_tensor_qtype["asym_int4"]]:
-                cur_qtype = ggml_tensor_qtype["sym_int8"]
+                cur_qtype = ggml_tensor_qtype["q6_k"]
         # check hidden size whether is a multiple of 256
         cur_qtype = check_hidden_size(cur_qtype, in_features)
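For context, a minimal Python sketch of the selection logic this hunk changes: with mixed precision enabled, a 4-bit lm_head is upgraded to q6_k, which packs weights in 256-element blocks (hence the multiple-of-256 check). The numeric qtype codes, the standalone is_lm_head flag, and the sym_int8 fallback below are illustrative assumptions, not the actual ipex-llm implementation.

# Sketch only: qtype codes and the fallback behavior are assumed placeholders.
ggml_tensor_qtype = {"sym_int4": 2, "asym_int4": 3, "sym_int8": 8, "q6_k": 19}

def pick_lm_head_qtype(cur_qtype: int, mixed_precision: bool,
                       is_lm_head: bool, in_features: int) -> int:
    """Upgrade a 4-bit lm_head to q6_k when mixed precision is enabled."""
    if mixed_precision and is_lm_head:
        if cur_qtype in (ggml_tensor_qtype["sym_int4"],
                         ggml_tensor_qtype["asym_int4"]):
            cur_qtype = ggml_tensor_qtype["q6_k"]
    # q6_k quantizes weights in 256-element blocks, so fall back when the
    # hidden size is not a multiple of 256 (stands in for check_hidden_size;
    # the sym_int8 fallback here is an assumption).
    if cur_qtype == ggml_tensor_qtype["q6_k"] and in_features % 256 != 0:
        cur_qtype = ggml_tensor_qtype["sym_int8"]
    return cur_qtype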