Skip rescaling RWKV linear layers when loading with load_low_bit (#10164)

* rwkv_ld
Zhao Changmin 2024-02-19 15:56:42 +08:00 committed by GitHub
parent 3e2af5ec0a
commit f8730e8dc1

@@ -561,6 +561,10 @@ class _BaseAutoModelClass:
         else:
             model = model_class(config, *model_args, **kwargs)
+        # rwkv model linear layers has been rescaled
+        if model.config.model_type == "rwkv":
+            model.layers_are_rescaled = True
         # Loading args may differ based on their usage
         quant_device = "meta" if bigdl_lcmu_enabled else "cpu"
         model = ggml_convert_low_bit(model, qtype, optimize_model, device=quant_device,
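
For context, a minimal usage sketch of the flow this hunk guards, assuming the BigDL-LLM transformers API at the time of the commit; the checkpoint name and save path are placeholders, not taken from the change itself. The idea is that a low-bit RWKV checkpoint already contains rescaled linear weights, so setting layers_are_rescaled = True on reload keeps the rescale pass from running again on the quantized weights.

# A minimal usage sketch, assuming the BigDL-LLM transformers API of the time.
# The checkpoint name and save path below are placeholders.
from bigdl.llm.transformers import AutoModelForCausalLM

# First run: quantize an RWKV checkpoint to low-bit and save it. Per the
# comment in the hunk above, the RWKV linear layers have already been
# rescaled by the time the low-bit weights are written out.
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-4-169m-pile",
                                             load_in_4bit=True)
model.save_low_bit("./rwkv-low-bit")

# Later run: reload the already-rescaled, quantized weights. With this change,
# layers_are_rescaled is set to True for RWKV models, so the rescale pass is
# not applied a second time to the low-bit weights.
model = AutoModelForCausalLM.load_low_bit("./rwkv-low-bit")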