[NPU] Fix save-load usage of minicpm models (#12628)

Author: binbin Deng
Date:   2024-12-27 15:56:46 +08:00
Commit: f17ccfa61a (parent c72a5db757), committed via GitHub

2 changed files with 7 additions and 5 deletions
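
For context, the flow this commit repairs is ipex-llm's NPU save-load round trip for remote-code models such as MiniCPM. A minimal sketch follows; the checkpoint name, output folder, and the exact from_pretrained arguments are illustrative assumptions, not taken from this diff:

# Sketch of the NPU save-load round trip (paths and low-bit format assumed).
from ipex_llm.transformers.npu_model import AutoModelForCausalLM

# First run: convert the remote-code MiniCPM checkpoint and save it.
model = AutoModelForCausalLM.from_pretrained(
    "openbmb/MiniCPM-1B-sft-bf16",   # assumed checkpoint
    optimize_model=True,
    load_in_low_bit="sym_int4",
    trust_remote_code=True)
model.save_low_bit("./minicpm-npu")

# Later runs: reload the converted folder directly. Before this fix, this
# step could fail for custom models whose defining .py files were not
# present in the saved folder.
model = AutoModelForCausalLM.load_low_bit("./minicpm-npu",
                                          trust_remote_code=True)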

@@ -445,12 +445,9 @@ class _BaseAutoModelClass:
             from .npu_models.npu_llm_cpp import load_model_from_file
             from .npu_models.convert import generate, general_convert
             from .npu_models.convert import prepare_input_ids, causal_lm_forward
-            config = AutoConfig.from_pretrained(
-                os.path.join(pretrained_model_name_or_path, "config.json"),
-                trust_remote_code=trust_remote_code)
             with torch.device('meta'):
-                model = cls.HF_Model.from_config(
-                    config, trust_remote_code=trust_remote_code)
+                model = cls.HF_Model.from_config(config,
+                                                 trust_remote_code=trust_remote_code)
             try:
                 model_ptr = load_model_from_file(pretrained_model_name_or_path)
                 model.model_ptr = model_ptr
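
The deleted lines re-read the config from the bare config.json file inside the checkpoint folder; the new call reuses a config object presumably loaded earlier in load_low_bit from the full folder, which matters for remote-code models because resolving the custom classes listed in config.auto_map needs the directory containing their .py files. The retained torch.device('meta') context is what keeps this step cheap, as the short sketch below illustrates (the folder path is an assumption):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

# Loading the config from the folder (not the bare config.json file)
# lets transformers locate the custom modules referenced by auto_map.
config = AutoConfig.from_pretrained("./minicpm-npu", trust_remote_code=True)

# Under the meta device, from_config builds the module tree without
# allocating real weight storage; the actual weights come later from
# the NPU blob via load_model_from_file.
with torch.device('meta'):
    model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)

assert next(model.parameters()).is_meta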

@@ -444,6 +444,11 @@ def convert_llm_for_deploy(model: torch.nn.Module,
     else:
         lm_head_low_bit = model.lm_head.lm_heads[0].qtype
 
+    if model._auto_class is not None:
+        # For a custom model, copy the file defining it in the folder
+        from transformers.dynamic_module_utils import custom_object_save
+        custom_object_save(model, save_directory, config=model.config)
+
     if model.config.model_type == "qwen2":
         if group_size == 0:
             if model.config.hidden_size == 1536:
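
The added block is the substance of the fix on the save side: when a model class has been registered for auto-class use (model._auto_class is set), transformers' custom_object_save copies the .py files defining the custom architecture into the save directory, so a later load with trust_remote_code=True can import them. A rough standalone illustration follows; MyConfig and MyModel are hypothetical stand-ins for MiniCPM's custom classes:

import os
import torch
from transformers import PretrainedConfig, PreTrainedModel
from transformers.dynamic_module_utils import custom_object_save

# Hypothetical custom classes. Note: for the copy to happen, these
# definitions must live in an importable .py module; custom_object_save
# skips objects whose class is defined in __main__.
class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyModel(PreTrainedModel):
    config_class = MyConfig

    def __init__(self, config):
        super().__init__(config)
        self.proj = torch.nn.Linear(4, 4)

# register_for_auto_class sets _auto_class and records the classes in
# config.auto_map -- the condition the diff checks before saving.
MyConfig.register_for_auto_class()
MyModel.register_for_auto_class("AutoModel")

model = MyModel(MyConfig())
os.makedirs("./saved", exist_ok=True)
# Copies the .py file defining MyModel next to the saved config, so a
# later load with trust_remote_code=True can import it again.
custom_object_save(model, "./saved", config=model.config)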