disable quantize kv in specific qwen model (#11238)

Author: Yishuo Wang
Date: 2024-06-06 14:08:39 +08:00 (committed by GitHub)
parent c4e5806e01
commit e738ec38f4

@@ -38,8 +38,7 @@
 #
 import math
-import warnings
-from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List
+from typing import Optional, Tuple, Union, List
 import torch
 from torch.nn.functional import scaled_dot_product_attention as sdpa
@@ -74,7 +73,10 @@ def qwen2_model_forward(
     return_dict: Optional[bool] = None,
 ):
     use_cache = use_cache if use_cache is not None else self.config.use_cache
-    use_quantize_kv = use_quantize_kv_cache(self.layers[0].mlp.up_proj, input_ids)
+    use_quantize_kv = (
+        self.config.hidden_size != 3584  # disable quantize kv in specific model
+        and use_quantize_kv_cache(self.layers[0].mlp.up_proj, input_ids)
+    )
     if use_cache:
         if use_quantize_kv and not isinstance(past_key_values, DynamicFp8Cache):
             past_key_values = DynamicFp8Cache.from_legacy_cache(past_key_values)
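The functional change is the second hunk: FP8 KV-cache quantization is now skipped whenever config.hidden_size == 3584, which matches Qwen2-7B and is presumably the "specific qwen model" named in the commit title; the first hunk only drops imports left unused. Below is a minimal standalone sketch of that guard, assuming a transformers-style config object; DummyConfig, should_quantize_kv, and the kv_quant_supported flag are hypothetical names standing in for the real use_quantize_kv_cache check.

from dataclasses import dataclass

# Hypothetical stand-in for a transformers-style model config.
@dataclass
class DummyConfig:
    hidden_size: int

def should_quantize_kv(config: DummyConfig, kv_quant_supported: bool) -> bool:
    # Mirrors the patched expression: a model whose hidden_size is 3584
    # (e.g. Qwen2-7B) never uses the FP8 quantized KV cache; every other
    # size falls through to the original capability check.
    return config.hidden_size != 3584 and kv_quant_supported

# The 3584 model is excluded even when quantization would otherwise apply.
assert should_quantize_kv(DummyConfig(hidden_size=3584), True) is False
assert should_quantize_kv(DummyConfig(hidden_size=4096), True) is True

Because the short-circuiting comparison comes first, the excluded model also skips the use_quantize_kv_cache call entirely, so no quantization probe runs for it.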