update code for NPU qwen2 (#12094)

* update code

* fix
Ruonan Wang authored 2024-09-20 00:58:32 -07:00, committed by GitHub
parent db7500bfd4
commit 09b8c80d9d
2 changed files with 27 additions and 24 deletions


@@ -85,7 +85,6 @@ class LMHeadLinear(NNFactory):
         Returns:
             np.ndarray: result
         """
-        self.prefetchWeights(1, verify_size=False)
         self.set_input_tensor(X, 0)
         self.elapsed = backend_lib.run(self._mm)
         if len(self.out) == 1:
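
The only change in this hunk is dropping the explicit weight prefetch before executing the compiled matmul. For readability, a sketch of how LMHeadLinear.run reads after the change follows; only the lines shown above come from the source, while the method signature, docstring header, backend import, and the return handling after the `if len(self.out) == 1:` check are assumptions added to make the sketch self-contained.

    import numpy as np
    # Assumed to mirror the module's existing backend import; not shown in this diff.
    from intel_npu_acceleration_library.backend.bindings import lib as backend_lib

    def run(self, X: np.ndarray) -> np.ndarray:
        """Run the compiled LM-head matmul on the NPU (post-change sketch).

        Returns:
            np.ndarray: result
        """
        # The explicit self.prefetchWeights(1, verify_size=False) call is removed;
        # weights are assumed to already be resident when the graph executes.
        self.set_input_tensor(X, 0)
        self.elapsed = backend_lib.run(self._mm)
        if len(self.out) == 1:
            return self.out[0]  # assumption: a single-output graph returns its lone tensor
        return self.out         # assumption: otherwise return all outputs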


@@ -990,35 +990,39 @@ def gen_qwen2_fused_model_forward(prefill_runner, decode_runner):
         if inputs_embeds is None:
             inputs_embeds = self.embed_tokens(input_ids)
 
-        past_key_values_length = 0
-        from ipex_llm.transformers.npu_models.kv import DynamicFusedNormalCache
-        if use_cache and not isinstance(past_key_values, DynamicFusedNormalCache):
-            past_key_values = DynamicFusedNormalCache.from_legacy_cache(past_key_values)
-            past_key_values_length = past_key_values.get_seq_length()
+        if seq_length > 1:
+            past_key_values_length = 0
+            from ipex_llm.transformers.npu_models.kv import DynamicFusedNormalCache
+            if use_cache and not isinstance(past_key_values, DynamicFusedNormalCache):
+                past_key_values = DynamicFusedNormalCache.from_legacy_cache(past_key_values)
+                past_key_values_length = past_key_values.get_seq_length()
 
-        if position_ids is None:
-            device = input_ids.device if input_ids is not None else inputs_embeds.device
-            position_ids = torch.arange(
-                past_key_values_length,
-                seq_length + past_key_values_length,
-                dtype=torch.long,
-                device=device,
-            )
-            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
-        else:
-            position_ids = position_ids.view(-1, seq_length).long()
+            if position_ids is None:
+                device = input_ids.device if input_ids is not None else inputs_embeds.device
+                position_ids = torch.arange(
+                    past_key_values_length,
+                    seq_length + past_key_values_length,
+                    dtype=torch.long,
+                    device=device,
+                )
+                position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+            else:
+                position_ids = position_ids.view(-1, seq_length).long()
 
-        from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
-        attention_mask = _prepare_4d_causal_attention_mask(
-            attention_mask,
-            (batch_size, seq_length),
-            inputs_embeds,
-            past_key_values_length,
-            sliding_window=self.config.sliding_window,
-        )
+            from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+            attention_mask = _prepare_4d_causal_attention_mask(
+                attention_mask,
+                (batch_size, seq_length),
+                inputs_embeds,
+                past_key_values_length,
+                sliding_window=self.config.sliding_window,
+            )
+        else:
+            attention_mask = None
+            position_ids = None
 
         # embed positions
         hidden_states = inputs_embeds
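
The qwen2 hunk splits prefill from decode: only multi-token (prefill) calls still build host-side position ids and a 4D causal mask, while single-token decode steps now pass None for both, presumably because the fused NPU decode runner handles masking and positions itself. Below is a minimal, self-contained sketch of that dispatch under those assumptions; the helper name prepare_mask_and_position_ids and the toy tensors are illustrative, while _prepare_4d_causal_attention_mask and its arguments follow the diff.

    import torch
    from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

    def prepare_mask_and_position_ids(seq_length, past_len, batch_size,
                                      inputs_embeds, sliding_window=None):
        if seq_length > 1:
            # Prefill: build position ids and the 4D causal mask on the host.
            position_ids = torch.arange(past_len, seq_length + past_len,
                                        dtype=torch.long, device=inputs_embeds.device)
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
            attention_mask = _prepare_4d_causal_attention_mask(
                None,                        # no padding mask in this toy example
                (batch_size, seq_length),
                inputs_embeds,               # supplies dtype/device for the mask
                past_len,
                sliding_window=sliding_window,
            )
        else:
            # Decode: skip host-side preparation entirely.
            attention_mask = None
            position_ids = None
        return attention_mask, position_ids

    # Toy usage: a 2-token prefill followed by a 1-token decode step.
    embeds = torch.zeros(1, 2, 8)
    mask, pos = prepare_mask_and_position_ids(2, 0, 1, embeds)             # mask + positions built
    mask_d, pos_d = prepare_mask_and_position_ids(1, 2, 1, embeds[:, :1])  # both None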