refactor chatglm2/3 (#11290)

This commit is contained in:
parent ea372cc472
commit 01fe0fc1a2

3 changed files with 152 additions and 509 deletions
@@ -19,136 +19,26 @@

import math
import torch
from typing import Optional, Tuple, List
import torch.nn.functional as F
from typing import Optional, Tuple
from transformers.modeling_outputs import BaseModelOutputWithPast
from ipex_llm.transformers.models.utils import init_kv_cache, extend_kv_cache, append_kv_cache
from ipex_llm.transformers.models.utils import init_fp8_kv_cache, append_fp8_kv_cache, \
    restore_fp8_kv_cache, use_quantize_kv_cache, use_flash_attention
from ipex_llm.transformers.models.utils import use_sdp
from ipex_llm.utils.common.log4Error import invalidInputError
from ipex_llm.transformers.models.utils import restore_fp8_kv_cache, update_past_key_value
from ipex_llm.transformers.models.utils import use_quantize_kv_cache, use_sdp, use_sdp_causal
from ipex_llm.transformers.models.utils import should_use_fuse_rope, apply_rotary_pos_emb


import os

KV_CACHE_ALLOC_BLOCK_LENGTH = int(os.environ.get("KV_CACHE_ALLOC_BLOCK_LENGTH", 256))
KV_CACHE_ALLOC_MIN_LENGTH = 512


def split_tensor_along_last_dim(
        tensor: torch.Tensor,
        num_partitions: int,
        contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
    """Split a tensor along its last dimension.
    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions to split the tensor
        contiguous_split_chunks: If True, make each chunk contiguous
                                 in memory.
    Returns:
        A list of Tensors
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    # Get the size and dimension.
    last_dim = tensor.dim() - 1
    last_dim_size = tensor.size()[last_dim] // num_partitions
    # Split.
    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
    # Note: torch.split does not create contiguous tensors by default.
    if contiguous_split_chunks:
        return tuple(chunk.contiguous() for chunk in tensor_list)

    return tensor_list


def glm_sdpa(query, key, value, attention_mask=None, is_causal=False):
    if use_flash_attention(query, key, attention_mask) or query.device.type == 'cpu':
        context_layer = F.scaled_dot_product_attention(query.to(key.dtype),
                                                       key,
                                                       value,
                                                       attention_mask,
                                                       is_causal=is_causal).to(key.dtype)
    else:
        # attention_mask is not None only when past_key_value is not None and q_len > 1
        if attention_mask is not None:
            attn_bias = torch.zeros(attention_mask.shape, dtype=query.dtype,
                                    device=query.device)
            attention_mask = ~attention_mask
            if attention_mask.dtype == torch.bool:
                attn_bias.masked_fill_(attention_mask.logical_not(), float("-inf"))
            else:
                attn_bias += attention_mask
        elif is_causal:
            L, S = query.size(-2), key.size(-2)
            attn_bias = torch.zeros(L, S, dtype=query.dtype, device=query.device)
            temp_mask = torch.ones(L, S, dtype=torch.bool, device=query.device).tril(diagonal=0)
            attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
            attn_bias.to(key.dtype)
        else:
            attn_bias = None
        if use_sdp(query.shape[2], key.shape[2],
                   query.shape[-1], query):
            import xe_addons
            attn_output = xe_addons.sdp(query, key, value, attn_bias)
            context_layer = attn_output.view(query.shape)
        else:
            head_dim = query.size(-1)
            attn = torch.matmul(query.to(key.dtype),
                                key.transpose(2, 3)) / math.sqrt(head_dim)
            if attn_bias is not None:
                attn += attn_bias
            attn = F.softmax(attn, dim=-1,
                             dtype=torch.float32).to(value.dtype)
            context_layer = torch.matmul(attn, value)
    return context_layer


@torch.jit.script
def apply_rotary_pos_emb_chatglm(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
    # x: [sq, b, np, hn]
    sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)
    rot_dim = rope_cache.shape[-2] * 2
    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
    # truncate to support variable sizes
    rope_cache = rope_cache[:sq]
    xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)
    rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)
    x_out2 = torch.stack(
        [
            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
        ],
        -1,
    )
    x_out2 = x_out2.flatten(3)
    return torch.cat((x_out2, x_pass), dim=-1)


def repeat_kv(key: torch.Tensor, value: torch.Tensor, n_head: int) -> (torch.Tensor, torch.Tensor):
    # key, value's shape: [bs, n_kv_head, seq_len, head_dim] -> [bs, n_head, seq_len, head_dim]
    batch_size, n_kv_head, seq_len, head_dim = key.shape

    key = key.unsqueeze(2)
    key = key.expand(-1, -1, n_head // n_kv_head, -1, -1)
    key = key.contiguous().view(batch_size, n_head, seq_len, head_dim)

    value = value.unsqueeze(2)
    value = value.expand(-1, -1, n_head // n_kv_head, -1, -1)
    value = value.contiguous().view(batch_size, n_head, seq_len, head_dim)

    return key, value


def should_split_qkv_tensor(query_layer, bsz, n_head, seq_len):
    if os.environ.get("IPEX_LLM_SPLIT_QKV", None) is not None:
        return os.environ.get("IPEX_LLM_SPLIT_QKV", None) == "1"
    elif query_layer.dtype == torch.float16 and query_layer.shape[2] >= 5000:
        # split tensor for memory block limitation
        # support fp16 and set input length threshold at 5000 for now
        return True
    elif query_layer.element_size()*bsz*n_head*seq_len*seq_len >= 4*1024**3:
        # attn_weight size larger than memory block limitation 4GB
        return True
    return False
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states
    go from (batch, num_key_value_heads, seqlen, head_dim) to
    (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads,
                                                           n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def chatglm_rms_norm_forward(self, hidden_states):
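
Editor's note: both the removed repeat_kv(key, value, n_head) helper and the new repeat_kv(hidden_states, n_rep) in this hunk perform the same grouped-query-attention expansion, broadcasting each KV head to serve several query heads. A minimal standalone sketch of that expansion (not part of this diff; expand_kv_heads is a hypothetical name):

import torch

def expand_kv_heads(kv: torch.Tensor, n_head: int) -> torch.Tensor:
    # kv: [batch, n_kv_head, seq_len, head_dim] -> [batch, n_head, seq_len, head_dim]
    batch, n_kv_head, seq_len, head_dim = kv.shape
    group = n_head // n_kv_head
    # add a group axis, broadcast it, then fold it back into the head axis
    kv = kv.unsqueeze(2).expand(batch, n_kv_head, group, seq_len, head_dim)
    return kv.reshape(batch, n_head, seq_len, head_dim)

# e.g. 2 KV heads serving 8 query heads
assert expand_kv_heads(torch.randn(1, 2, 16, 64), 8).shape == (1, 8, 16, 64)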
@@ -166,16 +56,16 @@ def chatglm_rms_norm_forward(self, hidden_states):


def chatglm2_model_forward(
        self,
        input_ids,
        position_ids: Optional[torch.Tensor]=None,
        attention_mask: Optional[torch.BoolTensor]=None,
        full_attention_mask: Optional[torch.BoolTensor]=None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None,
        inputs_embeds: Optional[torch.Tensor]=None,
        use_cache: Optional[bool]=None,
        output_hidden_states: Optional[bool]=None,
        return_dict: Optional[bool]=None,
    self,
    input_ids,
    position_ids: Optional[torch.Tensor]=None,
    attention_mask: Optional[torch.BoolTensor]=None,
    full_attention_mask: Optional[torch.BoolTensor]=None,
    past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None,
    inputs_embeds: Optional[torch.Tensor]=None,
    use_cache: Optional[bool]=None,
    output_hidden_states: Optional[bool]=None,
    return_dict: Optional[bool]=None,
):
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None
@@ -196,33 +86,51 @@ def chatglm2_model_forward(
                                                 past_key_values,
                                                 padding_mask=attention_mask)

    use_fuse_rope = input_ids.device.type == "xpu"
    use_fuse_rope = use_fuse_rope and not self.training
    # ipex-llm changes begin
    # 1. replace `rotary_pos_emb` with `inv_freq` and `position_ids`
    # 2. generate `causal_mask` and replace `full_attention_mask` with it
    if position_ids is None:
        if past_key_values is None:
            position_ids = torch.arange(seq_length, dtype=torch.int64, device=inputs_embeds.device)
        else:
            kv_length = past_key_values[0][0].size(0)
            position_ids = torch.arange(kv_length, kv_length + seq_length,
                                        dtype=torch.int64, device=inputs_embeds.device)
        position_ids = position_ids.repeat(batch_size, 1)

    # Rotary positional embeddings
    rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
    if position_ids is not None:
        rotary_pos_emb = rotary_pos_emb[position_ids]
    if getattr(self.rotary_pos_emb, "cached_dtype", None) != inputs_embeds.dtype:
        rot_dim = self.rotary_pos_emb.dim
        inv_freq = 1.0 / (10000 ** (torch.arange(0, rot_dim, 2,
                                                 device=inputs_embeds.device,
                                                 dtype=inputs_embeds.dtype) / rot_dim))
        self.rotary_pos_emb.register_buffer("inv_freq", inv_freq, persistent=False)
        self.rotary_pos_emb.cached_dtype = inputs_embeds.dtype

    # `full_attention_mask` is not None only when
    #  `past_key_values` is not None and `seq_length` > 1
    if full_attention_mask is not None:
        causal_mask = torch.zeros([batch_size, 1, seq_length, full_attention_mask.size(-1)],
                                  dtype=inputs_embeds.dtype, device=inputs_embeds.device)
        mask_value = torch.finfo(inputs_embeds.dtype).min
        causal_mask.masked_fill_(full_attention_mask, mask_value)
    elif self.training or (inputs_embeds.device.type != "xpu" and past_key_values is None):
        full_attention_mask = self.get_masks(input_ids,
                                             past_key_values,
                                             padding_mask=attention_mask)
        causal_mask = torch.zeros([batch_size, 1, seq_length, full_attention_mask.size(-1)],
                                  dtype=inputs_embeds.dtype, device=inputs_embeds.device)
        mask_value = torch.finfo(inputs_embeds.dtype).min
        causal_mask.masked_fill_(full_attention_mask, mask_value)
    else:
        rotary_pos_emb = rotary_pos_emb[None, :seq_length]
    if use_fuse_rope:
        # Repeat cos sin here, call only once for each token.
        # Chatglm2's rotary embedding is similar to gptj's, is rotate_every_two.
        # If put this to attension forward, it will generate too many times.
        cos, sin = rotary_pos_emb.split(rotary_pos_emb.shape[-1] // 2, dim=-1)
        cos = cos.squeeze(-1)
        sin = sin.squeeze(-1)
        cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
        sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
        rotary_pos_emb = (cos, sin)
    else:
        rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()
        causal_mask = None

    # Run encoder.
    hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
        inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
        inputs_embeds, causal_mask,
        rotary_pos_emb=(self.rotary_pos_emb.inv_freq, position_ids),
        kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
    )
    # ipex-llm changes end

    if not return_dict:
        return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions]
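
Editor's note: the rewritten model forward above turns the boolean full_attention_mask into an additive causal_mask filled with the dtype minimum via masked_fill_, which is the form the attention kernels consume. A rough standalone illustration of that conversion (assumed shapes, not part of this diff):

import torch

def to_additive_mask(bool_mask: torch.Tensor, dtype: torch.dtype = torch.float16) -> torch.Tensor:
    # bool_mask is True at positions that must NOT be attended to
    additive = torch.zeros(bool_mask.shape, dtype=dtype)
    additive.masked_fill_(bool_mask, torch.finfo(dtype).min)
    return additive

print(to_additive_mask(torch.tensor([[False, True, True]])))
# tensor([[     0., -65504., -65504.]], dtype=torch.float16)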
@@ -239,364 +147,105 @@ def chatglm2_model_forward(
def chatglm2_attention_forward(
    self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
):
    if use_quantize_kv_cache(self.query_key_value, hidden_states.transpose(0, 1)):
        forward_function = chatglm2_quantized_attention_forward_8eb45c
    else:
        forward_function = chatglm2_attention_forward_8eb45c
    return forward_function(
        self=self,
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        rotary_pos_emb=rotary_pos_emb,
        kv_cache=kv_cache,
        use_cache=use_cache
    )
    # hidden_states: [seq_len, bsz, head_dim]
    q_len, bsz, _ = hidden_states.size()


def chatglm2_quantized_attention_forward_8eb45c(
    self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
):
    # hidden_states: [seq_len, bs, head_dim]
    mixed_x_layer = self.query_key_value(hidden_states)
    # kv_cache: [seq_len, bsz, n_kv_head, head_dim] ->
    # past_key_value: [bsz, n_kv_head, seq_len, head_dim]
    past_key_value = None if kv_cache is None else (kv_cache[0].permute(1, 2, 0, 3),
                                                    kv_cache[1].permute(1, 2, 0, 3))

    n_head = self.num_attention_heads_per_partition
    n_kv_head = self.num_multi_query_groups_per_partition if self.multi_query_attention else n_head
    head_dim = self.hidden_size_per_attention_head

    query_layer, key_layer, value_layer = mixed_x_layer.split(
        [n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim],
        dim=-1,
    qkv = self.query_key_value(hidden_states)
    qkv = qkv.view(q_len, bsz, n_head + 2 * n_kv_head, head_dim)
    # [seq_len, bsz, n_head, head_dim] -> [bsz, n_head, seq_len, head_dim]
    qkv = qkv.permute(1, 2, 0, 3)

    query_states, key_states, value_states = qkv.split([n_head,
                                                        n_kv_head,
                                                        n_kv_head], dim=1)

    kv_seq_len = key_states.shape[2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[2]

    # IPEX-LLM OPT: fuse rope
    inv_freq, position_ids = rotary_pos_emb
    rot_dim = inv_freq.size(-1) * 2
    if should_use_fuse_rope(hidden_states, rotary_pos_emb[1], self.training):
        import xe_addons
        xe_addons.rotary_two_inplaced(inv_freq, position_ids,
                                      query_states[..., :rot_dim], key_states[..., :rot_dim])
    else:
        idx_theta = torch.outer(position_ids[0].float(),
                                inv_freq.float()).to(hidden_states.dtype)
        idx_theta = idx_theta.unsqueeze(0).unsqueeze(0)
        cos = torch.cos(idx_theta).repeat_interleave(2, -1)
        sin = torch.sin(idx_theta).repeat_interleave(2, -1)
        q_rot, k_rot = apply_rotary_pos_emb(query_states[..., :rot_dim], key_states[..., :rot_dim],
                                            cos, sin, position_ids, "chatglm")
        query_states[..., :rot_dim] = q_rot[...]
        key_states[..., :rot_dim] = k_rot[...]

    # IPEX-LLM OPT: kv cache and quantize kv
    use_quantize_kv = use_quantize_kv_cache(self.query_key_value, hidden_states)
    key_states, value_states = update_past_key_value(
        past_key_value, key_states, value_states,
        kv_seq_len, use_quantize_kv, hidden_states.device
    )
    query_layer = query_layer.view(query_layer.shape[:-1] + (n_head, head_dim))
    key_layer = key_layer.view(key_layer.shape[:-1] + (n_kv_head, head_dim))
    value_layer = value_layer.view(value_layer.shape[:-1] + (n_kv_head, head_dim))
    # query, key, value's shape: [seq_len, bs, n_head/n_kv_head, head_dim]
    # past_key_value: [bsz, n_kv_head, seq_len, head_dim] -> [seq_len, bsz, n_kv_head, head_dim]
    past_key_value = (key_states.permute(2, 0, 1, 3),
                      value_states.permute(2, 0, 1, 3)) if use_cache else None

    # apply relative positional encoding (rotary embedding)
    if rotary_pos_emb is not None:
        if len(rotary_pos_emb) == 2 and isinstance(rotary_pos_emb, tuple):
            # use_fuse_rope, see chatglm2_model_forward
            cos, sin = rotary_pos_emb
            rot_dim = cos.shape[-1]
            query_layer = query_layer.transpose(0, 1)
            key_layer = key_layer.transpose(0, 1)
            query_layer_cur = query_layer[..., :rot_dim]
            key_layer_cur = key_layer[..., :rot_dim]
            # ipex_llm's apply_rotary_embedding can change the origin storage,
            # so query_layer will get the result directly.
            torch.ops.torch_ipex.apply_rotary_embedding(query_layer_cur, sin, cos, query_layer_cur)
            torch.ops.torch_ipex.apply_rotary_embedding(key_layer_cur, sin, cos, key_layer_cur)
            query_layer = query_layer.transpose(0, 1)
            key_layer = key_layer.transpose(0, 1)
    # IPEX-LLM OPT: sdp
    attn_weights = None
    if use_sdp(q_len, kv_seq_len, head_dim, query_states):
        import xe_addons
        if use_quantize_kv:
            attn_output = xe_addons.sdp_fp8(query_states, key_states, value_states, attention_mask)
        else:
            query_layer = apply_rotary_pos_emb_chatglm(query_layer, rotary_pos_emb)
            key_layer = apply_rotary_pos_emb_chatglm(key_layer, rotary_pos_emb)

    query_layer = query_layer.permute(1, 2, 0, 3)
    key_layer = key_layer.permute(1, 2, 0, 3)
    value_layer = value_layer.permute(1, 2, 0, 3)
    # query, key, value's shape: [bs, n_head/n_kv_head, seq_len, head_dim]
    batch_size, _, seq_len, _ = query_layer.shape

    if kv_cache is None:
        # first token
        if self.multi_query_attention:
            key, value = repeat_kv(key_layer, value_layer, n_head)
            attn_output = xe_addons.sdp(query_states, key_states, value_states, attention_mask)
    elif use_sdp_causal(q_len, kv_seq_len, head_dim, query_states, self.training):
        import xe_addons
        if use_quantize_kv:
            attn_output = xe_addons.sdp_fp8_causal(query_states, key_states, value_states,
                                                   attention_mask)
        else:
            key, value = key_layer, value_layer

        if should_split_qkv_tensor(query_layer, batch_size, n_head, seq_len):
            # split second dim to block size = 8
            block_size = 8
            query_split = torch.split(query_layer, block_size, dim=1)
            key_split = torch.split(key, block_size, dim=1)
            value_split = torch.split(value, block_size, dim=1)
            results = []
            for q, k, v in zip(query_split, key_split, value_split):
                result = glm_sdpa(q, k, v, is_causal=True)
                results.append(result)
            context_layer = torch.cat(results, dim=1)
            attn_output = xe_addons.sdp_causal(query_states, key_states, value_states,
                                               attention_mask)
    elif query_states.device.type == "cpu":
        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, n_head // n_kv_head)
        value_states = repeat_kv(value_states, n_head // n_kv_head)
        if q_len == kv_seq_len:
            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_states, key_states, value_states, is_causal=True
            )
        else:
            context_layer = glm_sdpa(query_layer, key, value, is_causal=True)
        context_layer = context_layer.to(query_layer.dtype)

        if use_cache:
            k_cache, v_cache = init_fp8_kv_cache(batch_size,
                                                 n_kv_head,
                                                 seq_len,
                                                 head_dim,
                                                 query_layer.device)
            k_cache, v_cache = append_fp8_kv_cache(k_cache, v_cache, key_layer, value_layer)
            attn_output = torch.nn.functional.scaled_dot_product_attention(
                query_states, key_states, value_states, attention_mask
            )
    else:
        k_cache, v_cache = kv_cache
        k_cache = k_cache.permute(1, 2, 0, 3)
        v_cache = v_cache.permute(1, 2, 0, 3)
        # k_cache, v_cache's shape: [bs, n_kv_head, seq_len, head_dim]

        k_cache, v_cache = append_fp8_kv_cache(k_cache, v_cache, key_layer, value_layer)
        if use_quantize_kv:
            key_states, value_states = restore_fp8_kv_cache(key_states, value_states,
                                                            query_states.dtype)
        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, n_head // n_kv_head)
        value_states = repeat_kv(value_states, n_head // n_kv_head)

        attn_weights = torch.matmul(query_states,
                                    key_states.transpose(2, 3)) / math.sqrt(head_dim)
        if attention_mask is not None:
            attention_mask = ~attention_mask
            attn_bias = torch.zeros(attention_mask.shape, dtype=query_layer.dtype,
                                    device=query_layer.device)
            if attention_mask.dtype == torch.bool:
                attn_bias.masked_fill_(attention_mask.logical_not(), float("-inf"))
            else:
                attn_bias += attention_mask
        else:
            attn_bias = None
            attn_weights = attn_weights + attention_mask
        attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1,
                                                   dtype=torch.float32).to(value_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if seq_len != 1:
            key, value = restore_fp8_kv_cache(k_cache, v_cache, query_layer.dtype)
            key, value = repeat_kv(key, value, n_head)
            attn = torch.matmul(query_layer, key.transpose(2, 3)) / math.sqrt(head_dim)
            if attn_bias is not None:
                attn += attn_bias
            attn = F.softmax(attn, dim=-1, dtype=torch.float32)
            context_layer = torch.matmul(attn.to(value.dtype), value)
        else:
            key, value = k_cache, v_cache
            import xe_addons
            context_layer = xe_addons.sdp_fp8(query_layer, key, value, attn_bias)
    # context_layer's shape: [bsz, n_head, seq_len, head_dim] -> [seq_len, bsz, n_head * head_dim]
    attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(q_len, bsz, n_head * head_dim)
    output = self.dense(attn_output)

    # context_layer's shape: [bs, n_head, seq_len, head_dim] -> [seq_len, bs, n_head * head_dim]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous().view(seq_len, batch_size, -1)

    if use_cache:
        kv_cache = (k_cache.permute(2, 0, 1, 3), v_cache.permute(2, 0, 1, 3))
    else:
        kv_cache = None

    output = self.dense(context_layer)

    return output, kv_cache


def chatglm2_attention_forward_8eb45c(
        self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
):
    # hidden_states: [sq, b, h]

    # =================================================
    # Pre-allocate memory for key-values for inference.
    # =================================================
    # =====================
    # Query, Key, and Value
    # =====================

    # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
    device = hidden_states.device
    mixed_x_layer = self.query_key_value(hidden_states)

    if self.multi_query_attention:
        (query_layer, key_layer, value_layer) = mixed_x_layer.split(
            [
                self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
                self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
                self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
            ],
            dim=-1,
        )
        query_layer = query_layer.view(
            query_layer.size()[:-1] + (self.num_attention_heads_per_partition,
                                       self.hidden_size_per_attention_head)
        )
        key_layer = key_layer.view(
            key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition,
                                     self.hidden_size_per_attention_head)
        )
        value_layer = value_layer.view(
            value_layer.size()[:-1]
            + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
        )
    else:
        new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition,
                                                        3 * self.hidden_size_per_attention_head)
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

        # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
        (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)

    cur_length, batch_size = query_layer.shape[0], query_layer.shape[1]

    # apply relative positional encoding (rotary embedding)
    if rotary_pos_emb is not None:
        if len(rotary_pos_emb) == 2 and isinstance(rotary_pos_emb, tuple):
            # use_fuse_rope, see chatglm2_model_forward
            cos, sin = rotary_pos_emb
            rot_dim = cos.shape[-1]
            query_layer = query_layer.transpose(0, 1)
            key_layer = key_layer.transpose(0, 1)
            query_layer_cur = query_layer[..., :rot_dim]
            key_layer_cur = key_layer[..., :rot_dim]
            # ipex_llm's apply_rotary_embedding can change the origin storage,
            # so query_layer will get the result directly.
            torch.ops.torch_ipex.apply_rotary_embedding(query_layer_cur, sin, cos, query_layer_cur)
            torch.ops.torch_ipex.apply_rotary_embedding(key_layer_cur, sin, cos, key_layer_cur)
            query_layer = query_layer.transpose(0, 1)
            key_layer = key_layer.transpose(0, 1)
        else:
            query_layer = apply_rotary_pos_emb_chatglm(query_layer, rotary_pos_emb)
            key_layer = apply_rotary_pos_emb_chatglm(key_layer, rotary_pos_emb)

    if self.multi_query_attention:
        if device.type == "xpu" and batch_size > 1:  # use beam_search for generation.
            # If batch_size > 1 on gpu, permute key/value_layer to [bs, np, sl, hn]
            # to reduce memory usage. Otherwise,expend key/value_layer to [bs, nh, sl, hn].
            key_layer = key_layer.permute(1, 2, 0, 3)  # [bs, np, sl, hn]
            value_layer = value_layer.permute(1, 2, 0, 3)  # [bs, np, sl, hn]
        else:
            key_length = key_layer.size(0)
            query_group_size = self.num_attention_heads_per_partition // \
                self.num_multi_query_groups_per_partition
            key_layer = key_layer.permute(1, 2, 0, 3).unsqueeze(-3)  # [bs, nh/k, sl, hn]
            key_layer = key_layer.expand(-1, -1, query_group_size, -1, -1)
            key_layer = key_layer.contiguous().view((batch_size,
                                                     self.num_attention_heads_per_partition,
                                                     key_length,
                                                     self.hidden_size_per_attention_head))
            value_layer = value_layer.permute(1, 2, 0, 3).unsqueeze(-3)  # [bs, nh/k, sl, hn]
            value_layer = value_layer.expand(-1, -1, query_group_size, -1, -1)
            value_layer = value_layer.contiguous().view((batch_size,
                                                         self.num_attention_heads_per_partition,
                                                         key_length,
                                                         self.hidden_size_per_attention_head))

    # adjust key and value for inference
    if kv_cache is not None:
        cache_k, cache_v = kv_cache
        cache_k = cache_k.permute(1, 2, 0, 3)
        cache_v = cache_v.permute(1, 2, 0, 3)
        past_length = cache_k.size(2)

        if cache_k.stride()[1] < (past_length + cur_length) * cache_k.size(3):
            max_cache_length = past_length + cur_length + KV_CACHE_ALLOC_BLOCK_LENGTH
            if device.type == "xpu" and batch_size > 1:  # use beam_search for generation.
                # If batch_size > 1 on gpu, use init_kv_cache to avoid empty cache for ensuring
                # generation correctness.
                # Set the num_heads in init_kv_cache to np, ensuring that the tensors of
                # new_cache_k/v and key/value_layer have the same size.
                new_cache_k, new_cache_v = init_kv_cache(batch_size,
                                                         self.num_multi_query_groups_per_partition,
                                                         self.hidden_size_per_attention_head,
                                                         past_length,
                                                         max_cache_length,
                                                         dtype=query_layer.dtype,
                                                         device=device)
            else:
                new_cache_k, new_cache_v = extend_kv_cache(batch_size,
                                                           self.num_attention_heads_per_partition,
                                                           self.hidden_size_per_attention_head,
                                                           past_length,
                                                           max_cache_length,
                                                           dtype=query_layer.dtype,
                                                           device=device)
            new_cache_k[:] = cache_k
            new_cache_v[:] = cache_v
            cache_k = new_cache_k
            cache_v = new_cache_v

        key_layer, value_layer = append_kv_cache(cache_k, cache_v, key_layer, value_layer)

    elif use_cache:
        max_cache_length = max(KV_CACHE_ALLOC_MIN_LENGTH, cur_length) \
            + KV_CACHE_ALLOC_BLOCK_LENGTH

        if device.type == "xpu" and batch_size > 1:  # use beam_search for generation.
            # Ensure the tensors of key/value_cache and key/value_layer have the same size.
            nums_per_partition = self.num_multi_query_groups_per_partition
        else:
            nums_per_partition = self.num_attention_heads_per_partition

        key_cache, value_cache = init_kv_cache(batch_size,
                                               nums_per_partition,
                                               self.hidden_size_per_attention_head,
                                               cur_length,
                                               max_cache_length,
                                               dtype=query_layer.dtype,
                                               device=device)
        key_cache[:] = key_layer
        value_cache[:] = value_layer
        key_layer = key_cache
        value_layer = value_cache

    # If batch_size > 1, return tensors with shape [bs, np, sl, hn] as past_key_values. This could
    # reduce memory usage as tensors are not expended to [bs, nh, sl, hn].
    # Otherwise, return views of [bs, nh, sl, hn].
    cache_key_layer = key_layer
    cache_value_layer = value_layer

    if use_cache:
        kv_cache = (key_layer, value_layer)
    else:
        kv_cache = None

    # ==================================
    # core attention computation
    # ==================================
    if device.type == "xpu" and batch_size > 1:  # use beam_search for generation.
        # If batch_size > 1, expend key/value_layer to [ns, nh, sl, bn] for
        # core attention computation.
        # The expanded tensors will not be returned as past_key_values.
        if self.multi_query_attention:
            query_group_size = self.num_attention_heads_per_partition // \
                self.num_multi_query_groups_per_partition
            key_layer = key_layer.unsqueeze(-3)
            key_layer = key_layer.expand(-1, -1, query_group_size, -1, -1)
            save_length = key_layer.size(3)
            # [bs, np, sl, hn] --> [bs, nh, sl, hn]
            key_layer = key_layer.contiguous().view((batch_size,
                                                     self.num_attention_heads_per_partition,
                                                     save_length,
                                                     self.hidden_size_per_attention_head))
            value_layer = value_layer.unsqueeze(-3)
            value_layer = value_layer.expand(-1, -1, query_group_size, -1, -1)
            # [bs, np, sl, hn] --> [bs, nh, sl, hn]
            value_layer = value_layer.contiguous().view((batch_size,
                                                         self.num_attention_heads_per_partition,
                                                         save_length,
                                                         self.hidden_size_per_attention_head))

    context_layer = core_attn_forward_8eb45c(query_layer, key_layer, value_layer, attention_mask)

    # =================
    # Output. [sq, b, h]
    # =================

    output = self.dense(context_layer)

    return output, (cache_key_layer.permute(2, 0, 1, 3), cache_value_layer.permute(2, 0, 1, 3))


def core_attn_forward_8eb45c(query_layer, key_layer, value_layer, attention_mask):
    query_layer = query_layer.permute(1, 2, 0, 3)
    L, S = query_layer.shape[2], key_layer.shape[2]
    batch_size, n_head, seq_len, head_dim = query_layer.shape
    if attention_mask is None and L == S:
        if should_split_qkv_tensor(query_layer, batch_size, n_head, seq_len):
            # split second dim to block size = 8
            block_size = 8
            query_layer = query_layer.to(key_layer.dtype)
            query_split = torch.split(query_layer, block_size, dim=1)
            key_split = torch.split(key_layer, block_size, dim=1)
            value_split = torch.split(value_layer, block_size, dim=1)
            results = []
            for q, k, v in zip(query_split, key_split, value_split):
                result = glm_sdpa(q, k, v, is_causal=True)
                results.append(result)
            context_layer = torch.cat(results, dim=1)
        else:
            context_layer = glm_sdpa(query_layer,
                                     key_layer,
                                     value_layer,
                                     is_causal=True)
    else:
        context_layer = glm_sdpa(query_layer,
                                 key_layer,
                                 value_layer,
                                 attention_mask)
    context_layer = context_layer.permute(2, 0, 1, 3)
    new_context_layer_shape = context_layer.size()[:-2] + (-1,)
    context_layer = context_layer.reshape(*new_context_layer_shape)

    return context_layer
    return output, past_key_value
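
Editor's note: several branches above fall back to an eager attention computation (matmul, scale, add an additive bias, softmax in fp32, matmul with the values). As a compact reference, a generic sketch of that fallback, not taken from this diff:

import math
import torch

def eager_attention(q, k, v, attn_bias=None):
    # q, k, v: [batch, n_head, seq_len, head_dim]; attn_bias is an additive mask or None
    attn = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(q.size(-1))
    if attn_bias is not None:
        attn = attn + attn_bias
    attn = torch.softmax(attn, dim=-1, dtype=torch.float32).to(v.dtype)
    return torch.matmul(attn, v)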
@@ -186,7 +186,7 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, model_family):
        q_embed = (q * cos) + (rotate_half(q) * sin)
        k_embed = (k * cos) + (rotate_half(k) * sin)
        return q_embed, k_embed
    elif model_family == "gptj":
    elif model_family in ["gptj", "chatglm"]:
        q_embed = (q * cos) + (rotate_every_two(q) * sin)
        k_embed = (k * cos) + (rotate_every_two(k) * sin)
        return q_embed, k_embed
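
Editor's note: adding "chatglm" to the gptj branch means ChatGLM2/3 now reuses the interleaved (rotate_every_two) rotary formulation, with cos/sin repeated twice along the last dimension in the model forward. A small sketch of that rotation, assuming the usual GPT-J-style definition rather than quoting this repository's helper:

import torch

def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    # (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    x_even = x[..., ::2]
    x_odd = x[..., 1::2]
    return torch.stack((-x_odd, x_even), dim=-1).flatten(-2)

print(rotate_every_two(torch.tensor([1., 2., 3., 4.])))  # tensor([-2.,  1., -4.,  3.])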
@@ -107,12 +107,6 @@ class Test_Optimize_Gpu_Model:
                    elif isinstance(t1, tuple) and isinstance(t2, tuple):
                        # if 'past_key_value'is of type tuple
                        for i, (t3, t4) in enumerate(zip(t1, t2)):
                            if model.config.architectures[0] == "ChatGLMModel" and \
                                    hasattr(model.config, 'padded_vocab_size') and \
                                    model.config.padded_vocab_size == 65024:
                                # chatglm2's past_key_value is expanded 16x for some speedup.
                                # We need to narrow it here.
                                t4 = t4[:, :, 15:17, :]
                            attn_output_diff.append(t3 - t4)
                    else:
                        # if 'past_key_value'is of type Cache, get last layer cache pair (key, value)
@@ -171,7 +165,7 @@ class Test_Optimize_Gpu_Model:
        # currently only need to compare the output of one self-attention layer.
        layer_norm = "transformer.encoder.layers.27.input_layernorm"
        self_attn = "transformer.encoder.layers.27.self_attention"
        lower_bound = 8e-3
        lower_bound = 4e-2
        self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, self_attn, layer_norm, lower_bound)

    def Mistral_gpu_model(self, Name, Model, Tokenizer, model_path):