LLM: Support load BaiChuan model family gguf model (#9685)
* support baichuan model family gguf model
* update gguf generate.py
* add verify models
* add support model_family
* update
* update style
* update type
* update readme
* update
* remove support model_family
parent 3afed99216
commit 496bb2e845

9 changed files with 1200 additions and 7 deletions
@@ -1,6 +1,10 @@
 # Loading GGUF models
-In this directory, you will find examples on how to load GGUF model into `bigdl-llm`. For illustration purposes, we utilize the [llama-2-7b-chat.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/tree/main) as a reference LLaMA2 GGUF model.
->Note: Only LLaMA2 family models are currently supported
+In this directory, you will find examples on how to load GGUF model into `bigdl-llm`.
+
+## Verified Models(Q4_0)
+- [Llama-2-7B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/tree/main)
+- [Mistral-7B-Instruct-v0.1-GGUF](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
+- [Baichuan2-7B-Chat-GGUF](https://huggingface.co/second-state/Baichuan2-7B-Chat-GGUF/tree/main)
 
 ## Requirements
 To run these examples with BigDL-LLM, we have some recommended requirements for your machine, please refer to [here](../../../README.md#system-support) for more information.
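The verified checkpoints above are plain GGUF files. A minimal sketch of fetching and loading one of them from Python; the from_gguf entry point returning a (model, tokenizer) pair is an assumption based on the generate.py example below, not something stated in this README:

from huggingface_hub import hf_hub_download
from bigdl.llm.transformers import AutoModelForCausalLM

# llama-2-7b-chat.Q4_0.gguf is the file name referenced by the original README text
gguf_path = hf_hub_download(repo_id="TheBloke/Llama-2-7B-Chat-GGUF",
                            filename="llama-2-7b-chat.Q4_0.gguf")
model, tokenizer = AutoModelForCausalLM.from_gguf(gguf_path)  # assumed API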
@@ -18,12 +18,11 @@ import torch
 import time
 import argparse
 
-from transformers import LlamaTokenizer
 from bigdl.llm.transformers import AutoModelForCausalLM
 
 # you could tune the prompt based on your own model,
 # here the prompt tuning refers to https://huggingface.co/georgesung/llama2_7b_chat_uncensored#prompt-style
-LLAMA2_PROMPT_FORMAT = """### HUMAN:
+PROMPT_FORMAT = """### HUMAN:
 {prompt}
 
 ### RESPONSE:

@@ -47,7 +46,7 @@ if __name__ == '__main__':
 
     # Generate predicted tokens
     with torch.inference_mode():
-        prompt = LLAMA2_PROMPT_FORMAT.format(prompt=args.prompt)
+        prompt = PROMPT_FORMAT.format(prompt=args.prompt)
         input_ids = tokenizer.encode(prompt, return_tensors="pt")
         st = time.time()
         output = model.generate(input_ids,
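The two hunks above drop the LlamaTokenizer import and rename LLAMA2_PROMPT_FORMAT to PROMPT_FORMAT, since the tokenizer now comes back from the GGUF loader and the prompt template is no longer LLaMA-specific. A hedged sketch of how the surrounding example presumably runs end to end; the from_gguf call, the max_new_tokens argument and the decode step are assumptions filled in around the lines shown in the diff:

import time
import torch
from bigdl.llm.transformers import AutoModelForCausalLM

PROMPT_FORMAT = """### HUMAN:
{prompt}

### RESPONSE:
"""

# assumed: from_gguf converts the GGUF weights and returns the matching tokenizer
model, tokenizer = AutoModelForCausalLM.from_gguf("./llama-2-7b-chat.Q4_0.gguf")

with torch.inference_mode():
    prompt = PROMPT_FORMAT.format(prompt="What is AI?")
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    st = time.time()
    output = model.generate(input_ids, max_new_tokens=32)
    print(f"Inference time: {time.time() - st:.2f} s")
    print(tokenizer.decode(output[0], skip_special_tokens=True))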
@@ -1,6 +1,10 @@
 # Loading GGUF models
-In this directory, you will find examples on how to load GGUF model into `bigdl-llm`. For illustration purposes, we utilize the [llama-2-7b-chat.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/tree/main) as a reference LLaMA2 GGUF model.
->Note: Only LLaMA2 family models are currently supported
+In this directory, you will find examples on how to load GGUF model into `bigdl-llm`.
+
+## Verified Models(Q4_0)
+- [Llama-2-7B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/tree/main)
+- [Mistral-7B-Instruct-v0.1-GGUF](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
+- [Baichuan2-7B-Chat-GGUF](https://huggingface.co/second-state/Baichuan2-7B-Chat-GGUF/tree/main)
 
 ## Requirements
 To run these examples with BigDL-LLM, we have some recommended requirements for your machine, please refer to [here](../../../README.md#system-support) for more information.
@@ -32,6 +32,7 @@ def load_gguf_model(fpath: str, dtype: torch.dtype = torch.float):
 
     loader = GGUFFileLoader(fpath)
     model_family = loader.config["general.architecture"]
+    print("model_family:" + model_family)
     qtype = loader.config["general.file_type"]
 
     invalidInputError(qtype in qtype_map, f"Unsupported gguf quantize type: {qtype}")

@@ -42,6 +43,10 @@ def load_gguf_model(fpath: str, dtype: torch.dtype = torch.float):
             from .models.llama import load_gguf_llama
 
             model, tokenizer = load_gguf_llama(loader, dtype)
+        elif model_family == "baichuan":
+            from .models.baichuan import load_gguf_baichuan
+
+            model, tokenizer = load_gguf_baichuan(loader, dtype)
         else:
             invalidInputError(False, f"Unsupported model family: {model_family}")
 
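With the branch above in place, load_gguf_model routes on the GGUF metadata key general.architecture: "llama" and "baichuan" map to their per-family loaders, anything else raises. A hedged usage sketch; the import path is an assumption and the file path is a placeholder for a locally downloaded checkpoint:

import torch
from bigdl.llm.transformers.gguf import load_gguf_model  # assumed import path

# general.architecture inside the file decides which branch above is taken
model, tokenizer = load_gguf_model("./baichuan2-7b-chat.Q4_0.gguf", dtype=torch.float)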
python/llm/src/bigdl/llm/transformers/gguf/models/baichuan.py (new file, 123 lines)

@@ -0,0 +1,123 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import torch
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
from tempfile import NamedTemporaryFile
from .model_implement.baichuan.configuration_baichuan import BaiChuanConfig
from .model_implement.baichuan.modeling_baichuan import BaiChuanForCausalLM
from .model_implement.baichuan.tokenization_baichuan import BaiChuanTokenizer

from ..gguf import GGUFFileLoader


def load_gguf_baichuan(loader: GGUFFileLoader, dtype: torch.dtype = torch.float):
    config = loader.config

    baichuan_config = BaiChuanConfig(
        vocab_size=len(config['tokenizer.ggml.tokens']),
        hidden_size=config['baichuan.embedding_length'],
        intermediate_size=config['baichuan.feed_forward_length'],
        num_hidden_layers=config['baichuan.block_count'],
        num_attention_heads=config['baichuan.attention.head_count'],
        num_key_value_heads=config['baichuan.attention.head_count_kv'],
        hidden_act="silu",
        max_position_embeddings=config['baichuan.context_length'],
        rms_norm_eps=config['baichuan.attention.layer_norm_rms_epsilon'],
        use_cache=True,
        pad_token_id=None,
        bos_token_id=config['tokenizer.ggml.bos_token_id'],
        eos_token_id=config['tokenizer.ggml.eos_token_id'],
        pretraining_tp=1,
    )

    ckpt = loader.tensors(dtype)
    n_head = config['baichuan.attention.head_count']
    n_head_kv = config['baichuan.attention.head_count_kv']
    ckpt = restore_baichuan_weight(ckpt, n_head, n_head_kv)

    state_dict = {}
    state_dict['model.embed_tokens.weight'] = ckpt['token_embd.weight']
    state_dict['model.norm.weight'] = ckpt['output_norm.weight']
    state_dict['lm_head.weight'] = ckpt['output.weight']
    for i in range(config['baichuan.block_count']):
        # rebuild W_pack
        a = ckpt[f'blk.{i}.attn_q.weight']
        b = ckpt[f'blk.{i}.attn_k.weight']
        c = ckpt[f'blk.{i}.attn_v.weight']
        d = torch.cat([a, b, c], dim=0)
        state_dict[f'model.layers.{i}.self_attn.W_pack.weight'] = d

        state_dict[f'model.layers.{i}.self_attn.o_proj.weight'] = \
            ckpt[f'blk.{i}.attn_output.weight']
        state_dict[f'model.layers.{i}.mlp.gate_proj.weight'] = \
            ckpt[f'blk.{i}.ffn_gate.weight']
        state_dict[f'model.layers.{i}.mlp.up_proj.weight'] = \
            ckpt[f'blk.{i}.ffn_up.weight']
        state_dict[f'model.layers.{i}.mlp.down_proj.weight'] = \
            ckpt[f'blk.{i}.ffn_down.weight']
        state_dict[f'model.layers.{i}.input_layernorm.weight'] = \
            ckpt[f'blk.{i}.attn_norm.weight']
        state_dict[f'model.layers.{i}.post_attention_layernorm.weight'] = \
            ckpt[f'blk.{i}.ffn_norm.weight']

    with init_empty_weights():
        model = BaiChuanForCausalLM(baichuan_config)

    for name, weight in state_dict.items():
        set_module_tensor_to_device(model, name, "cpu", weight)

    model = model.cpu()

    # see https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
    from transformers.convert_slow_tokenizer import import_protobuf
    spm_pb2 = import_protobuf("Failed to import protobuf")

    pieces = loader.tokenizer_pieces()
    trainer_spec = spm_pb2.TrainerSpec(byte_fallback=True,
                                       model_type=spm_pb2.TrainerSpec.ModelType.BPE)
    proto = spm_pb2.ModelProto(pieces=pieces, trainer_spec=trainer_spec)
    proto = proto.SerializeToString()

    with NamedTemporaryFile(delete=False) as f:
        f.write(proto)
        f.close()
        tokenizer = BaiChuanTokenizer(f.name)
        os.remove(f.name)

    return model, tokenizer


def restore_baichuan_weight(ckpt: dict, n_head: int, n_head_kv: int):
    # see https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py#L535

    for name, weight in ckpt.items():
        head, hd_size = weight.shape[0], weight.shape[1:]
        if n_head != n_head_kv:
            new_n_head = n_head // n_head_kv
        else:
            new_n_head = n_head
        if name.endswith("attn_q.weight"):
            ckpt[name] = (weight.reshape(new_n_head, head // new_n_head // 2, 2, *hd_size)
                                .swapaxes(1, 2)
                                .reshape(weight.shape))
        elif name.endswith("attn_k.weight"):
            ckpt[name] = (weight.reshape(new_n_head, head // new_n_head // 2, 2, *hd_size)
                                .swapaxes(1, 2)
                                .reshape(weight.shape))
    return ckpt
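restore_baichuan_weight undoes the rotary permutation that llama.cpp's converter applies to the attn_q and attn_k weights when writing GGUF, so the rows line up with the HF-style rotate_half implementation in modeling_baichuan.py. A small self-contained check of that round trip on toy sizes; the permute helper below mimics the converter and exists only for this test, it is not part of the PR:

import torch

def gguf_style_permute(w, n_head):
    # interleaves the two rotary halves, as the llama.cpp converter does for attn_q/attn_k
    return (w.reshape(n_head, 2, w.shape[0] // n_head // 2, *w.shape[1:])
             .swapaxes(1, 2)
             .reshape(w.shape))

def restore(w, n_head):
    # same reshape/swapaxes as restore_baichuan_weight for the n_head == n_head_kv case
    head, hd_size = w.shape[0], w.shape[1:]
    return (w.reshape(n_head, head // n_head // 2, 2, *hd_size)
             .swapaxes(1, 2)
             .reshape(w.shape))

w = torch.arange(12 * 5, dtype=torch.float).reshape(12, 5)  # toy weight: 2 heads, head_dim 6
assert torch.equal(restore(gguf_style_permute(w, n_head=2), n_head=2), w)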
@@ -0,0 +1,15 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
@@ -0,0 +1,66 @@
#
# Copyright 2016 The BigDL Authors.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class BaiChuanConfig(PretrainedConfig):
    model_type = "baichuan"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=64000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
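The defaults above are the Baichuan-7B hyper-parameters; load_gguf_baichuan overrides them from GGUF metadata and also passes keys this constructor does not name (num_key_value_heads, pretraining_tp). Those extra keys fall through **kwargs to PretrainedConfig, which keeps them as plain attributes. A small hedged check, assuming BaiChuanConfig as defined above is importable:

cfg = BaiChuanConfig(num_key_value_heads=32, pretraining_tp=1)

# arguments not named in __init__ above are still retained on the config object
print(cfg.num_key_value_heads, cfg.pretraining_tp)  # 32 1
print(cfg.vocab_size, cfg.hidden_size)              # 64000 4096 (the defaults above)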
@@ -0,0 +1,715 @@
#
# Copyright 2016 The BigDL Authors.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .configuration_baichuan import BaiChuanConfig
from transformers import PreTrainedModel, add_start_docstrings
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, \
    SequenceClassifierOutputWithPast
from transformers.utils import logging, add_start_docstrings_to_model_forward, \
    replace_return_docstrings

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logger = logging.get_logger(__name__)

# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
        input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device,
        past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min,
                                                       device=device), device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype,
                                      device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)

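# --------------------------------------------------------------------------------
# Illustrative sketch, not part of the committed file: how the two helpers above
# are combined later in Model._prepare_decoder_attention_mask. The causal mask
# blocks future positions, the expanded padding mask blocks padded positions, and
# the two are summed (Attention.forward later clamps the sum at finfo(dtype).min).
_causal = _make_causal_mask((1, 4), torch.float32, device=torch.device("cpu"))
_padding = _expand_mask(torch.tensor([[1, 1, 1, 0]]), torch.float32, tgt_len=4)
_combined = _causal + _padding        # [1, 1, 4, 4]: 0 where attending is allowed,
                                      # very large negative values elsewhere
# --------------------------------------------------------------------------------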
class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Build here to make `torch.jit.trace` work.
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device,
                         dtype=self.inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation
        # in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # This `if` block is unlikely to be run after we build sin/cos
        # in `__init__`. Keep the logic here just in case.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from paper, but it uses a different permutation
            # in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
            self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

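# --------------------------------------------------------------------------------
# Illustrative sketch, not part of the committed file: the cos/sin caches built by
# RotaryEmbedding are sliced to the current sequence length and applied to q/k by
# apply_rotary_pos_emb in place of additive position embeddings. Toy sizes below:
# batch 1, 2 heads, 5 tokens, head_dim 8.
_rope = RotaryEmbedding(dim=8, max_position_embeddings=16)
_q = torch.randn(1, 2, 5, 8)
_k = torch.randn(1, 2, 5, 8)
_cos, _sin = _rope(_q, seq_len=5)                      # each [1, 1, 5, 8]
_pos = torch.arange(5).unsqueeze(0)                    # [1, 5]
_q_rot, _k_rot = apply_rotary_pos_emb(_q, _k, _cos, _sin, _pos)
assert _q_rot.shape == _q.shape and _k_rot.shape == _k.shape
# --------------------------------------------------------------------------------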
class MLP(nn.Module):
    def __init__(
            self,
            hidden_size: int,
            intermediate_size: int,
            hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: BaiChuanConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings

        if (self.head_dim * self.num_heads) != self.hidden_size:
            logger.error(
                f"hidden_size must be divisible by num_heads (got `hidden_size`:{self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.rotary_emb = RotaryEmbedding(self.head_dim,
                                          max_position_embeddings=self.max_position_embeddings)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return (tensor.view(bsz, seq_len, self.num_heads, self.head_dim).
                transpose(1, 2).contiguous())

    def forward(
            self,
            hidden_states: torch.Tensor,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            past_key_value: Optional[Tuple[torch.Tensor]] = None,
            output_attentions: bool = False,
            use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        proj = self.W_pack(hidden_states)
        proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2)
        query_states = (proj[0].view(bsz, q_len, self.num_heads, self.head_dim).
                        transpose(1, 2))  # batch_size x source_len x hidden_size
        key_states = (proj[1].view(bsz, q_len, self.num_heads, self.head_dim)
                      .transpose(1, 2))  # batch_size x target_len x head_size
        value_states = (proj[2].view(bsz, q_len, self.num_heads, self.head_dim)
                        .transpose(1, 2))  # batch_size x source_len x hidden_size

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin,
                                                        position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(
            self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            logger.error(
                f"Attention weights should be of size "
                f"{(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                logger.error(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)},"
                    f" but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            attn_weights = torch.max(attn_weights,
                                     torch.tensor(torch.finfo(attn_weights.dtype).min))

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
            query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            logger.error(
                f"`attn_output` should be of "
                f"size {(bsz, self.num_heads, q_len, self.head_dim)}, "
                f"but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

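# --------------------------------------------------------------------------------
# Illustrative sketch, not part of the committed file: the unflatten/transpose in
# Attention.forward is a fused QKV split. W_pack produces a width of 3 * hidden,
# and proj[0], proj[1], proj[2] are exactly the query, key and value slices:
_h = 6
_x = torch.randn(2, 3, 3 * _h)                         # [bsz, q_len, 3 * hidden]
_p = _x.unflatten(-1, (3, _h)).unsqueeze(0).transpose(0, -2).squeeze(-2)
assert torch.equal(_p[0], _x[..., :_h])                # query slice
assert torch.equal(_p[1], _x[..., _h:2 * _h])          # key slice
assert torch.equal(_p[2], _x[..., 2 * _h:])            # value slice
# --------------------------------------------------------------------------------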
class DecoderLayer(nn.Module):
    def __init__(self, config: BaiChuanConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Attention(config=config)
        self.mlp = MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
            self,
            hidden_states: torch.Tensor,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            past_key_value: Optional[Tuple[torch.Tensor]] = None,
            output_attentions: Optional[bool] = False,
            use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape
            `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are
                indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all
                 attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are
                 returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
            cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

 | 
					class PreTrainedModel(PreTrainedModel):
 | 
				
			||||||
 | 
					    config_class = BaiChuanConfig
 | 
				
			||||||
 | 
					    base_model_prefix = "model"
 | 
				
			||||||
 | 
					    supports_gradient_checkpointing = True
 | 
				
			||||||
 | 
					    _no_split_modules = ["DecoderLayer"]
 | 
				
			||||||
 | 
					    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def _init_weights(self, module):
 | 
				
			||||||
 | 
					        std = self.config.initializer_range
 | 
				
			||||||
 | 
					        if isinstance(module, nn.Linear):
 | 
				
			||||||
 | 
					            module.weight.data.normal_(mean=0.0, std=std)
 | 
				
			||||||
 | 
					            if module.bias is not None:
 | 
				
			||||||
 | 
					                module.bias.data.zero_()
 | 
				
			||||||
 | 
					        elif isinstance(module, nn.Embedding):
 | 
				
			||||||
 | 
					            module.weight.data.normal_(mean=0.0, std=std)
 | 
				
			||||||
 | 
					            if module.padding_idx is not None:
 | 
				
			||||||
 | 
					                module.weight.data[module.padding_idx].zero_()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def _set_gradient_checkpointing(self, module, value=False):
 | 
				
			||||||
 | 
					        if isinstance(module, Model):
 | 
				
			||||||
 | 
					            module.gradient_checkpointing = value
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					class Model(PreTrainedModel):
 | 
				
			||||||
 | 
					    """
 | 
				
			||||||
 | 
					    Transformer decoder consisting of *config.num_hidden_layers* layers.
 | 
				
			||||||
 | 
					    Each layer is a [`DecoderLayer`]
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    Args:
 | 
				
			||||||
 | 
					        config: BaiChuanConfig
 | 
				
			||||||
 | 
					    """
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def __init__(self, config: BaiChuanConfig):
 | 
				
			||||||
 | 
					        super().__init__(config)
 | 
				
			||||||
 | 
					        self.padding_idx = config.pad_token_id
 | 
				
			||||||
 | 
					        self.vocab_size = config.vocab_size
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
 | 
				
			||||||
 | 
					        self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
 | 
				
			||||||
 | 
					        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        self.gradient_checkpointing = False
 | 
				
			||||||
 | 
					        # Initialize weights and apply final processing
 | 
				
			||||||
 | 
					        self.post_init()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def get_input_embeddings(self):
 | 
				
			||||||
 | 
					        return self.embed_tokens
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def set_input_embeddings(self, value):
 | 
				
			||||||
 | 
					        self.embed_tokens = value
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
 | 
				
			||||||
 | 
					    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds,
 | 
				
			||||||
 | 
					                                        past_key_values_length):
 | 
				
			||||||
 | 
					        # create causal mask
 | 
				
			||||||
 | 
					        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
 | 
				
			||||||
 | 
					        combined_attention_mask = None
 | 
				
			||||||
 | 
					        if input_shape[-1] > 1:
 | 
				
			||||||
 | 
					            combined_attention_mask = _make_causal_mask(
 | 
				
			||||||
 | 
					                input_shape,
 | 
				
			||||||
 | 
					                inputs_embeds.dtype,
 | 
				
			||||||
 | 
					                device=inputs_embeds.device,
 | 
				
			||||||
 | 
					                past_key_values_length=past_key_values_length,
 | 
				
			||||||
 | 
					            )
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        if attention_mask is not None:
 | 
				
			||||||
 | 
					            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
 | 
				
			||||||
 | 
					            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype,
 | 
				
			||||||
 | 
					                                              tgt_len=input_shape[-1]).to(
 | 
				
			||||||
 | 
					                inputs_embeds.device
 | 
				
			||||||
 | 
					            )
 | 
				
			||||||
 | 
					            combined_attention_mask = (
 | 
				
			||||||
 | 
					                expanded_attn_mask if combined_attention_mask is None
 | 
				
			||||||
 | 
					                else expanded_attn_mask + combined_attention_mask
 | 
				
			||||||
 | 
					            )
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        return combined_attention_mask
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    def forward(
 | 
				
			||||||
 | 
					            self,
 | 
				
			||||||
 | 
					            input_ids: torch.LongTensor = None,
 | 
				
			||||||
 | 
					            attention_mask: Optional[torch.Tensor] = None,
 | 
				
			||||||
 | 
					            position_ids: Optional[torch.LongTensor] = None,
 | 
				
			||||||
 | 
					            past_key_values: Optional[List[torch.FloatTensor]] = None,
 | 
				
			||||||
 | 
					            inputs_embeds: Optional[torch.FloatTensor] = None,
 | 
				
			||||||
 | 
					            use_cache: Optional[bool] = None,
 | 
				
			||||||
 | 
					            output_attentions: Optional[bool] = None,
 | 
				
			||||||
 | 
					            output_hidden_states: Optional[bool] = None,
 | 
				
			||||||
 | 
					            return_dict: Optional[bool] = None,
 | 
				
			||||||
 | 
					    ) -> Union[Tuple, BaseModelOutputWithPast]:
 | 
				
			||||||
 | 
					        output_attentions = output_attentions if output_attentions is not None \
 | 
				
			||||||
 | 
					            else self.config.output_attentions
 | 
				
			||||||
 | 
					        output_hidden_states = (
 | 
				
			||||||
 | 
					            output_hidden_states if output_hidden_states is not None
 | 
				
			||||||
 | 
					            else self.config.output_hidden_states
 | 
				
			||||||
 | 
					        )
 | 
				
			||||||
 | 
					        use_cache = use_cache if use_cache is not None else self.config.use_cache
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					        # retrieve input_ids and inputs_embeds
 | 
				
			||||||
 | 
					        if input_ids is not None and inputs_embeds is not None:
 | 
				
			||||||
 | 
					            logger.error(
 | 
				
			||||||
 | 
					                "You cannot specify both decoder_input_ids "
 | 
				
			||||||
 | 
					                "and decoder_inputs_embeds at the same time")
 | 
				
			||||||
 | 
					        elif input_ids is not None:
 | 
				
			||||||
 | 
					            batch_size, seq_length = input_ids.shape
 | 
				
			||||||
 | 
					        elif inputs_embeds is not None:
 | 
				
			||||||
 | 
					            batch_size, seq_length, _ = inputs_embeds.shape
 | 
				
			||||||
 | 
        else:
            logger.error(
                "You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long,
                device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with "
                    "gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if
                v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class BaiChuanForCausalLM(PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.model = Model(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
            self,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.FloatTensor] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss.
                Indices should either be in `[0, ..., config.vocab_size]` or -100
                (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ModelForCausalLM

        >>> model = ModelForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True,
        clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else \
            self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

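The label handling above is standard next-token prediction: the logits at position `t` are scored against the label at position `t + 1`. A minimal, self-contained sketch of that shift-and-flatten step (random tensors and made-up shapes for illustration; not part of this diff):

```python
import torch
from torch.nn import CrossEntropyLoss

# Illustration of the shifted cross-entropy loss used in forward():
# logits[:, :-1] predict labels[:, 1:], flattened over (batch * seq_len).
vocab_size = 8
logits = torch.randn(2, 5, vocab_size)          # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))   # (batch, seq_len)

shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())
```
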
    def prepare_inputs_for_generation(
            self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

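`prepare_inputs_for_generation` derives position ids from the attention mask so that left-padded batches still start counting from position 0, while padded slots are clamped to a dummy value of 1. A small standalone illustration of that construction (toy masks, not part of this diff):

```python
import torch

# Same position_ids construction as in prepare_inputs_for_generation, on a toy batch:
# cumulative sum of the mask minus one, with padded positions filled with 1.
attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded sequence
                               [1, 1, 1, 1, 1]])  # full-length sequence
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```
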
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),
            )
        return reordered_past
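Taken together, the classes above form a standalone Baichuan causal LM. A hedged usage sketch (the checkpoint path is a placeholder for a locally converted checkpoint whose config resolves to this class, and `BaiChuanTokenizer` is the tokenizer added in the next file of this commit; this snippet is illustrative, not part of the diff):

```python
import torch

# Hypothetical usage of the classes added in this commit; the paths are placeholders.
model = BaiChuanForCausalLM.from_pretrained("path/to/converted-baichuan")
tokenizer = BaiChuanTokenizer(vocab_file="path/to/converted-baichuan/tokenizer.model")

inputs = tokenizer("What is AI?", return_tensors="pt")
with torch.inference_mode():
    output_ids = model.generate(inputs.input_ids, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
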
@ -0,0 +1,262 @@
#
# Copyright 2016 The BigDL Authors.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {},
    "tokenizer_file": {},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}


class BaiChuanTokenizer(PreTrainedTokenizer):
    """
    Construct a BaiChuan tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) \
            if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) \
            if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) \
            if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) \
            if isinstance(pad_token, str) else pad_token
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)
                and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added.
        This method is called when adding special tokens using the tokenizer
        `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted
                with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]:
            1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair
        classification task. An ALBERT sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids)
            according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
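The last three methods only wrap the raw ids with BOS/EOS markers and label the two segments. With the constructor defaults (`add_bos_token=True`, `add_eos_token=False`) the resulting layout can be checked standalone; a small sketch with illustrative ids, assuming a BOS id of 1 (not part of this diff):

```python
# Standalone mirror of build_inputs_with_special_tokens /
# create_token_type_ids_from_sequences with add_bos_token=True, add_eos_token=False.
BOS = [1]   # assumed bos_token_id
EOS = []    # eos is not appended by default

def with_special_tokens(ids_0, ids_1=None):
    out = BOS + ids_0 + EOS
    if ids_1 is not None:
        out += BOS + ids_1 + EOS
    return out

def token_type_ids(ids_0, ids_1=None):
    out = [0] * len(BOS + ids_0 + EOS)
    if ids_1 is not None:
        out += [1] * len(BOS + ids_1 + EOS)
    return out

assert with_special_tokens([10, 11, 12]) == [1, 10, 11, 12]
assert with_special_tokens([10, 11], [20, 21]) == [1, 10, 11, 1, 20, 21]
assert token_type_ids([10, 11], [20, 21]) == [0, 0, 0, 1, 1, 1]
```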