#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import time
import argparse

from ipex_llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

# You could tune the prompt based on your own model;
# the prompt format here follows
# https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1
DEFAULT_SYSTEM_PROMPT = """\
"""


def get_prompt(user_input: str, chat_history: list[tuple[str, str]],
               system_prompt: str) -> str:
    # Build a Llama 3.1 chat prompt from the system prompt, prior turns,
    # and the current user input, using the model's special tokens
    prompt_texts = ['<|begin_of_text|>']

    if system_prompt != '':
        prompt_texts.append(f'<|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>')

    for history_input, history_response in chat_history:
        prompt_texts.append(f'<|start_header_id|>user<|end_header_id|>\n\n{history_input.strip()}<|eot_id|>')
        prompt_texts.append(f'<|start_header_id|>assistant<|end_header_id|>\n\n{history_response.strip()}<|eot_id|>')

    prompt_texts.append(f'<|start_header_id|>user<|end_header_id|>\n\n{user_input.strip()}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n')
    return ''.join(prompt_texts)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama3.1 model')
    parser.add_argument('--repo-id-or-model-path', type=str, default="meta-llama/Meta-Llama-3.1-8B-Instruct",
                        help='The Hugging Face repo id for the Llama3.1 model (e.g. `meta-llama/Meta-Llama-3.1-8B-Instruct`) to be downloaded'
                             ', or the path to the Hugging Face checkpoint folder')
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    # Load the model in 4 bit,
    # which converts the relevant layers in the model into INT4 format
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 load_in_4bit=True,
                                                 optimize_model=True,
                                                 trust_remote_code=True,
                                                 use_cache=True)

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = get_prompt(args.prompt, [], system_prompt=DEFAULT_SYSTEM_PROMPT)
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        st = time.time()
        output = model.generate(input_ids,
                                max_new_tokens=args.n_predict)
        end = time.time()
        output_str = tokenizer.decode(output[0], skip_special_tokens=False)
        print(f'Inference time: {end-st} s')
        print('-'*20, 'Prompt', '-'*20)
        print(prompt)
        print('-'*20, 'Output (skip_special_tokens=False)', '-'*20)
        print(output_str)
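
    # A minimal usage sketch (the script filename `generate.py` is an
    # assumption, not fixed by this file; the default model repo id is
    # downloaded from Hugging Face unless a local checkpoint path is given):
    #
    #   python generate.py \
    #       --repo-id-or-model-path meta-llama/Meta-Llama-3.1-8B-Instruct \
    #       --prompt "What is AI?" \
    #       --n-predict 32
    #
    # Note that the first `generate()` call typically includes one-time
    # warm-up overhead, so the reported inference time on a single run
    # is a rough upper bound rather than a steady-state measurement.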