#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import time
import argparse

from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

# You could tune the prompt based on your own model.
# Refer to https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct
PROMPT_FORMAT = """
You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.
### Instruction:
{prompt}
### Response:
"""

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Deepseek model')
    parser.add_argument('--repo-id-or-model-path', type=str, default="deepseek-ai/deepseek-coder-6.7b-instruct",
                        help='The huggingface repo id for the Deepseek model (e.g. `deepseek-ai/deepseek-coder-6.7b-instruct`) to be downloaded'
                             ', or the path to the huggingface checkpoint folder')
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    # Load model in 4 bit,
    # which converts the relevant layers in the model into INT4 format
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 load_in_4bit=True,
                                                 trust_remote_code=True)
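    # Per this example's change history, `cpu_embedding=True` can also be passed to
    # `from_pretrained` to keep the embedding layer on CPU; treat this variant as a
    # hedged sketch rather than a verified part of this script:
    #   model = AutoModelForCausalLM.from_pretrained(model_path,
    #                                                load_in_4bit=True,
    #                                                cpu_embedding=True,
    #                                                trust_remote_code=True)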

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = PROMPT_FORMAT.format(prompt=args.prompt)
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        # input_ids = tokenizer.apply_chat_template(prompt, add_generation_prompt=True, return_tensors="pt")
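        # Note: `apply_chat_template` expects a list of chat messages rather than a
        # pre-formatted string. A hedged sketch of that alternative (assuming the
        # tokenizer ships a chat template) could look like:
        #   messages = [{"role": "user", "content": args.prompt}]
        #   input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")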
        st = time.time()
        # if your selected model is capable of utilizing previous key/value attentions
        # to enhance decoding speed, but has `"use_cache": false` in its model config,
        # it is important to set `use_cache=True` explicitly in the `generate` function
        # to obtain optimal performance with BigDL-LLM INT4 optimizations
        output = model.generate(input_ids,
                                max_new_tokens=args.n_predict)
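        # A minimal sketch of the explicit-cache variant described in the comment above;
        # only needed when the model config disables caching, and not required for the
        # default deepseek-coder checkpoint:
        #   output = model.generate(input_ids,
        #                           use_cache=True,
        #                           max_new_tokens=args.n_predict)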
        end = time.time()
        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
        print(f'Inference time: {end-st} s')
        print('-'*20, 'Prompt', '-'*20)
        print(prompt)
        print('-'*20, 'Output', '-'*20)
        print(output_str)
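# A hypothetical invocation of this example (the script name below is an assumption):
#   python ./generate.py --repo-id-or-model-path deepseek-ai/deepseek-coder-6.7b-instruct --prompt "What is AI?" --n-predict 32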