diff --git a/python/llm/example/gpu/hf-transformers-models/aquila/README.md b/python/llm/example/gpu/hf-transformers-models/aquila/README.md
new file mode 100644
index 00000000..56a8fc6e
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/aquila/README.md
@@ -0,0 +1,57 @@
+# Aquila
+
+In this directory, you will find examples of how you can apply BigDL-LLM INT4 optimizations on Aquila models. For illustration purposes, we utilize [BAAI/AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B) as a reference Aquila model.
+
+> **Note**: If you want to download the Hugging Face *Transformers* model, please refer to [here](https://huggingface.co/docs/hub/models-downloading#using-git).
+>
+> BigDL-LLM optimizes the *Transformers* model in INT4 precision at runtime, so no explicit conversion is needed.
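+
+As a quick illustration, here is a minimal sketch of what that looks like in code (model loading only; the full runnable example is [generate.py](./generate.py)):
+
+```python
+from bigdl.llm.transformers import AutoModelForCausalLM
+
+# `load_in_4bit=True` applies the INT4 optimization while the model loads;
+# no separate conversion step is required
+model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B",
+                                             load_in_4bit=True,
+                                             trust_remote_code=True)
+```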
+
+## 0. Requirements
+To run these examples with BigDL-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for an Aquila model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
+
+### 1. Install
+We suggest using conda to manage the environment:
+```bash
+conda create -n llm python=3.9
+conda activate llm
+# the command below installs intel_extension_for_pytorch==2.0.110+xpu by default
+# you can install a specific ipex/torch version for your need
+pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+```
+
+### 2. Configure OneAPI environment variables
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Run
+
+For optimal performance on Arc, it is recommended to set several environment variables:
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+```
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+In the example, several arguments can be passed to satisfy your requirements:
+
+- `--repo-id-or-model-path`: str, argument defining the huggingface repo id for the Aquila model to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'BAAI/AquilaChat-7B'`.
+- `--prompt`: str, argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'AI是什么?'`.
+- `--n-predict`: int, argument defining the max number of tokens to predict. It defaults to `32`.
+
+#### Sample Output
+#### [BAAI/AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+Human: AI是什么?###Assistant:
+-------------------- Output --------------------
+Human: AI是什么?###Assistant: AI是人工智能的缩写。人工智能是一种技术,旨在使计算机能够像人类一样思考、学习和执行任务。AI包括许多不同的技术和方法,例如机器
+```
diff --git a/python/llm/example/gpu/hf-transformers-models/aquila/generate.py b/python/llm/example/gpu/hf-transformers-models/aquila/generate.py
new file mode 100644
index 00000000..d070a75c
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/aquila/generate.py
@@ -0,0 +1,73 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import intel_extension_for_pytorch as ipex
+import time
+import argparse
+
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# you could tune the prompt based on your own model;
+# here the prompt tuning refers to https://huggingface.co/BAAI/AquilaChat-7B/blob/13577616fd4ff0d21c5735a88d350a68dae120e5/cyg_conversation.py
+AQUILA_PROMPT_FORMAT = "Human: {prompt}###Assistant:"
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Predict Tokens using `generate()` API for Aquila model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="BAAI/AquilaChat-7B",
+                        help='The huggingface repo id for the Aquila model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="AI是什么?",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load the model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True,
+                                                 trust_remote_code=True)
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path,
+                                              trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        prompt = AQUILA_PROMPT_FORMAT.format(prompt=args.prompt)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        st = time.time()
+        # if your selected model is capable of utilizing previous key/value attentions
+        # to enhance decoding speed, but has `"use_cache": false` in its model config,
+        # it is important to set `use_cache=True` explicitly in the `generate` function
+        # to obtain optimal performance with BigDL-LLM INT4 optimizations
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end - st} s')
+        print('-' * 20, 'Prompt', '-' * 20)
+        print(prompt)
+        print('-' * 20, 'Output', '-' * 20)
+        print(output_str)
diff --git a/python/llm/example/gpu/hf-transformers-models/dolly-v1/README.md b/python/llm/example/gpu/hf-transformers-models/dolly-v1/README.md
new file mode 100644
index 00000000..d1a1be39
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/dolly-v1/README.md
@@ -0,0 +1,67 @@
+# Dolly v1
+In this directory, you will find examples of how you can apply BigDL-LLM INT4 optimizations on Dolly v1 models. For illustration purposes, we utilize [databricks/dolly-v1-6b](https://huggingface.co/databricks/dolly-v1-6b) as a reference Dolly v1 model.
+
+## 0. Requirements
+To run these examples with BigDL-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a Dolly v1 model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
+
+### 1. Install
+We suggest using conda to manage the environment:
+```bash
+conda create -n llm python=3.9
+conda activate llm
+# the command below installs intel_extension_for_pytorch==2.0.110+xpu by default
+# you can install a specific ipex/torch version for your need
+pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+```
+
+### 2. Configure OneAPI environment variables
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Run
+
+For optimal performance on Arc, it is recommended to set several environment variables:
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+```
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Dolly v1 model to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'databricks/dolly-v1-6b'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is AI?'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+
+> **Note**: When loading the model in 4-bit, BigDL-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit will require approximately 2*X* GB of memory for loading, and ~0.5*X* GB of memory for further inference. For example, the 6B dolly-v1 model would need roughly 12 GB of memory to load and about 3 GB for inference.
+>
+> Please select the appropriate size of the Dolly v1 model based on the capabilities of your machine.
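+
+One detail worth noting: Dolly v1 ends its responses with a literal `### End` marker rather than a dedicated EOS token, so [generate.py](./generate.py) passes the first token id of that marker as a custom `eos_token_id`. A minimal sketch of the idea (the exact token id depends on the tokenizer):
+
+```python
+from transformers import AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v1-6b")
+# first token id of Dolly's "### End" response marker
+end_key_token_id = tokenizer.encode("### End")[0]
+# passing eos_token_id=end_key_token_id to `generate()` stops decoding
+# as soon as the model emits the end marker
+```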
+
+#### Sample Output
+#### [databricks/dolly-v1-6b](https://huggingface.co/databricks/dolly-v1-6b)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+What is AI?
+
+### Response:
+
+-------------------- Output --------------------
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+What is AI?
+
+### Response:
+AI is an umbrella term for a variety of technologies that enable computers to think and act like humans. AI can be used to automate tasks, analyze data, and
+```
diff --git a/python/llm/example/gpu/hf-transformers-models/dolly-v1/generate.py b/python/llm/example/gpu/hf-transformers-models/dolly-v1/generate.py
new file mode 100644
index 00000000..75c058b3
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/dolly-v1/generate.py
@@ -0,0 +1,85 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import intel_extension_for_pytorch as ipex
+import time
+import argparse
+import numpy as np
+
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# you could tune the prompt based on your own model;
+# here the prompt tuning refers to https://huggingface.co/databricks/dolly-v1-6b#generate-text
+DOLLY_V1_PROMPT_FORMAT = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
+"""
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Dolly v1 model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="databricks/dolly-v1-6b",
+                        help='The huggingface repo id for the Dolly v1 model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="What is AI?",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load the model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True)
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        prompt = DOLLY_V1_PROMPT_FORMAT.format(prompt=args.prompt)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        # Dolly v1 marks the end of a response with a literal "### End" string;
+        # its first token id serves as a custom eos_token_id below
+        end_key_token_id = tokenizer.encode("### End")[0]
+        st = time.time()
+        # enabling `use_cache=True` allows the model to utilize the previous
+        # key/values attentions to speed up decoding;
+        # to obtain optimal performance with BigDL-LLM INT4 optimizations,
+        # it is important to set use_cache=True for Dolly v1 models
+        output = model.generate(input_ids,
+                                use_cache=True,
+                                max_new_tokens=args.n_predict,
+                                pad_token_id=tokenizer.pad_token_id,
+                                eos_token_id=end_key_token_id)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        # truncate the decoded output at the first "### End" marker, if any
+        end_token_position = None
+        end_token_positions = np.where(output[0] == end_key_token_id)[0]
+        if len(end_token_positions) > 0:
+            end_token_position = end_token_positions[0]
+        output_str = tokenizer.decode(output[0][:end_token_position], skip_special_tokens=False)
+        print(f'Inference time: {end - st} s')
+        print('-' * 20, 'Prompt', '-' * 20)
+        print(prompt)
+        print('-' * 20, 'Output', '-' * 20)
+        print(output_str)
diff --git a/python/llm/example/gpu/hf-transformers-models/dolly-v2/README.md b/python/llm/example/gpu/hf-transformers-models/dolly-v2/README.md
new file mode 100644
index 00000000..7ef54e16
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/dolly-v2/README.md
@@ -0,0 +1,71 @@
+# Dolly v2
+In this directory, you will find examples of how you can apply BigDL-LLM INT4 optimizations on Dolly v2 models. For illustration purposes, we utilize [databricks/dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b) as a reference Dolly v2 model.
+
+## 0. Requirements
+To run these examples with BigDL-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a Dolly v2 model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
+
+### 1. Install
+We suggest using conda to manage the environment:
+```bash
+conda create -n llm python=3.9
+conda activate llm
+# the command below installs intel_extension_for_pytorch==2.0.110+xpu by default
+# you can install a specific ipex/torch version for your need
+pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+```
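+
+You can quickly verify that the GPU is visible before running the example. A short sanity check; this assumes the installed XPU build exposes `torch.xpu.is_available()` and `torch.xpu.get_device_name()`, which the example script's use of the `'xpu'` device relies on:
+
+```python
+import torch
+import intel_extension_for_pytorch as ipex  # registers the 'xpu' device with PyTorch
+
+# expect True and your GPU name if the XPU setup is working
+print(torch.xpu.is_available())
+print(torch.xpu.get_device_name(0))
+```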
+
+### 2. Configure OneAPI environment variables
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Run
+
+For optimal performance on Arc, it is recommended to set several environment variables:
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+```
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Dolly v2 model to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'databricks/dolly-v2-12b'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is AI?'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+
+> **Note**: When loading the model in 4-bit, BigDL-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit will require approximately 2*X* GB of memory for loading, and ~0.5*X* GB of memory for further inference.
+>
+> Please select the appropriate size of the Dolly v2 model based on the capabilities of your machine.
+
+#### Sample Output
+#### [databricks/dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+What is AI?
+
+### Response:
+
+-------------------- Output --------------------
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+What is AI?
+
+### Response:
+Artificial Intelligence (AI) is the area of computer science concerned with building machines that can perform tasks normally associated with human intelligence, such as reasoning, learning,
+```
diff --git a/python/llm/example/gpu/hf-transformers-models/dolly-v2/generate.py b/python/llm/example/gpu/hf-transformers-models/dolly-v2/generate.py
new file mode 100644
index 00000000..d785dec2
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/dolly-v2/generate.py
@@ -0,0 +1,86 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import intel_extension_for_pytorch as ipex
+import time
+import argparse
+import numpy as np
+
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# you could tune the prompt based on your own model;
+# here the prompt tuning refers to https://huggingface.co/databricks/dolly-v2-12b/blob/main/instruct_pipeline.py#L15
+DOLLY_V2_PROMPT_FORMAT = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
+"""
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Dolly v2 model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="databricks/dolly-v2-12b",
+                        help='The huggingface repo id for the Dolly v2 model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="What is AI?",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load the model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True,
+                                                 trust_remote_code=True)
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path,
+                                              trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        prompt = DOLLY_V2_PROMPT_FORMAT.format(prompt=args.prompt)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        # Dolly v2 marks the end of a response with a literal "### End" string;
+        # its first token id serves as a custom eos_token_id below
+        end_key_token_id = tokenizer.encode("### End")[0]
+        st = time.time()
+        # if your selected model is capable of utilizing previous key/value attentions
+        # to enhance decoding speed, but has `"use_cache": false` in its model config,
+        # it is important to set `use_cache=True` explicitly in the `generate` function
+        # to obtain optimal performance with BigDL-LLM INT4 optimizations
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict,
+                                pad_token_id=tokenizer.pad_token_id,
+                                eos_token_id=end_key_token_id)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        # truncate the decoded output at the first "### End" marker, if any
+        end_token_position = None
+        end_token_positions = np.where(output[0] == end_key_token_id)[0]
+        if len(end_token_positions) > 0:
+            end_token_position = end_token_positions[0]
+        output_str = tokenizer.decode(output[0][:end_token_position], skip_special_tokens=False)
+        print(f'Inference time: {end - st} s')
+        print('-' * 20, 'Prompt', '-' * 20)
+        print(prompt)
+        print('-' * 20, 'Output', '-' * 20)
+        print(output_str)
diff --git a/python/llm/example/gpu/hf-transformers-models/vicuna/README.md b/python/llm/example/gpu/hf-transformers-models/vicuna/README.md
new file mode 100644
index 00000000..0ea9149b
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/vicuna/README.md
@@ -0,0 +1,76 @@
+# Vicuna
+In this directory, you will find examples of how you can apply BigDL-LLM INT4 optimizations on Vicuna models. For illustration purposes, we utilize [lmsys/vicuna-13b-v1.3](https://huggingface.co/lmsys/vicuna-13b-v1.3) and [eachadea/vicuna-7b-1.1](https://huggingface.co/eachadea/vicuna-7b-1.1) as reference Vicuna models.
+
+## 0. Requirements
+To run these examples with BigDL-LLM, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a Vicuna model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
+
+### 1. Install
+We suggest using conda to manage the environment:
+```bash
+conda create -n llm python=3.9
+conda activate llm
+# the command below installs intel_extension_for_pytorch==2.0.110+xpu by default
+# you can install a specific ipex/torch version for your need
+pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+```
+
+### 2. Configure OneAPI environment variables
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Run
+
+For optimal performance on Arc, it is recommended to set several environment variables:
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+```
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Vicuna model (e.g. `lmsys/vicuna-13b-v1.3` and `eachadea/vicuna-7b-1.1`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'lmsys/vicuna-13b-v1.3'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'What is AI?'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
+
+> **Note**: When loading the model in 4-bit, BigDL-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit will require approximately 2*X* GB of memory for loading, and ~0.5*X* GB of memory for further inference. For example, the 13B model would need roughly 26 GB of memory to load, while the 7B model would need about 14 GB.
+>
+> Please select the appropriate size of the Vicuna model based on the capabilities of your machine.
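+
+The sample outputs below include the full chat prompt that [generate.py](./generate.py) wraps around your `--prompt`. A minimal sketch of that formatting, taken directly from the example code:
+
+```python
+# chat prompt format for v0-style Vicuna weights (see the FastChat docs linked in generate.py)
+VICUNA_PROMPT_FORMAT = "### Human:\n{prompt} \n ### Assistant:\n"
+print(VICUNA_PROMPT_FORMAT.format(prompt="What is AI?"))
+```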
+
+#### Sample Output
+#### [lmsys/vicuna-13b-v1.3](https://huggingface.co/lmsys/vicuna-13b-v1.3)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+### Human:
+What is AI?
+ ### Assistant:
+
+-------------------- Output --------------------
+### Human:
+What is AI?
+ ### Assistant:
+AI, or Artificial Intelligence, refers to the development of computer systems that can perform tasks that typically require human intelligence, such as visual perception,
+```
+
+#### [eachadea/vicuna-7b-1.1](https://huggingface.co/eachadea/vicuna-7b-1.1)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+### Human:
+What is AI?
+ ### Assistant:
+
+-------------------- Output --------------------
+### Human:
+What is AI?
+ ### Assistant:
+AI, or artificial intelligence, refers to the ability of a machine or computer program to mimic human intelligence and perform tasks that would normally require human intelligence to
+```
diff --git a/python/llm/example/gpu/hf-transformers-models/vicuna/generate.py b/python/llm/example/gpu/hf-transformers-models/vicuna/generate.py
new file mode 100644
index 00000000..f66d3dce
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/vicuna/generate.py
@@ -0,0 +1,71 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import intel_extension_for_pytorch as ipex
+import time
+import argparse
+
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import LlamaTokenizer
+
+# you could tune the prompt based on your own model;
+# here the prompt tuning refers to https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#example-prompt-weights-v0
+VICUNA_PROMPT_FORMAT = "### Human:\n{prompt} \n ### Assistant:\n"
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Vicuna model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="lmsys/vicuna-13b-v1.3",
+                        help='The huggingface repo id for the Vicuna model (e.g. `lmsys/vicuna-13b-v1.3` and `eachadea/vicuna-7b-1.1`) to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="What is AI?",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load the model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True)
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = LlamaTokenizer.from_pretrained(model_path)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        prompt = VICUNA_PROMPT_FORMAT.format(prompt=args.prompt)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        st = time.time()
+        # enabling `use_cache=True` allows the model to utilize the previous
+        # key/values attentions to speed up decoding;
+        # to obtain optimal performance with BigDL-LLM INT4 optimizations,
+        # it is important to set use_cache=True for vicuna-v1.3 models
+        output = model.generate(input_ids,
+                                use_cache=True,
+                                max_new_tokens=args.n_predict)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end - st} s')
+        print('-' * 20, 'Prompt', '-' * 20)
+        print(prompt)
+        print('-' * 20, 'Output', '-' * 20)
+        print(output_str)