From cabe7c0358aa0c49f4f2e28e9b3e92ac9372f222 Mon Sep 17 00:00:00 2001
From: Ruonan Wang <105281011+rnwang04@users.noreply.github.com>
Date: Mon, 18 Sep 2023 14:32:27 +0800
Subject: [PATCH] LLM: add baichuan2 example for arc (#8994)

* add baichuan2 examples
* add link
* small fix
---
 python/llm/example/gpu/README.md                 |  1 +
 .../gpu/hf-transformers-models/README.md         |  3 +-
 .../baichuan2/README.md                          | 59 ++++++++++++++
 .../baichuan2/generate.py                        | 78 +++++++++++++++++++
 4 files changed, 140 insertions(+), 1 deletion(-)
 create mode 100644 python/llm/example/gpu/hf-transformers-models/baichuan2/README.md
 create mode 100644 python/llm/example/gpu/hf-transformers-models/baichuan2/generate.py

diff --git a/python/llm/example/gpu/README.md b/python/llm/example/gpu/README.md
index c729b4d2..1abff7e5 100644
--- a/python/llm/example/gpu/README.md
+++ b/python/llm/example/gpu/README.md
@@ -5,6 +5,7 @@ You can use BigDL-LLM to run almost every Huggingface Transformer models with IN
 | Model | Example |
 |------------|----------------------------------------------------------|
 | Baichuan | [link](hf-transformers-models/baichuan) |
+| Baichuan2 | [link](hf-transformers-models/baichuan2) |
 | ChatGLM2 | [link](hf-transformers-models/chatglm2) |
 | Chinese Llama2 | [link](hf-transformers-models/chinese-llama2)|
 | Falcon | [link](hf-transformers-models/falcon) |
diff --git a/python/llm/example/gpu/hf-transformers-models/README.md b/python/llm/example/gpu/hf-transformers-models/README.md
index 2b8ecc8b..0798745b 100644
--- a/python/llm/example/gpu/hf-transformers-models/README.md
+++ b/python/llm/example/gpu/hf-transformers-models/README.md
@@ -4,7 +4,8 @@ You can use BigDL-LLM to run almost every Huggingface Transformer models with IN
 ## Verified models
 | Model | Example |
 |------------|----------------------------------------------------------|
-| Baichuan | [link](baichuan) |
+| Baichuan | [link](baichuan) |
+| Baichuan2 | [link](baichuan2) |
 | ChatGLM2 | [link](chatglm2) |
 | Chinese Llama2 | [link](chinese-llama2)|
 | Falcon | [link](falcon) |
diff --git a/python/llm/example/gpu/hf-transformers-models/baichuan2/README.md b/python/llm/example/gpu/hf-transformers-models/baichuan2/README.md
new file mode 100644
index 00000000..f4a74302
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/baichuan2/README.md
@@ -0,0 +1,59 @@
+# Baichuan2
+In this directory, you will find examples of how you can apply BigDL-LLM INT4 optimizations on Baichuan2 models on [Intel GPUs](../README.md). For illustration purposes, we utilize [baichuan-inc/Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) as a reference Baichuan2 model.
+
+## 0. Requirements
+To run these examples with BigDL-LLM on Intel GPUs, we have some recommended requirements for your machine; please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a Baichuan2 model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations on Intel GPUs.
+### 1. Install
+We suggest using conda to manage the environment:
+```bash
+conda create -n llm python=3.9
+conda activate llm
+# the command below will install intel_extension_for_pytorch==2.0.110+xpu by default
+# you can install a specific ipex/torch version to fit your need
+pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+pip install transformers_stream_generator  # additional package required for Baichuan2-7B-Chat to conduct generation
+```
+
+### 2. Configure OneAPI environment variables
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Run
+
+For optimal performance on Arc, it is recommended to set several environment variables:
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+```
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the Hugging Face repo id of the Baichuan2 model (e.g. `baichuan-inc/Baichuan2-7B-Chat`) to be downloaded, or the path to the Hugging Face checkpoint folder. It defaults to `'baichuan-inc/Baichuan2-7B-Chat'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with the integrated prompt format for chat). It defaults to `'AI是什么?'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
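+
+For example, an invocation with every argument spelled out might look like the following (the values shown are simply the defaults documented above, with the English prompt used in the sample output below):
+```bash
+python ./generate.py --repo-id-or-model-path baichuan-inc/Baichuan2-7B-Chat --prompt "What is AI?" --n-predict 32
+```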
+
+#### Sample Output
+#### [baichuan-inc/Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat)
+```log
+-------------------- Prompt --------------------
+AI是什么?
+-------------------- Output --------------------
+AI是什么?
+AI是人工智能(Artificial Intelligence)的缩写,它是指让计算机或机器模拟、扩展和辅助人类的智能。AI技术已经广泛应用于各个领域
+```
+
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+What is AI?
+-------------------- Output --------------------
+What is AI? Artificial Intelligence (AI) refers to the development of computer systems that can perform tasks that would typically require human intelligence. These tasks include learning, reasoning, problem
+```
diff --git a/python/llm/example/gpu/hf-transformers-models/baichuan2/generate.py b/python/llm/example/gpu/hf-transformers-models/baichuan2/generate.py
new file mode 100644
index 00000000..16a7b9d8
--- /dev/null
+++ b/python/llm/example/gpu/hf-transformers-models/baichuan2/generate.py
@@ -0,0 +1,78 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import intel_extension_for_pytorch as ipex
+import time
+import argparse
+
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# you can tune the prompt format based on your own model
+BAICHUAN_PROMPT_FORMAT = "{prompt} "
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Baichuan2 model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="baichuan-inc/Baichuan2-7B-Chat",
+                        help='The huggingface repo id for the Baichuan2 model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="AI是什么?",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load the model in 4 bit, which converts the relevant layers in the model into INT4 format.
+    # If your selected model is capable of utilizing previous key/value attentions
+    # to enhance decoding speed, but has `"use_cache": false` in its model config,
+    # it is important to set `use_cache=True` explicitly in the `generate` function
+    # to obtain optimal performance with BigDL-LLM INT4 optimizations
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True,
+                                                 optimize_model=False,
+                                                 trust_remote_code=True,
+                                                 use_cache=True)
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path,
+                                              trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        prompt = BAICHUAN_PROMPT_FORMAT.format(prompt=args.prompt)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        # the ipex model needs a warm-up run; after that, the measured inference time is accurate
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+
+        # start inference
+        st = time.time()
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Prompt', '-'*20)
+        print(prompt)
+        print('-'*20, 'Output', '-'*20)
+        print(output_str)