diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
index 6474d507..2785323f 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
@@ -1,11 +1,16 @@
 # AWQ
-This example shows how to directly run 4-bit AWQ models using BigDL-LLM on Intel CPU. For illustration purposes, we utilize the ["TheBloke/Llama-2-7B-Chat-AWQ"](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ) as a reference.
+This example shows how to directly run 4-bit AWQ models using BigDL-LLM on Intel CPU.
 
-## 0. Requirements
+## Verified Models
+- [Llama-2-7B-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ)
+- [Mistral-7B-Instruct-v0.1-AWQ](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-AWQ)
+- [Mistral-7B-v0.1-AWQ](https://huggingface.co/TheBloke/Mistral-7B-v0.1-AWQ)
+
+## Requirements
 To run these examples with BigDL-LLM, we have some recommended requirements for your machine, please refer to [here](../README.md#recommended-requirements) for more information.
 
 ## Example: Predict Tokens using `generate()` API
-In the example [generate.py](./generate.py), we show a basic use case for a Llama2 model to predict the next N tokens using `generate()` API, with BigDL-LLM INT4 optimizations.
+In the example [generate.py](./generate.py), we show a basic use case for an AWQ model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
 ### 1. Install
 We suggest using conda to manage environment:
 ```bash
@@ -13,7 +18,7 @@ conda create -n llm python=3.9
 conda activate llm
 
 pip install autoawq==0.1.6 --no-deps
-pip install bigdl-llm[all] # install bigdl-llm with 'all' option
+pip install --pre --upgrade bigdl-llm[all] # install bigdl-llm with 'all' option
 pip install transformers==4.35.0
 pip install accelerate==0.24.1
 ```
@@ -24,13 +29,13 @@ python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROM
 ```
 
 Arguments info:
-- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Llama2-awq model (e.g. `TheBloke/Llama-2-7B-Chat-AWQ`) to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'TheBloke/Llama-2-7B-Chat-AWQ'`.
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the AWQ model (e.g. `TheBloke/Llama-2-7B-Chat-AWQ`, `TheBloke/Mistral-7B-Instruct-v0.1-AWQ`, `TheBloke/Mistral-7B-v0.1-AWQ`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'TheBloke/Llama-2-7B-Chat-AWQ'`.
 - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'What is AI?'`.
 - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`.
 
 > **Note**: When loading the model in 4-bit, BigDL-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit will requires approximately 2*X* GB of memory for loading, and ~0.5*X* GB memory for further inference.
 >
-> Please select the appropriate size of the Llama2 model based on the capabilities of your machine.
+> Please select the appropriate size of the model based on the capabilities of your machine.
 
 #### 2.1 Client
 On client Windows machine, it is recommended to run directly with full utilization of all cores:
@@ -52,7 +57,7 @@ numactl -C 0-47 -m 0 python ./generate.py
 ```
 
 #### 2.3 Sample Output
-#### ["TheBloke/Llama-2-7B-Chat-AWQ"](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ)
+#### [TheBloke/Llama-2-7B-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ)
 ```log
 Inference time: xxxx s
 -------------------- Prompt --------------------
@@ -68,4 +73,4 @@ What is AI?
 ### RESPONSE:
 
 Artificial intelligence (AI) is the ability of machines to perform tasks that typically require human intelligence, such as learning, problem-solving, decision
-```
+```
\ No newline at end of file
diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
index c9e7c066..42cb6ed5 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
@@ -19,18 +19,18 @@ import time
 import argparse
 
 from bigdl.llm.transformers import AutoModelForCausalLM
-from transformers import LlamaTokenizer
+from transformers import AutoTokenizer
 
 # you could tune the prompt based on your own model,
 # here the prompt tuning refers to https://huggingface.co/georgesung/llama2_7b_chat_uncensored#prompt-style
-LLAMA2_PROMPT_FORMAT = """### HUMAN:
+PROMPT_FORMAT = """### HUMAN:
 {prompt}
 
 ### RESPONSE:
 """
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model')
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for AWQ model')
     parser.add_argument('--repo-id-or-model-path', type=str, default="TheBloke/Llama-2-7B-Chat-AWQ",
                         help='The huggingface repo id'
                              ', or the path to the huggingface checkpoint folder')
@@ -49,11 +49,11 @@ if __name__ == '__main__':
                                                  trust_remote_code=True)
 
     # Load tokenizer
-    tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
     # Generate predicted tokens
     with torch.inference_mode():
-        prompt = LLAMA2_PROMPT_FORMAT.format(prompt=args.prompt)
+        prompt = PROMPT_FORMAT.format(prompt=args.prompt)
         input_ids = tokenizer.encode(prompt, return_tensors="pt")
         st = time.time()
         # if your selected model is capable of utilizing previous key/value attentions
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
index f223d5e9..52335c2d 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md
@@ -1,11 +1,16 @@
 # AWQ
-This example shows how to directly run 4-bit AWQ models using BigDL-LLM on Intel GPU. For illustration purposes, we utilize the ["TheBloke/Llama-2-7B-Chat-AWQ"](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ) as a reference.
+This example shows how to directly run 4-bit AWQ models using BigDL-LLM on Intel GPU.
 
-## 0. Requirements
+## Verified Models
+- [Llama-2-7B-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7B-Chat-AWQ)
+- [Mistral-7B-Instruct-v0.1-AWQ](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-AWQ)
+- [Mistral-7B-v0.1-AWQ](https://huggingface.co/TheBloke/Mistral-7B-v0.1-AWQ)
+
+## Requirements
 To run these examples with BigDL-LLM, we have some recommended requirements for your machine, please refer to [here](../README.md#recommended-requirements) for more information.
 
 ## Example: Predict Tokens using `generate()` API
-In the example [generate.py](./generate.py), we show a basic use case for a Llama2 model to predict the next N tokens using `generate()` API, with BigDL-LLM INT4 optimizations.
+In the example [generate.py](./generate.py), we show a basic use case for an AWQ model to predict the next N tokens using the `generate()` API, with BigDL-LLM INT4 optimizations.
 ### 1. Install
 We suggest using conda to manage environment:
 ```bash
@@ -37,7 +42,7 @@ python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROM
 ```
 
 Arguments info:
-- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Llama2-awq model (e.g. `TheBloke/Llama-2-7B-Chat-AWQ`) to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'TheBloke/Llama-2-7B-Chat-AWQ'`.
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the AWQ model (e.g. `TheBloke/Llama-2-7B-Chat-AWQ`, `TheBloke/Mistral-7B-Instruct-v0.1-AWQ`, `TheBloke/Mistral-7B-v0.1-AWQ`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'TheBloke/Llama-2-7B-Chat-AWQ'`.
 - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'What is AI?'`.
 - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`.
 
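For reference, running the example against one of the newly verified Mistral checkpoints only requires swapping the `--repo-id-or-model-path` value; the invocation below is a minimal sketch that fills in the arguments documented above with the default prompt and token count.

```bash
# Illustrative run; any of the verified AWQ repo ids listed above works here.
python ./generate.py --repo-id-or-model-path TheBloke/Mistral-7B-Instruct-v0.1-AWQ \
                     --prompt "What is AI?" \
                     --n-predict 32
```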
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
index 9e9b72df..136bd35d 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/generate.py
@@ -19,18 +19,18 @@ import time
 import argparse
 import intel_extension_for_pytorch as ipex
 from bigdl.llm.transformers import AutoModelForCausalLM
-from transformers import LlamaTokenizer
+from transformers import AutoTokenizer
 
 # you could tune the prompt based on your own model,
 # here the prompt tuning refers to https://huggingface.co/georgesung/llama2_7b_chat_uncensored#prompt-style
-LLAMA2_PROMPT_FORMAT = """### HUMAN:
+PROMPT_FORMAT = """### HUMAN:
 {prompt}
 
 ### RESPONSE:
 """
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model')
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for AWQ model')
     parser.add_argument('--repo-id-or-model-path', type=str, default="TheBloke/Llama-2-7B-Chat-AWQ",
                         help='The huggingface repo id'
                              ', or the path to the huggingface checkpoint folder')
@@ -49,11 +49,11 @@ if __name__ == '__main__':
                                                  trust_remote_code=True,).to("xpu")
 
     # Load tokenizer
-    tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
     # Generate predicted tokens
     with torch.inference_mode():
-        prompt = LLAMA2_PROMPT_FORMAT.format(prompt=args.prompt)
+        prompt = PROMPT_FORMAT.format(prompt=args.prompt)
         input_ids = tokenizer.encode(prompt, return_tensors="pt").to("xpu")
         st = time.time()
         # if your selected model is capable of utilizing previous key/value attentions
diff --git a/python/llm/src/bigdl/llm/transformers/awq/awq.py b/python/llm/src/bigdl/llm/transformers/awq/awq.py
index 511d9ede..f3112157 100644
--- a/python/llm/src/bigdl/llm/transformers/awq/awq.py
+++ b/python/llm/src/bigdl/llm/transformers/awq/awq.py
@@ -131,6 +131,8 @@ def get_blocks(model):
         layers = model.transformer.h
     elif "neox" in str(model.__class__).lower():
         layers = model.gpt_neox.layers
+    elif "mistral" in str(model.__class__).lower():
+        layers = model.model.layers
     else:
         invalidInputError(False, f"Model type {type(model)} isn't supported.")
     return layers
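Taken together, the README, example, and `get_blocks()` changes add up to the following end-to-end flow. This is a minimal CPU-side sketch rather than the shipped `generate.py`; it assumes the `load_in_4bit=True` flag of BigDL-LLM's `from_pretrained` (the full call is not visible in the truncated hunks above), and the `max_new_tokens` value is illustrative.

```python
import torch
from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

# Same generic prompt template the examples switch to in this patch.
PROMPT_FORMAT = """### HUMAN:
{prompt}

### RESPONSE:
"""

model_path = "TheBloke/Mistral-7B-Instruct-v0.1-AWQ"

# Assumption: load_in_4bit=True is the flag used in the full from_pretrained
# call of generate.py; BigDL-LLM converts the AWQ weights to its INT4 format
# at load time.
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_4bit=True,
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

with torch.inference_mode():
    # Format the prompt, run greedy generation, and print the decoded text.
    input_ids = tokenizer.encode(PROMPT_FORMAT.format(prompt="What is AI?"),
                                 return_tensors="pt")
    output = model.generate(input_ids, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```

The new `mistral` branch in `get_blocks()` is what lets the AWQ conversion locate the decoder blocks (`model.model.layers`) for Mistral-family checkpoints, so the same loading path covers all three verified models.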