update lowbit path for baichuan2, qwen2, generate.py (#12051)
* update lowbit path for baichuan2, qwen2, `generate.py`
* update readme
parent dc4af02b2a
commit 73a4360f3f
4 changed files with 98 additions and 29 deletions
README.md
@@ -61,6 +61,7 @@ python ./generate.py
Arguments info:
- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Llama2 model (e.g. `meta-llama/Llama-2-7b-chat-hf`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'meta-llama/Llama-2-7b-chat-hf'`; for more verified models, please see the list in [Verified Models](#verified-models).
- `--lowbit-path LOWBIT_MODEL_PATH`: argument defining the path used to save/load the lowbit version of the model. If it is an empty string, the original pretrained model specified by `REPO_ID_OR_MODEL_PATH` will be loaded. If it is an existing path, the lowbit model in `LOWBIT_MODEL_PATH` will be loaded. If it is a non-existing path, the original pretrained model specified by `REPO_ID_OR_MODEL_PATH` will be loaded, and the converted lowbit version will be saved into `LOWBIT_MODEL_PATH`. It defaults to `''`, i.e. an empty string; see the sketch after this list.
- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). It defaults to `'Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun'`.
- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It defaults to `32`.
- `--load_in_low_bit`: argument defining the `load_in_low_bit` format used. It defaults to `sym_int8`; `sym_int4` can also be used.
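A minimal sketch of the save/load flow behind `--lowbit-path`, mirroring the logic `generate.py` implements in this commit (the folder name is hypothetical; the model id is the README default):

```python
import os

from ipex_llm.transformers.npu_model import AutoModelForCausalLM

lowbit_path = "./llama2-lowbit"  # hypothetical folder passed via --lowbit-path

if not lowbit_path or not os.path.exists(lowbit_path):
    # empty or non-existing path: load and convert the original checkpoint
    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-chat-hf",
        trust_remote_code=True,
        load_in_low_bit="sym_int8",
        attn_implementation="eager",
    )
else:
    # existing path: load the already-converted lowbit model directly
    model = AutoModelForCausalLM.load_low_bit(
        lowbit_path,
        trust_remote_code=True,
        bigdl_transformers_low_bit="sym_int8",
        attn_implementation="eager",
    )

if lowbit_path and not os.path.exists(lowbit_path):
    model.save_low_bit(lowbit_path)  # persist the lowbit model for later runs
```

The first run with a non-existing path therefore pays the conversion cost once and saves the result; subsequent runs load the saved lowbit copy directly.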
@@ -131,6 +132,9 @@ Arguments info:
### Troubleshooting

#### `TypeError: can't convert meta device type tensor to numpy.` Error
If you encounter the `TypeError: can't convert meta device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.` error when loading a lowbit model, please try re-saving the lowbit model with the example script you are currently using. Please note that lowbit models saved by `qwen2.py`, `llama.py`, etc. cannot be loaded by `generate.py`.
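If you prefer to script the re-save, a minimal sketch (the folder name is hypothetical) is to remove the stale lowbit folder so the script you are currently using converts and saves a fresh, compatible copy on its next run:

```python
import os
import shutil

lowbit_path = "./model-lowbit"  # hypothetical folder saved by another script

# delete the incompatible lowbit copy; the next run with --lowbit-path
# pointing here will re-convert the model and save a compatible copy
if os.path.exists(lowbit_path):
    shutil.rmtree(lowbit_path)
```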
#### Output Problem
If you encounter an output problem, please try disabling the optimization of transposing the value cache with the following command:
```bash
# assumed invocation; substitute the example script you are running
python ./generate.py --disable-transpose-value-cache
```
baichuan2.py
@@ -50,6 +50,12 @@ if __name__ == "__main__":
                        help="The huggingface repo id for the Baichuan2 model to be downloaded"
                             ", or the path to the huggingface checkpoint folder",
    )
+    parser.add_argument("--lowbit-path", type=str,
+                        default="",
+                        help="The path to the lowbit model folder; leave it blank if you do not want to save. \
+                        If the path does not exist, the lowbit model will be saved there. \
+                        Otherwise, the lowbit model will be loaded.",
+    )
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument("--n-predict", type=int, default=32, help="Max tokens to predict")
@@ -62,22 +68,39 @@ if __name__ == "__main__":
    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

-    model = AutoModelForCausalLM.from_pretrained(
-        model_path,
-        torch_dtype=torch.bfloat16,
-        trust_remote_code=True,
-        attn_implementation="eager",
-        load_in_low_bit="sym_int4",
-        optimize_model=True,
-        max_output_len=args.max_output_len,
-        max_prompt_len=args.max_prompt_len,
-        intra_pp=args.intra_pp,
-        inter_pp=args.inter_pp,
-        transpose_value_cache=not args.disable_transpose_value_cache,
-    )
+    if not args.lowbit_path or not os.path.exists(args.lowbit_path):
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True,
+            attn_implementation="eager",
+            load_in_low_bit="sym_int4",
+            optimize_model=True,
+            max_output_len=args.max_output_len,
+            max_prompt_len=args.max_prompt_len,
+            intra_pp=args.intra_pp,
+            inter_pp=args.inter_pp,
+            transpose_value_cache=not args.disable_transpose_value_cache,
+        )
+    else:
+        model = AutoModelForCausalLM.load_low_bit(
+            args.lowbit_path,
+            attn_implementation="eager",
+            torch_dtype=torch.bfloat16,
+            optimize_model=True,
+            max_output_len=args.max_output_len,
+            max_prompt_len=args.max_prompt_len,
+            intra_pp=args.intra_pp,
+            inter_pp=args.inter_pp,
+            transpose_value_cache=not args.disable_transpose_value_cache,
+            trust_remote_code=True,
+        )

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

+    if args.lowbit_path and not os.path.exists(args.lowbit_path):
+        model.save_low_bit(args.lowbit_path)

    DEFAULT_SYSTEM_PROMPT = """\
"""
generate.py
@@ -17,6 +17,7 @@
import torch
import time
import argparse
+import os

from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from transformers import AutoTokenizer
@@ -27,6 +28,11 @@ if __name__ == '__main__':
    parser.add_argument('--repo-id-or-model-path', type=str, default="meta-llama/Llama-2-7b-chat-hf",
                        help='The huggingface repo id for the Llama2 model to be downloaded'
                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument("--lowbit-path", type=str,
+                        default="",
+                        help='The path to the lowbit model folder; leave it blank if you do not want to save. \
+                        If the path does not exist, the lowbit model will be saved there. \
+                        Otherwise, the lowbit model will be loaded.')
    parser.add_argument('--prompt', type=str, default="Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
@@ -39,12 +45,26 @@ if __name__ == '__main__':
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

-    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,
-                                                 load_in_low_bit=args.load_in_low_bit,
-                                                 attn_implementation="eager")
+    if not args.lowbit_path or not os.path.exists(args.lowbit_path):
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            trust_remote_code=True,
+            load_in_low_bit=args.load_in_low_bit,
+            attn_implementation="eager"
+        )
+    else:
+        model = AutoModelForCausalLM.load_low_bit(
+            args.lowbit_path,
+            trust_remote_code=True,
+            bigdl_transformers_low_bit=args.load_in_low_bit,
+            attn_implementation="eager"
+        )

    print(model)

+    if args.lowbit_path and not os.path.exists(args.lowbit_path):
+        model.save_low_bit(args.lowbit_path)

    with torch.inference_mode():
        prompt = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun"
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
qwen2.py
@@ -37,6 +37,12 @@ if __name__ == "__main__":
                        help="The huggingface repo id for the Qwen2 model to be downloaded"
                             ", or the path to the huggingface checkpoint folder",
    )
+    parser.add_argument("--lowbit-path", type=str,
+                        default="",
+                        help="The path to the lowbit model folder; leave it blank if you do not want to save. \
+                        If the path does not exist, the lowbit model will be saved there. \
+                        Otherwise, the lowbit model will be loaded.",
+    )
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument("--n-predict", type=int, default=32, help="Max tokens to predict")
@@ -49,22 +55,38 @@ if __name__ == "__main__":
    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

-    model = AutoModelForCausalLM.from_pretrained(
-        model_path,
-        torch_dtype=torch.float16,
-        trust_remote_code=True,
-        attn_implementation="eager",
-        load_in_low_bit="sym_int4",
-        optimize_model=True,
-        max_output_len=args.max_output_len,
-        max_prompt_len=args.max_prompt_len,
-        intra_pp=args.intra_pp,
-        inter_pp=args.inter_pp,
-        transpose_value_cache=not args.disable_transpose_value_cache,
-    )
+    if not args.lowbit_path or not os.path.exists(args.lowbit_path):
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            torch_dtype=torch.float16,
+            trust_remote_code=True,
+            attn_implementation="eager",
+            load_in_low_bit="sym_int4",
+            optimize_model=True,
+            max_output_len=args.max_output_len,
+            max_prompt_len=args.max_prompt_len,
+            intra_pp=args.intra_pp,
+            inter_pp=args.inter_pp,
+            transpose_value_cache=not args.disable_transpose_value_cache,
+        )
+    else:
+        model = AutoModelForCausalLM.load_low_bit(
+            args.lowbit_path,
+            attn_implementation="eager",
+            torch_dtype=torch.float16,
+            optimize_model=True,
+            max_output_len=args.max_output_len,
+            max_prompt_len=args.max_prompt_len,
+            intra_pp=args.intra_pp,
+            inter_pp=args.inter_pp,
+            transpose_value_cache=not args.disable_transpose_value_cache,
+        )

    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

+    if args.lowbit_path and not os.path.exists(args.lowbit_path):
+        model.save_low_bit(args.lowbit_path)

    print("-" * 80)
    print("done")
    messages = [{"role": "system", "content": "You are a helpful assistant."},