[LLM] Correct prompt format of Qwen in generate.py (#9678)

* Change Qwen prompt format to ChatML
Ziteng Zhang 2023-12-14 14:01:30 +08:00 committed by GitHub
parent 223c9622f7
commit 21c7503a42


@@ -23,7 +23,15 @@ from bigdl.llm.transformers import AutoModelForCausalLM
 from transformers import AutoTokenizer
 
 # you could tune the prompt based on your own model
-QWEN_PROMPT_FORMAT = "<human>{prompt} <bot>"
+QWEN_PROMPT_FORMAT = """
+<|im_start|>system
+You are a helpful assistant.
+<|im_end|>
+<|im_start|>user
+{prompt}
+<|im_end|>
+<|im_start|>assistant
+"""
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Qwen model')
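
For context, a minimal sketch of how generate.py consumes the ChatML template after this change. The checkpoint path and generation settings below are illustrative assumptions, not part of the diff; the load/generate pattern follows the surrounding example script.

    # Sketch only: model_path and max_new_tokens are assumptions for illustration.
    from bigdl.llm.transformers import AutoModelForCausalLM
    from transformers import AutoTokenizer

    QWEN_PROMPT_FORMAT = """
    <|im_start|>system
    You are a helpful assistant.
    <|im_end|>
    <|im_start|>user
    {prompt}
    <|im_end|>
    <|im_start|>assistant
    """

    model_path = "Qwen/Qwen-7B-Chat"  # assumption: any Qwen chat checkpoint works here
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 load_in_4bit=True,
                                                 trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Fill the {prompt} slot, tokenize, and generate, mirroring generate.py.
    prompt = QWEN_PROMPT_FORMAT.format(prompt="What is AI?")
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))

The point of the change itself: Qwen chat models are trained on ChatML-delimited turns (<|im_start|>role ... <|im_end|>), so the old "<human>{prompt} <bot>" wrapper produced off-distribution prompts and weaker completions.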