From 21c7503a42ee41f2b2be704c9df9ddd938d810c2 Mon Sep 17 00:00:00 2001
From: Ziteng Zhang <87107332+Jasonzzt@users.noreply.github.com>
Date: Thu, 14 Dec 2023 14:01:30 +0800
Subject: [PATCH] [LLM] Correct prompt format of Qwen in generate.py (#9678)

* Change qwen prompt format to chatml
---
 .../HF-Transformers-AutoModels/Model/qwen/generate.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen/generate.py
index 44d3f34e..8143e34c 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen/generate.py
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen/generate.py
@@ -23,7 +23,15 @@ from bigdl.llm.transformers import AutoModelForCausalLM
 from transformers import AutoTokenizer
 
 # you could tune the prompt based on your own model
-QWEN_PROMPT_FORMAT = "{prompt} "
+QWEN_PROMPT_FORMAT = """
+<|im_start|>system
+You are a helpful assistant.
+<|im_end|>
+<|im_start|>user
+{prompt}
+<|im_end|>
+<|im_start|>assistant
+"""
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Qwen model')
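
Note: for context, below is a minimal sketch of how the new ChatML template is consumed by the surrounding generate.py. It is not part of this patch, and the model path and generation arguments are illustrative assumptions (the real script reads them via argparse). ChatML delimits each conversation turn with <|im_start|>role ... <|im_end|> markers, and the template ends with an opened assistant turn so the model's output continues as the assistant's reply.

# Minimal usage sketch of the new template. Assumptions (not in this
# patch): the model path and max_new_tokens are hard-coded here, while
# the actual script parses them from the command line.
from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

QWEN_PROMPT_FORMAT = """
<|im_start|>system
You are a helpful assistant.
<|im_end|>
<|im_start|>user
{prompt}
<|im_end|>
<|im_start|>assistant
"""

model_path = "Qwen/Qwen-7B-Chat"  # assumed checkpoint for illustration

# Load the model with BigDL-LLM 4-bit optimizations, as the example does
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_4bit=True,
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path,
                                          trust_remote_code=True)

# Fill the user turn of the ChatML template; generation then continues
# from the opened assistant turn at the end of the template
prompt = QWEN_PROMPT_FORMAT.format(prompt="What is AI?")
input_ids = tokenizer.encode(prompt, return_tensors="pt")
output = model.generate(input_ids, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))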