diff --git a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py
index 9ebb56be..a4592679 100644
--- a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py
+++ b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py
@@ -24,7 +24,7 @@ from ipex_llm import optimize_model
 # prompt format referred from https://github.com/baichuan-inc/Baichuan2/issues/227
 # and https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/generation_utils.py#L7-L49
 # For English prompt, you are recommended to change the prompt format.
-BAICHUAN_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>"
+BAICHUAN2_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>"
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Baichuan2 model')
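
For context, below is a minimal sketch of how the renamed constant is typically consumed in an ipex-llm GPU example like this one. It is not part of the diff: the checkpoint path, generation arguments, and the surrounding load/generate code are assumptions modeled on the example's usual pattern, not lines taken from generate.py.

    # Minimal usage sketch, NOT from the diff: assumes the standard ipex-llm
    # GPU example flow (load -> optimize_model -> move to 'xpu' -> generate).
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from ipex_llm import optimize_model

    BAICHUAN2_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>"

    model_path = "baichuan-inc/Baichuan2-7B-Chat"  # assumed checkpoint
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 trust_remote_code=True,
                                                 torch_dtype=torch.float16)
    model = optimize_model(model)  # ipex-llm low-bit optimization
    model = model.to('xpu')        # Intel GPU device used by these examples
    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)

    # The constant is a str.format template that wraps the user text in
    # Baichuan2's chat special tokens.
    prompt = BAICHUAN2_PROMPT_FORMAT.format(prompt="What is AI?")
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
    with torch.inference_mode():
        output = model.generate(input_ids, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))

One thing to verify for a rename like this: any later reference to the old name (e.g. a BAICHUAN_PROMPT_FORMAT.format(...) call further down in generate.py) would also need updating, otherwise the script fails with a NameError at runtime.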