diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/README.md index ff0347b7..51495d3d 100644 --- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/README.md +++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/README.md @@ -54,15 +54,15 @@ numactl -C 0-47 -m 0 python ./generate.py ```log Inference time: xxxx s -------------------- Prompt -------------------- -AI是什么? + AI是什么? -------------------- Output -------------------- -AI是什么? 人工智能(AI)是指由计算机系统或其他数字设备模拟、扩展和增强人类智能的科学和技术。它涉及到多个领域,如机器学习、计算机视觉、 + AI是什么? 人工智能(AI)是指由计算机系统执行的任务,这些任务通常需要人类智能才能完成。AI的目标是使计算机能够模拟人类的思维过程,从而 ``` ```log Inference time: xxxx s -------------------- Prompt -------------------- -解释一下“温故而知新” + 解释一下“温故而知新” -------------------- Output -------------------- -解释一下“温故而知新” 这句话出自《论语·为政》篇,意思是通过回顾过去的事情来获取新的理解和认识。简单来说就是:温习学过的知识,可以从中 + 解释一下“温故而知新” 温故而知新是一个成语,出自《论语·为政》篇。这个成语的意思是:通过回顾和了解过去的事情,可以更好地理解新的知识和 ``` diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py index 1ff4812e..8d1cce0c 100644 --- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py +++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py @@ -22,8 +22,10 @@ import numpy as np from bigdl.llm.transformers import AutoModelForCausalLM from transformers import AutoTokenizer -# you could tune the prompt based on your own model, -BAICHUAN_PROMPT_FORMAT = "{prompt} " +# prompt format referred from https://github.com/baichuan-inc/Baichuan2/issues/227 +# and https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/generation_utils.py#L7-L49 +# For English prompt, you are recommended to change the prompt format. 
+BAICHUAN_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>" if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Baichuan model') diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md index 1ad1b4eb..f49219c4 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md @@ -109,18 +109,10 @@ Arguments info: #### Sample Output #### [baichuan-inc/Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) -```log --------------------- Prompt -------------------- -AI是什么? --------------------- Output -------------------- -AI是什么? -AI是人工智能(Artificial Intelligence)的缩写,它是指让计算机或机器模拟、扩展和辅助人类的智能。AI技术已经广泛应用于各个领域 -``` - ```log Inference time: xxxx s -------------------- Prompt -------------------- -What is AI? + AI是什么? -------------------- Output -------------------- -What is AI? Artificial Intelligence (AI) refers to the development of computer systems that can perform tasks that would typically require human intelligence. These tasks include learning, reasoning, problem -``` + AI是什么? 
AI是人工智能(Artificial Intelligence)的缩写,它是指让计算机或其他设备模拟人类智能的技术。通过使用大量数据和算法,AI可以学习、 +``` \ No newline at end of file diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py index 88d7ea40..ac004a52 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/generate.py @@ -21,8 +21,10 @@ import argparse from bigdl.llm.transformers import AutoModelForCausalLM from transformers import AutoTokenizer -# you could tune the prompt based on your own model, -BAICHUAN_PROMPT_FORMAT = "{prompt} " +# prompt format referred from https://github.com/baichuan-inc/Baichuan2/issues/227 +# and https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/generation_utils.py#L7-L49 +# For English prompt, you are recommended to change the prompt format. +BAICHUAN_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>" if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Baichuan model') diff --git a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md index 7c0fecd0..c3e2b193 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md @@ -114,13 +114,8 @@ In the example, several arguments can be passed to satisfy your requirements: #### [baichuan-inc/Baichuan2-7B-Chat](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) ```log Inference time: xxxx s +-------------------- Prompt -------------------- + AI是什么? -------------------- Output -------------------- -AI是什么? -AI是人工智能(Artificial Intelligence)的缩写,它是指让计算机或机器模拟、扩展和辅助人类的智能。AI技术已经广泛应用于各个领域 -``` - -```log -Inference time: xxxx s --------------------- Output -------------------- -What is AI? 
Artificial Intelligence (AI) refers to the development of computer systems that can perform tasks that would typically require human intelligence. These tasks include learning, reasoning, problem + AI是什么? AI是人工智能(Artificial Intelligence)的缩写,它是指让计算机或其他设备模拟人类智能的技术。通过使用大量数据和算法,AI可以学习、 ``` \ No newline at end of file diff --git a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py index 8f27d1bc..15d3db8a 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py +++ b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/generate.py @@ -21,8 +21,10 @@ import argparse from transformers import AutoModelForCausalLM, AutoTokenizer from bigdl.llm import optimize_model -# you could tune the prompt based on your own model, -BAICHUAN2_PROMPT_FORMAT = "{prompt} " +# prompt format referred from https://github.com/baichuan-inc/Baichuan2/issues/227 +# and https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/generation_utils.py#L7-L49 +# For English prompt, you are recommended to change the prompt format. +BAICHUAN2_PROMPT_FORMAT = "<reserved_106>{prompt}<reserved_107>" if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Baichuan2 model')