Use English prompt by default (#12115)
This commit is contained in:
parent
5d63aef60b
commit
657889e3e4
1 changed files with 8 additions and 6 deletions
|
|
@ -435,12 +435,14 @@ LLM_URLS = [f"http://localhost:{PORT}/v1/completions" for PORT in [8000]]
|
|||
MODEL = "/llm/models/" + model_name
|
||||
MAX_TOKENS = output_length # changed MAX_TOKENS to output_length
|
||||
|
||||
if "Qwen" not in MODEL and "chatglm" not in MODEL:
|
||||
# if "Qwen" not in MODEL and "chatglm" not in MODEL:
|
||||
# print("using Llama PROMPT")
|
||||
PROMPT = ENGLISH_PROMPT
|
||||
else:
|
||||
# PROMPT = ENGLISH_PROMPT
|
||||
# else:
|
||||
# print("using Qwen/chatglm PROMPT")
|
||||
PROMPT = CHINESE_PROMPT
|
||||
# PROMPT = CHINESE_PROMPT
|
||||
|
||||
PROMPT = ENGLISH_PROMPT
|
||||
|
||||
# Load the model's tokenizer
|
||||
from transformers import AutoTokenizer
|
||||
|
|
|
|||
Loading…
Reference in a new issue