add tools into previously built images (#9317)
* modify Dockerfile
* manually build
* modify Dockerfile
* add chat.py into inference-xpu
* add benchmark into inference-cpu
* manually build
* add benchmark into inference-cpu
* add benchmark into inference-cpu
* add benchmark into inference-cpu
* add chat.py into inference-xpu
* add chat.py into inference-xpu
* change ADD to COPY in dockerfile
* fix dependency issue
* temporarily remove run-spr in llm-cpu
* temporarily remove run-spr in llm-cpu
parent d383ee8efb
commit 2c2bc959ad

4 changed files with 110 additions and 5 deletions
docker/llm/inference/xpu/docker/Dockerfile

@@ -4,6 +4,9 @@ ARG http_proxy
 ARG https_proxy
 
 ENV TZ=Asia/Shanghai
+ENV PYTHONUNBUFFERED=1
+
+COPY chat.py /llm/chat.py
 
 # Disable pip's cache behavior
 ARG PIP_NO_CACHE_DIR=false
@@ -33,4 +36,6 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
     pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu && \
     # Install opencl-related repos
     apt-get update && \
-    apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev
+    apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev && \
+    # Install related library of chat.py
+    pip install --upgrade colorama
docker/llm/inference/xpu/docker/chat.py (new file, 102 lines)

@@ -0,0 +1,102 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import intel_extension_for_pytorch as ipex  # registers the 'xpu' device with PyTorch
+import torch
+import argparse
+import sys
+
+# todo: support more model classes
+from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, AutoConfig
+from transformers import TextIteratorStreamer
+from transformers.tools.agents import StopSequenceCriteria
+from transformers.generation.stopping_criteria import StoppingCriteriaList
+from colorama import Fore
+from bigdl.llm import optimize_model
+
+SYSTEM_PROMPT = "A chat between a curious human <human> and an artificial intelligence assistant <bot>.\
+The assistant gives helpful, detailed, and polite answers to the human's questions."
+HUMAN_ID = "<human>"
+BOT_ID = "<bot>"
+
+# chat_history is formatted as [(input_str, output_str)]
+def format_prompt(input_str,
+                  chat_history):
+    prompt = [f"{SYSTEM_PROMPT}\n"]
+    for history_input_str, history_output_str in chat_history:
+        prompt.append(f"{HUMAN_ID} {history_input_str}\n{BOT_ID} {history_output_str}\n")
+    prompt.append(f"{HUMAN_ID} {input_str}\n{BOT_ID} ")
+    return "".join(prompt)
+
+def stream_chat(model,
+                tokenizer,
+                stopping_criteria,
+                input_str,
+                chat_history):
+    prompt = format_prompt(input_str, chat_history)
+    # print(prompt)
+    input_ids = tokenizer([prompt], return_tensors="pt").to('xpu')
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(input_ids, streamer=streamer, max_new_tokens=512, stopping_criteria=stopping_criteria)
+
+    from threading import Thread
+    # to ensure non-blocking access to the generated text, the generation process should be run in a separate thread
+    thread = Thread(target=model.generate, kwargs=generate_kwargs)
+    thread.start()
+
+    output_str = []
+    print(Fore.BLUE+"BigDL-LLM: "+Fore.RESET, end="")
+    for partial_output_str in streamer:
+        output_str.append(partial_output_str)
+        # remove the last HUMAN_ID if it exists
+        print(partial_output_str.replace(f"{HUMAN_ID}", ""), end="")
+
+    chat_history.append((input_str, "".join(output_str).replace(f"{HUMAN_ID}", "").rstrip()))
+
+def auto_select_model(model_name):
+    try:
+        try:
+            model = AutoModelForCausalLM.from_pretrained(model_name,
+                                                         low_cpu_mem_usage=True,
+                                                         torch_dtype="auto",
+                                                         trust_remote_code=True,
+                                                         use_cache=True)
+        except Exception:
+            model = AutoModel.from_pretrained(model_name,
+                                              low_cpu_mem_usage=True,
+                                              torch_dtype="auto",
+                                              trust_remote_code=True,
+                                              use_cache=True)
+    except Exception:
+        print("Sorry, the model you entered is not supported in installer.")
+        sys.exit()
+
+    return model
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model-path", type=str, help="path to an llm")
+    args = parser.parse_args()
+    model_path = args.model_path
+
+    model = auto_select_model(model_path)
+    model = optimize_model(model)
+    model = model.to('xpu')
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(HUMAN_ID, tokenizer)])
+
+    chat_history = []
+    while True:
+        with torch.inference_mode():
+            user_input = input(Fore.GREEN+"\nHuman: "+Fore.RESET)
+            if user_input == "stop":  # let's stop the conversation when the user inputs "stop"
+                break
+            stream_chat(model=model,
+                        tokenizer=tokenizer,
+                        stopping_criteria=stopping_criteria,
+                        input_str=user_input,
+                        chat_history=chat_history)
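As a quick reference, the transcript layout chat.py builds can be previewed without an Intel GPU or model weights. The sketch below reproduces format_prompt from the file above with an invented history turn (the question/answer pair is hypothetical):

# Standalone sketch of chat.py's prompt layout; the history pair is invented.
SYSTEM_PROMPT = "A chat between a curious human <human> and an artificial intelligence assistant <bot>.\
The assistant gives helpful, detailed, and polite answers to the human's questions."
HUMAN_ID = "<human>"
BOT_ID = "<bot>"

def format_prompt(input_str, chat_history):
    prompt = [f"{SYSTEM_PROMPT}\n"]
    for history_input_str, history_output_str in chat_history:
        prompt.append(f"{HUMAN_ID} {history_input_str}\n{BOT_ID} {history_output_str}\n")
    prompt.append(f"{HUMAN_ID} {input_str}\n{BOT_ID} ")
    return "".join(prompt)

history = [("What is BigDL-LLM?", "A library that accelerates LLM inference on Intel hardware.")]
print(format_prompt("Can I run it on an Intel GPU?", history))

Inside the built image, the script itself would be launched with something like python /llm/chat.py --model-path <model directory> (the directory is a placeholder); actual generation still requires the XPU stack installed by the Dockerfile above.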
@@ -10,8 +10,7 @@ ARG PIP_NO_CACHE_DIR=false
 COPY ./entrypoint.sh /opt/entrypoint.sh
 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /sbin/tini
 # Install Serving Dependencies
-RUN mkdir /llm && \
-    cd /llm && \
+RUN cd /llm && \
     git clone https://github.com/analytics-zoo/FastChat.git && \
     cd FastChat && \
     git checkout dev-2023-09-22 && \
@@ -7,8 +7,7 @@ ARG https_proxy
 ARG PIP_NO_CACHE_DIR=false
 
 # Install Serving Dependencies
-RUN mkdir /llm && \
-    cd /llm && \
+RUN cd /llm && \
     git clone https://github.com/analytics-zoo/FastChat.git && \
     cd FastChat && \
     git checkout dev-2023-09-22 && \