#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
|
|
A model worker that executes the model.
|
|
Adapted from FastChat's model_worker.py
|
|
"""
|
|
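# Example launch, for illustration only. It assumes a FastChat controller is
# already running at http://localhost:21001 (the default --controller-address
# below) and that the module path matches your installation; --model-path and
# --device are added by FastChat's add_model_args:
#
#     python3 -m bigdl.llm.serving.model_worker --model-path /path/to/model --device cpu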
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Optional
import threading
import uuid
from bigdl.llm.utils.common import invalidInputError

from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
import requests

from .bigdl_llm_model import patch_fastchat

try:
    from transformers import (
        AutoTokenizer,
        AutoModelForCausalLM,
        LlamaTokenizer,
        AutoModel,
    )
except ImportError:
    from transformers import (
        AutoTokenizer,
        AutoModelForCausalLM,
        LLaMATokenizer,
        AutoModel,
    )
import torch
import torch.nn.functional as F
import uvicorn

from fastchat.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from fastchat.conversation import get_conv_template
from fastchat.model.model_adapter import (
    add_model_args,
    get_conversation_template,
    get_generate_stream_function,
)
from fastchat.modules.gptq import GptqConfig
from fastchat.modules.awq import AWQConfig
from fastchat.utils import build_logger, pretty_print_semaphore, get_context_length


worker_id = str(uuid.uuid4())[:8]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")

app = FastAPI()


def heart_beat_worker(obj):
    while True:
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        obj.send_heart_beat()


class BaseModelWorker:
    def __init__(
        self,
        controller_addr: str,
        worker_addr: str,
        worker_id: str,
        model_path: str,
        model_names: List[str],
        limit_worker_concurrency: int,
        conv_template: str = None,
    ):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        if model_path.endswith("/"):
            model_path = model_path[:-1]
        self.model_names = model_names or [model_path.split("/")[-1]]
        self.limit_worker_concurrency = limit_worker_concurrency
        if conv_template:
            self.conv = get_conv_template(conv_template)
        else:
            self.conv = get_conversation_template(model_path)
        self.conv.sep_style = int(self.conv.sep_style)
        self.tokenizer = None
        self.context_len = None
        self.call_ct = 0
        self.semaphore = None

        self.heart_beat_thread = None

    def init_heart_beat(self):
        self.register_to_controller()
        self.heart_beat_thread = threading.Thread(
            target=heart_beat_worker, args=(self,)
        )
        self.heart_beat_thread.start()

    def register_to_controller(self):
        logger.info("Register to controller")

        url = self.controller_addr + "/register_worker"
        data = {
            "worker_name": self.worker_addr,
            "check_heart_beat": True,
            "worker_status": self.get_status(),
        }
        r = requests.post(url, json=data)
        invalidInputError(r.status_code == 200, "Error registering to Controller")

    def send_heart_beat(self):
        logger.info(
            f"Send heart beat. Models: {self.model_names}. "
            f"Semaphore: {pretty_print_semaphore(self.semaphore)}. "
            f"call_ct: {self.call_ct}. "
            f"worker_id: {self.worker_id}. "
        )

        url = self.controller_addr + "/receive_heart_beat"

        while True:
            try:
                ret = requests.post(
                    url,
                    json={
                        "worker_name": self.worker_addr,
                        "queue_length": self.get_queue_length(),
                    },
                    timeout=5,
                )
                exist = ret.json()["exist"]
                break
            except (requests.exceptions.RequestException, KeyError) as e:
                logger.error(f"heart beat error: {e}")
            time.sleep(5)

        if not exist:
            self.register_to_controller()

    def get_queue_length(self):
        if (
            self.semaphore is None
            or self.semaphore._value is None
            or self.semaphore._waiters is None
        ):
            return 0
        else:
            return (
                self.limit_worker_concurrency
                - self.semaphore._value
                + len(self.semaphore._waiters)
            )

    def get_status(self):
        return {
            "model_names": self.model_names,
            "speed": 1,
            "queue_length": self.get_queue_length(),
        }

    def count_token(self, params):
        prompt = params["prompt"]
        input_ids = self.tokenizer(prompt).input_ids
        input_echo_len = len(input_ids)

        ret = {
            "count": input_echo_len,
            "error_code": 0,
        }
        return ret

    def get_conv_template(self):
        return {"conv": self.conv}


class ModelWorker(BaseModelWorker):
    def __init__(
        self,
        controller_addr: str,
        worker_addr: str,
        worker_id: str,
        model_path: str,
        model_names: List[str],
        limit_worker_concurrency: int,
        no_register: bool,
        device: str,
        num_gpus: int,
        max_gpu_memory: str,
        load_8bit: bool = False,
        cpu_offloading: bool = False,
        gptq_config: Optional[GptqConfig] = None,
        awq_config: Optional[AWQConfig] = None,
        stream_interval: int = 2,
        conv_template: str = None,
    ):
        super().__init__(
            controller_addr,
            worker_addr,
            worker_id,
            model_path,
            model_names,
            limit_worker_concurrency,
            conv_template=conv_template,
        )

        logger.info(f"Loading the model {self.model_names} on worker {worker_id} ...")
        from fastchat.model.model_adapter import load_model
        self.model, self.tokenizer = load_model(
            model_path,
            device=device,
            num_gpus=num_gpus,
            max_gpu_memory=max_gpu_memory,
            load_8bit=load_8bit,
            cpu_offloading=cpu_offloading,
            gptq_config=gptq_config,
            awq_config=awq_config,
        )
        self.device = device
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        self.context_len = get_context_length(self.model.config)
        self.generate_stream_func = get_generate_stream_function(self.model, model_path)
        self.stream_interval = stream_interval

        if not no_register:
            self.init_heart_beat()

    def generate_stream_gate(self, params):
        # Yield partial generations as NUL-delimited JSON chunks: each chunk has
        # "text" and "error_code", plus optional "usage"/"finish_reason"/"logprobs".
        self.call_ct += 1

        try:
            for output in self.generate_stream_func(
                self.model,
                self.tokenizer,
                params,
                self.device,
                self.context_len,
                self.stream_interval,
            ):
                if self.device == "xpu":
                    torch.xpu.empty_cache()
                ret = {
                    "text": output["text"],
                    "error_code": 0,
                }
                if "usage" in output:
                    ret["usage"] = output["usage"]
                if "finish_reason" in output:
                    ret["finish_reason"] = output["finish_reason"]
                if "logprobs" in output:
                    ret["logprobs"] = output["logprobs"]
                yield json.dumps(ret).encode() + b"\0"
        except torch.cuda.OutOfMemoryError as e:
            ret = {
                "text": f"{SERVER_ERROR_MSG}\n\n({e})",
                "error_code": ErrorCode.CUDA_OUT_OF_MEMORY,
            }
            yield json.dumps(ret).encode() + b"\0"
        except (ValueError, RuntimeError) as e:
            ret = {
                "text": f"{SERVER_ERROR_MSG}\n\n({e})",
                "error_code": ErrorCode.INTERNAL_ERROR,
            }
            yield json.dumps(ret).encode() + b"\0"

    def generate_gate(self, params):
        for x in self.generate_stream_gate(params):
            pass
        return json.loads(x[:-1].decode())

    @torch.inference_mode()
    def get_embeddings(self, params):
        self.call_ct += 1

        try:
            tokenizer = self.tokenizer
            is_llama = "llama" in str(
                type(self.model)
            )  # llama supports batch inference
            is_chatglm = "chatglm" in str(type(self.model))
            is_t5 = "t5" in str(type(self.model))
            is_bert = "bert" in str(type(self.model))

            if is_llama:
                encoding = tokenizer.batch_encode_plus(
                    params["input"], padding=True, return_tensors="pt"
                )
                input_ids = encoding["input_ids"].to(self.device)
                attention_mask = encoding["attention_mask"].to(self.device)
                model_output = self.model(
                    input_ids, attention_mask, output_hidden_states=True
                )
                data = model_output.hidden_states[-1]
                # Mean-pool the last hidden states over non-padded tokens,
                # then L2-normalize each embedding.
                mask = attention_mask.unsqueeze(-1).expand(data.size()).float()
                masked_embeddings = data * mask
                sum_embeddings = torch.sum(masked_embeddings, dim=1)
                seq_length = torch.sum(mask, dim=1)
                embedding = sum_embeddings / seq_length
                normalized_embeddings = F.normalize(embedding, p=2, dim=1)
                ret = {
                    "embedding": normalized_embeddings.tolist(),
                    "token_num": torch.sum(attention_mask).item(),
                }
            elif is_bert:
                embedding = []
                token_num = 0
                for text in params["input"]:
                    input_ids = tokenizer.encode(text, return_tensors="pt").to(
                        self.device
                    )
                    model_output = self.model(input_ids)
                    data = model_output[0][:, 0]
                    data = F.normalize(torch.mean(data, dim=0), p=2, dim=0)
                    embedding.append(data.tolist())
                    token_num += len(input_ids[0])
                ret = {
                    "embedding": embedding,
                    "token_num": token_num,
                }
            else:
                embedding = []
                token_num = 0
                for text in params["input"]:
                    input_ids = tokenizer.encode(text, return_tensors="pt").to(
                        self.device
                    )
                    if is_t5:
                        model_output = self.model(
                            input_ids, decoder_input_ids=input_ids
                        )
                    else:
                        model_output = self.model(input_ids, output_hidden_states=True)
                    if is_chatglm:
                        data = (model_output.hidden_states[-1].transpose(0, 1))[0]
                    elif is_t5:
                        data = model_output.encoder_last_hidden_state[0]
                    else:
                        data = model_output.hidden_states[-1][0]
                    data = F.normalize(torch.mean(data, dim=0), p=2, dim=0)
                    embedding.append(data.tolist())
                    token_num += len(input_ids[0])
                ret = {
                    "embedding": embedding,
                    "token_num": token_num,
                }
        except torch.cuda.OutOfMemoryError as e:
            ret = {
                "text": f"{SERVER_ERROR_MSG}\n\n({e})",
                "error_code": ErrorCode.CUDA_OUT_OF_MEMORY,
            }
        except (ValueError, RuntimeError) as e:
            ret = {
                "text": f"{SERVER_ERROR_MSG}\n\n({e})",
                "error_code": ErrorCode.INTERNAL_ERROR,
            }
        return ret


def release_worker_semaphore():
    worker.semaphore.release()


def acquire_worker_semaphore():
    # The semaphore is created lazily; it bounds in-flight requests to
    # limit_worker_concurrency.
    if worker.semaphore is None:
        worker.semaphore = asyncio.Semaphore(worker.limit_worker_concurrency)
    return worker.semaphore.acquire()


def create_background_tasks():
    background_tasks = BackgroundTasks()
    background_tasks.add_task(release_worker_semaphore)
    return background_tasks


@app.post("/worker_generate_stream")
|
|
async def api_generate_stream(request: Request):
|
|
params = await request.json()
|
|
await acquire_worker_semaphore()
|
|
generator = worker.generate_stream_gate(params)
|
|
background_tasks = create_background_tasks()
|
|
return StreamingResponse(generator, background=background_tasks)
|
|
|
|
|
|
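# A minimal client sketch for the streaming endpoint above, for illustration
# only. It assumes the worker listens on http://localhost:21002 (the default
# --port) and that the request params follow FastChat's generate_stream
# convention ("prompt" plus optional sampling keys such as "temperature" and
# "max_new_tokens"). The response is a stream of JSON objects separated by
# null bytes, as produced by generate_stream_gate:
#
#     import json, requests
#
#     resp = requests.post(
#         "http://localhost:21002/worker_generate_stream",
#         json={"prompt": "Hello", "temperature": 0.7, "max_new_tokens": 64},
#         stream=True,
#     )
#     for chunk in resp.iter_lines(delimiter=b"\0"):
#         if chunk:
#             print(json.loads(chunk.decode())["text"])

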
@app.post("/worker_generate")
|
|
async def api_generate(request: Request):
|
|
params = await request.json()
|
|
await acquire_worker_semaphore()
|
|
output = worker.generate_gate(params)
|
|
release_worker_semaphore()
|
|
return JSONResponse(output)
|
|
|
|
|
|
@app.post("/worker_get_embeddings")
|
|
async def api_get_embeddings(request: Request):
|
|
params = await request.json()
|
|
await acquire_worker_semaphore()
|
|
embedding = worker.get_embeddings(params)
|
|
release_worker_semaphore()
|
|
return JSONResponse(content=embedding)
|
|
|
|
|
|
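# Example request body for /worker_get_embeddings above, for illustration only;
# "input" is the list of texts iterated in ModelWorker.get_embeddings:
#
#     {"input": ["first sentence", "second sentence"]}

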
@app.post("/worker_get_status")
|
|
async def api_get_status(request: Request):
|
|
return worker.get_status()
|
|
|
|
|
|
@app.post("/count_token")
|
|
async def api_count_token(request: Request):
|
|
params = await request.json()
|
|
return worker.count_token(params)
|
|
|
|
|
|
@app.post("/worker_get_conv_template")
|
|
async def api_get_conv(request: Request):
|
|
return worker.get_conv_template()
|
|
|
|
|
|
@app.post("/model_details")
|
|
async def api_model_details(request: Request):
|
|
return {"context_length": worker.context_len}
|
|
|
|
|
|
if __name__ == "__main__":
    patch_fastchat()
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21002)
    parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
    parser.add_argument(
        "--controller-address", type=str, default="http://localhost:21001"
    )
    add_model_args(parser)
    parser.add_argument(
        "--model-names",
        type=lambda s: s.split(","),
        help="Optional comma-separated display names",
    )
    parser.add_argument(
        "--conv-template", type=str, default=None, help="Conversation prompt template."
    )
    parser.add_argument(
        "--limit-worker-concurrency",
        type=int,
        default=5,
        help="Limit the model concurrency to prevent OOM.",
    )
    parser.add_argument("--stream-interval", type=int, default=2)
    parser.add_argument("--no-register", action="store_true")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    if args.gpus:
        invalidInputError(len(args.gpus.split(",")) >= args.num_gpus,
                          f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

    gptq_config = GptqConfig(
        ckpt=args.gptq_ckpt or args.model_path,
        wbits=args.gptq_wbits,
        groupsize=args.gptq_groupsize,
        act_order=args.gptq_act_order,
    )
    awq_config = AWQConfig(
        ckpt=args.awq_ckpt or args.model_path,
        wbits=args.awq_wbits,
        groupsize=args.awq_groupsize,
    )

    worker = ModelWorker(
        args.controller_address,
        args.worker_address,
        worker_id,
        args.model_path,
        args.model_names,
        args.limit_worker_concurrency,
        no_register=args.no_register,
        device=args.device,
        num_gpus=args.num_gpus,
        max_gpu_memory=args.max_gpu_memory,
        load_8bit=args.load_8bit,
        cpu_offloading=args.cpu_offloading,
        gptq_config=gptq_config,
        awq_config=awq_config,
        stream_interval=args.stream_interval,
        conv_template=args.conv_template,
    )
    uvicorn.run(app, host=args.host, port=args.port, log_level="info")