Fix concurrent issue in autoTP streaming. (#11150)
* add benchmark test
* update
This commit is contained in:
parent 7cc43aa67a
commit 751e1a4e29

6 changed files with 293 additions and 3 deletions
@@ -159,4 +159,23 @@ Please change the test url accordingly.
```bash
# Set -t/-c to the desired number of concurrent connections to test full throughput.
wrk -t1 -c1 -d5m -s ./wrk_script_1024.lua http://127.0.0.1:8000/generate/ --timeout 1m
```
## Using the `benchmark.py` Script

The `benchmark.py` script evaluates the performance of a streaming service by measuring response times and other relevant metrics. The sections below describe how to use it effectively.
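For orientation, the sketch below shows the kind of streaming request the script issues and times. It is a minimal illustration, assuming a service running locally on port 8000 that exposes the same `/generate_stream/` endpoint, `prompt`/`n_predict` payload fields, and JSON `message` lines that `benchmark.py` expects; it is not part of the script itself.

```python
# Minimal sketch of one timed streaming request (assumes a local service on
# port 8000 with the same endpoint and payload format used by benchmark.py).
import json
import time

import requests

URL = "http://localhost:8000/generate_stream/"
payload = {"prompt": "Once upon a time, there existed a little girl", "n_predict": 32}

start = time.perf_counter()
first_token_time = None
with requests.post(URL, json=payload, stream=True) as response:
    response.raise_for_status()
    for line in response.iter_lines():
        if not line:
            continue
        try:
            data = json.loads(line.decode("utf-8").strip())
        except json.JSONDecodeError:
            continue
        if data.get("message") is not None and first_token_time is None:
            first_token_time = time.perf_counter() - start

print(f"First token after {first_token_time}s, "
      f"full response after {time.perf_counter() - start}s")
```

`benchmark.py` wraps this same pattern in a `ThreadPoolExecutor` so that many such requests can be issued and timed concurrently.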
### Command Line Arguments

- `--prompt_length`: Specifies the length of the prompt used in the test. Acceptable values are `32`, `1024`, and `2048`.
- `--max_concurrent_requests`: Defines the levels of concurrency for the requests. You can specify multiple values to test different levels of concurrency in one run.
- `--max_new_tokens`: Sets the maximum number of new tokens that the model will generate per request. The default is `128`.

### Usage Example

You can run the script with specific settings for prompt length, concurrent requests, and max new tokens using the following command:

```bash
python benchmark.py --prompt_length 1024 --max_concurrent_requests 1 2 3 --max_new_tokens 128
```

This command sets the prompt length to 1024, tests concurrency levels of 1, 2, and 3, and configures the model to generate up to 128 new tokens per request. The results are saved in log files named after the concurrency level (`1.log`, `2.log`, `3.log`).
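To gather the results of a run in one place, a small helper like the one below can read those per-concurrency log files. This is only an illustrative sketch; it assumes the example command above was run in the current directory, so the concurrency levels are `1`, `2`, and `3`.

```python
# Illustrative helper: print the summaries that benchmark.py wrote to
# 1.log, 2.log and 3.log after the example run above.
from pathlib import Path

for concurrency in (1, 2, 3):
    log_path = Path(f"{concurrency}.log")
    if not log_path.exists():
        print(f"{log_path} not found; skipping")
        continue
    print(f"=== concurrency {concurrency} ===")
    print(log_path.read_text().strip())
    print()
```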
268 python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/benchmark.py Normal file

@@ -0,0 +1,268 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import requests
import time
from concurrent.futures import ThreadPoolExecutor
import concurrent
import numpy as np
from tqdm import tqdm
import json
import argparse
from typing import List, Tuple


# Execute a single streaming request and collect per-token timings.
def perform_request(session, url, payload, headers):
    start_time = time.perf_counter()
    with session.post(url, json=payload, headers=headers, stream=True) as response:
        response.raise_for_status()

        first_token_time = None
        last_token_time = 0
        # Placeholders: these inference-time fields are not populated from the
        # streamed response in this script and are returned as None.
        first_token_inference_time = None
        next_token_inference_time = None
        next_token_time = []
        i = 0
        for line in response.iter_lines():

            token_time = time.perf_counter() - start_time
            if line:
                data = line.decode("utf-8").strip()
                i = i + 1
                try:
                    json_data = json.loads(data)
                    if json_data["message"] is not None:
                        if first_token_time is None:
                            first_token_time = token_time
                        else:
                            next_token_time.append(token_time - last_token_time)
                        last_token_time = token_time
                except json.JSONDecodeError:
                    pass
    end_time = time.perf_counter()
    return (
        first_token_time,
        np.mean(next_token_time),
        end_time - start_time,
        first_token_inference_time,
        next_token_inference_time,
    )


# Repeat the URL list until it reaches target_length, so requests can be
# round-robined across multiple service URLs.
def extend_list_to_length(lst, target_length):
    if target_length <= len(lst):
        return lst[:]
    times = target_length // len(lst)
    remainder = target_length % len(lst)
    extended_list = lst * times + lst[:remainder]

    return extended_list


def benchmark(
    llm_urls,
    prompt,
    num_requests,
    max_concurrent_requests,
    max_tokens,
    is_warmup=False,
):

    headers = {"Content-Type": "application/json"}

    first_token_latencies = []
    next_token_latencies = []
    total_response_times = []
    first_token_inference_times = []
    next_token_inference_times = []
    cur_url_index = 0

    with requests.Session() as session:
        with ThreadPoolExecutor(max_workers=max_concurrent_requests) as executor:
            llm_url = llm_urls[cur_url_index]
            cur_url_index = (cur_url_index + 1) % len(llm_urls)

            cur_llm_urls = extend_list_to_length(llm_urls, max_concurrent_requests)
            cur_len = len(cur_llm_urls)

            payload = {
                "prompt": prompt,
                "n_predict": max_tokens,
            }
            futures = [
                executor.submit(
                    perform_request,
                    session,
                    cur_llm_urls[index % cur_len],
                    payload,
                    headers,
                )
                for index in range(num_requests)
            ]

            start_time = time.perf_counter()

            if is_warmup:
                phase = "Warm Up"
            else:
                phase = "Benchmarking"
            with tqdm(total=num_requests, desc=phase, unit="req", ncols=100) as pbar:
                for future in concurrent.futures.as_completed(futures):
                    try:
                        (
                            first_token_latency,
                            next_token_latency,
                            total_response_time,
                            first_token_inference_time,
                            next_token_inference_time,
                        ) = future.result()
                        first_token_latencies.append(first_token_latency)
                        next_token_latencies.append(next_token_latency)
                        total_response_times.append(total_response_time)
                        if first_token_inference_time:
                            first_token_inference_times.append(
                                first_token_inference_time
                            )
                        if next_token_inference_time:
                            next_token_inference_times.append(next_token_inference_time)
                    except Exception as e:
                        print(f"Request failed: {e}")
                    pbar.update(1)

            if is_warmup:
                return
            total_time = time.perf_counter() - start_time
            log_file = f"{max_concurrent_requests}.log"

            with open(log_file, "w") as file:
                print(
                    f"Total time for {num_requests} requests with {max_concurrent_requests} concurrent requests: {total_time} seconds.",
                    file=file,
                )
                print(
                    f"Average response time: {np.mean(total_response_times)}", file=file
                )

                print(
                    f"Token throughput: {num_requests * max_tokens / total_time}",
                    file=file,
                )
                # Note: assumes the default 1024-token prompt plus 128 new tokens
                # per request; the values are hardcoded here.
                print(
                    f"Total token throughput: {(128 + 1024) * num_requests / total_time}",
                    file=file,
                )
                print(file=file)

                if first_token_latencies:
                    average_first_token_latency = sum(first_token_latencies) / len(
                        first_token_latencies
                    )
                    p90_first_token_latency = np.percentile(first_token_latencies, 90)
                    p95_first_token_latency = np.percentile(first_token_latencies, 95)
                    average_first_token_inference_latency = np.mean(
                        first_token_inference_times
                    )
                    print(
                        f"Average first token latency: {average_first_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"P90 first token latency: {p90_first_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"P95 first token latency: {p95_first_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"Average first token inference latency: {average_first_token_inference_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(file=file)

                if next_token_latencies:
                    average_next_token_latency = sum(next_token_latencies) / len(
                        next_token_latencies
                    )
                    p90_next_token_latency = np.percentile(next_token_latencies, 90)
                    p95_next_token_latency = np.percentile(next_token_latencies, 95)
                    average_next_token_inference_latency = np.mean(
                        next_token_inference_times
                    )
                    print(
                        f"Average next token latency: {average_next_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"P90 next token latency: {p90_next_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"P95 next token latency: {p95_next_token_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(
                        f"Average next token inference latency: {average_next_token_inference_latency * 1000} milliseconds.",
                        file=file,
                    )
                    print(file=file)


LLM_URLS = [f"http://localhost:{PORT}/generate_stream/" for PORT in [8000]]

parser = argparse.ArgumentParser(description="Set prompt length.")
parser.add_argument(
    "--prompt_length",
    type=int,
    choices=[32, 1024, 2048],
    default=1024,
    help="Length of the prompt: 32, 1024, or 2048",
)
parser.add_argument(
    "--max_concurrent_requests",
    type=int,
    nargs="+",
    default=[1, 2, 4, 5, 6],
    help="List of maximum concurrent requests to test.",
)
parser.add_argument(
    "--max_new_tokens",
    type=int,
    default=128,
    help="Maximum number of new tokens that the model will generate per request.",
)
args = parser.parse_args()
PROMPT_LENGTH = args.prompt_length
PROMPT = open(f"prompt/{PROMPT_LENGTH}.txt", "r").read()
MAX_TOKENS = args.max_new_tokens


for MAX_CONCURRENT_REQUESTS in args.max_concurrent_requests:
    NUM_WARMUP = 5 * MAX_CONCURRENT_REQUESTS
    NUM_REQUESTS = 10 * MAX_CONCURRENT_REQUESTS

    # warm up
    benchmark(
        LLM_URLS,
        PROMPT,
        NUM_WARMUP,
        MAX_CONCURRENT_REQUESTS,
        MAX_TOKENS,
        is_warmup=True,
    )

    benchmark(LLM_URLS, PROMPT, NUM_REQUESTS, MAX_CONCURRENT_REQUESTS, MAX_TOKENS)
@@ -0,0 +1 @@
Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun. However, her parents were always telling her to stay close to home, to be careful, and to avoid any danger. But the little girl was stubborn, and she wanted to see what was on the other side of the mountain. So she sneaked out of the house one night, leaving a note for her parents, and set off on her journey. As she climbed the mountain, the little girl felt a sense of excitement and wonder. She had never been this far away from home before, and she couldnt wait to see what she would find on the other side. She climbed higher and higher, her lungs burning from the thin air, until she finally reached the top of the mountain. And there, she found a beautiful meadow filled with wildflowers and a sparkling stream. The little girl danced and played in the meadow, feeling free and alive. She knew she had to return home eventually, but for now, she was content to enjoy her adventure. As the sun began to set, the little girl reluctantly made her way back down the mountain, but she knew that she would never forget her adventure and the joy of discovering something new and exciting. And whenever she felt scared or unsure, she would remember the thrill of climbing the mountain and the beauty of the meadow on the other side, and she would know that she could face any challenge that came her way, with courage and determination. She carried the memories of her journey in her heart, a constant reminder of the strength she possessed. The little girl returned home to her worried parents, who had discovered her note and anxiously awaited her arrival. They scolded her for disobeying their instructions and venturing into the unknown. But as they looked into her sparkling eyes and saw the glow on her face, their anger softened. They realized that their little girl had grown, that she had experienced something extraordinary. The little girl shared her tales of the mountain and the meadow with her parents, painting vivid pictures with her words. She spoke of the breathtaking view from the mountaintop, where the world seemed to stretch endlessly before her. She described the delicate petals of the wildflowers, vibrant hues that danced in the gentle breeze. And she recounted the soothing melody of the sparkling stream, its waters reflecting the golden rays of the setting sun. Her parents listened intently, captivated by her story. They realized that their daughter had discovered a part of herself on that journey—a spirit of curiosity and a thirst for exploration. They saw that she had learned valuable lessons about independence, resilience, and the beauty that lies beyond ones comfort zone. From that day forward, the little girls parents encouraged her to pursue her dreams and embrace new experiences. They understood that while there were risks in the world, there were also rewards waiting to be discovered. They supported her as she continued to embark on adventures, always reminding her to stay safe but never stifling her spirit. As the years passed, the little girl grew into a remarkable woman, fearlessly exploring the world and making a difference wherever she went. The lessons she had learned on that fateful journey stayed with her, guiding her through challenges and inspiring her to live life to the fullest. 
And so, the once timid little girl became a symbol of courage and resilience, a reminder to all who knew her that the greatest joys in life often lie just beyond the mountains we fear to climb. Her story spread far and wide, inspiring others to embrace their own journeys and discover the wonders that awaited them. In the end, the little girls adventure became a timeless tale, passed down through generations, reminding us all that sometimes, the greatest rewards come to those who dare to step into the unknown and follow their hearts. With each passing day, the little girls story continued to inspire countless individuals, igniting a spark within their souls and encouraging them to embark on their own extraordinary adventures. The tale of her bravery and determination resonated deeply with people from all walks of life, reminding them of the limitless possibilities that awaited them beyond the boundaries of their comfort zones. People marveled at the little girls unwavering spirit and her unwavering belief in the power of dreams. They saw themselves reflected in her journey, finding solace in the knowledge that they too could overcome their fears and pursue their passions. The little girl's story became a beacon of hope, a testament to the human spirit
File diff suppressed because one or more lines are too long

@@ -0,0 +1 @@
Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun
@@ -284,6 +284,8 @@ async def generate_stream(prompt_request: PromptRequest):

async def process_requests():
    while True:
        # Pause briefly to ensure stability in concurrency
        await asyncio.sleep(0.1)
        request_ids, prompt_requests = [], []
        cur_batched_tokens = 0
@@ -343,8 +345,6 @@ async def process_requests():
            f"First token latency: {model.first_cost}, next token latency: {model.rest_cost_mean}, generate time: {generate_time}"
        )

        await asyncio.sleep(0)


@app.on_event("startup")
async def startup_event():
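These two hunks carry the concurrency fix itself: `process_requests()` now pauses for 100 ms at the top of each batching iteration, giving concurrently arriving `generate_stream` requests time to be enqueued before a batch is assembled, rather than relying only on the zero-length `asyncio.sleep(0)` yield shown in the second hunk. The snippet below is a simplified, self-contained sketch of that batching-loop pattern; the queue, batch limit, and simulated clients are illustrative assumptions, not the actual serving code.

```python
# Simplified sketch of the batching loop touched by this change. The queue,
# batch limit, and simulated clients are illustrative, not the real serve code.
import asyncio

MAX_BATCH = 4  # assumed batch limit for the sketch


async def process_requests(request_queue: asyncio.Queue) -> None:
    while True:
        # Pause briefly so concurrently arriving requests can queue up,
        # instead of busy-looping with a zero-length sleep.
        await asyncio.sleep(0.1)
        batch = []
        while not request_queue.empty() and len(batch) < MAX_BATCH:
            batch.append(request_queue.get_nowait())
        if batch:
            print(f"processing a batch of {len(batch)} prompts: {batch}")


async def main() -> None:
    request_queue: asyncio.Queue = asyncio.Queue()
    worker = asyncio.create_task(process_requests(request_queue))
    # Simulate several concurrent generate_stream handlers enqueuing prompts.
    await asyncio.gather(*(request_queue.put(f"prompt-{i}") for i in range(8)))
    await asyncio.sleep(0.3)  # give the worker time to drain the queue
    worker.cancel()


if __name__ == "__main__":
    asyncio.run(main())
```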