Add more control arguments for benchmark_vllm_throughput (#11291)
parent 592f7aa61e
commit 2e75bbccf9
1 changed file with 12 additions and 3 deletions
@@ -77,6 +77,7 @@ def run_vllm(
     gpu_memory_utilization: float = 0.9,
     load_in_low_bit: str = "sym_int4",
     max_num_batched_tokens: int = 5000,
+    max_num_seqs: int = 256,
 ) -> float:
     from vllm import SamplingParams
     from ipex_llm.vllm.xpu.engine import IPEXLLMClass as LLM
@@ -94,7 +95,8 @@ def run_vllm(
               device=device,
               enable_prefix_caching=enable_prefix_caching,
               load_in_low_bit=load_in_low_bit,
-              max_num_batched_tokens=max_num_batched_tokens,)
+              max_num_batched_tokens=max_num_batched_tokens,
+              max_num_seqs=max_num_seqs,)


     # Add the requests to the engine.
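For context, the two engine arguments cap a scheduling step along different axes: max_num_seqs bounds how many sequences run concurrently in one iteration, while max_num_batched_tokens bounds the total tokens processed in that iteration. The sketch below is purely illustrative (it is not vLLM's or ipex-llm's scheduler code; the function name and logic are simplified assumptions) and only shows how the two caps jointly limit what fits into a single step.

# Illustrative sketch only -- not the benchmark's or vLLM's actual scheduler.
# It shows how max_num_seqs and max_num_batched_tokens jointly bound one step.
def admit_one_step(pending_prompt_lengths, max_num_seqs=256, max_num_batched_tokens=4096):
    admitted = []
    token_budget = max_num_batched_tokens
    for length in pending_prompt_lengths:
        # Stop when either the sequence cap or the token cap would be exceeded.
        if len(admitted) >= max_num_seqs or length > token_budget:
            break
        admitted.append(length)
        token_budget -= length
    return admitted

# With a 4096-token budget, only four 1024-token prompts fit into one step.
print(len(admit_one_step([1024] * 8)))  # -> 4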
@@ -238,7 +240,8 @@ def main(args: argparse.Namespace):
             args.tensor_parallel_size, args.seed, args.n, args.use_beam_search,
             args.trust_remote_code, args.dtype, args.max_model_len,
             args.enforce_eager, args.kv_cache_dtype, args.device,
-            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit, args.max_num_batched_tokens)
+            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit,
+            args.max_num_batched_tokens, args.max_num_seqs)
     elif args.backend == "hf":
         assert args.tensor_parallel_size == 1
         elapsed_time = run_hf(requests, args.model, tokenizer, args.n,
@@ -348,9 +351,15 @@ if __name__ == "__main__":
     parser.add_argument('--max-num-batched-tokens',
                         type=int,
-                        default=5000,
+                        default=4096,
                         help='maximum number of batched tokens per iteration')
+    parser.add_argument('--max-num-seqs',
+                        type=int,
+                        default=256,
+                        help='Maximum number of sequences per iteration.')

     args = parser.parse_args()
     if args.tokenizer is None:
         args.tokenizer = args.model
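Taken together, the change exposes both scheduler caps on the command line. Below is a minimal, self-contained sketch of just the two flags as they appear in this diff (defaults copied from the hunk above; the rest of the benchmark's argument parser is omitted, and the surrounding script structure is assumed).

import argparse

parser = argparse.ArgumentParser(description="Scheduler-cap flags added in this change (sketch).")
# Caps the total number of tokens batched per engine iteration (default lowered to 4096 here).
parser.add_argument('--max-num-batched-tokens', type=int, default=4096,
                    help='maximum number of batched tokens per iteration')
# New flag: caps the number of sequences scheduled per iteration.
parser.add_argument('--max-num-seqs', type=int, default=256,
                    help='Maximum number of sequences per iteration.')
args = parser.parse_args()
print(args.max_num_batched_tokens, args.max_num_seqs)

A benchmark run could then override both caps, for example: python benchmark_vllm_throughput.py --max-num-batched-tokens 4096 --max-num-seqs 128 (the remaining required arguments, such as the model path, are omitted here).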