From 3ef4aa98d12a37c0305a065fa7393bec82fc32b4 Mon Sep 17 00:00:00 2001
From: Guancheng Fu <110874468+gc-fu@users.noreply.github.com>
Date: Tue, 4 Jun 2024 18:46:27 +0800
Subject: [PATCH] Refine vllm_quickstart doc (#11199)

* refine doc

* refine
---
 .../cpu/docker/benchmark_vllm_throughput.py   | 31 +++++++++++++++++--
 docker/llm/serving/xpu/docker/Dockerfile      |  2 +-
 .../doc/LLM/Quickstart/vLLM_quickstart.md     |  6 ++++
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/docker/llm/serving/cpu/docker/benchmark_vllm_throughput.py b/docker/llm/serving/cpu/docker/benchmark_vllm_throughput.py
index 8f2b783a..0a80360f 100644
--- a/docker/llm/serving/cpu/docker/benchmark_vllm_throughput.py
+++ b/docker/llm/serving/cpu/docker/benchmark_vllm_throughput.py
@@ -76,9 +76,13 @@ def run_vllm(
     enable_prefix_caching: bool,
     gpu_memory_utilization: float = 0.9,
     load_in_low_bit: str = "sym_int4",
+    max_num_batched_tokens: int = 10450,
 ) -> float:
     from vllm import SamplingParams
     from ipex_llm.vllm.cpu.engine import IPEXLLMClass as LLM
+    warm_prompt = "hi " * (1024 - 1)
+    warm_requests = [(warm_prompt, 1024, 1024)
+                     for _ in range(8)]
     llm = LLM(model=model,
               tokenizer=tokenizer,
               quantization=quantization,
@@ -94,6 +98,22 @@ def run_vllm(
               enable_prefix_caching=enable_prefix_caching,
               load_in_low_bit=load_in_low_bit)
 
+    for prompt, _, output_len in warm_requests:
+        sampling_params = SamplingParams(
+            n=n,
+            temperature=0.0 if use_beam_search else 1.0,
+            top_p=1.0,
+            use_beam_search=use_beam_search,
+            ignore_eos=True,
+            max_tokens=output_len,
+        )
+        llm._add_request(
+            prompt=prompt,
+            prompt_token_ids=None,
+            sampling_params=sampling_params,
+        )
+    llm._run_engine(use_tqdm=True)
+
     # Add the requests to the engine.
     for prompt, _, output_len in requests:
         sampling_params = SamplingParams(
@@ -216,7 +236,9 @@ def main(args: argparse.Namespace):
             args.tensor_parallel_size, args.seed, args.n, args.use_beam_search,
             args.trust_remote_code, args.dtype, args.max_model_len,
             args.enforce_eager, args.kv_cache_dtype, args.device,
-            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit)
+            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit,
+            args.max_num_batched_tokens)
+
     elif args.backend == "hf":
         assert args.tensor_parallel_size == 1
         elapsed_time = run_hf(requests, args.model, tokenizer, args.n,
@@ -320,9 +342,14 @@ if __name__ == "__main__":
     parser.add_argument(
         "--load-in-low-bit",
         type=str,
-        choices=["sym_int4", "fp8", "fp16"],
+        choices=["sym_int4", "fp6", "fp8", "fp16"],
         default="sym_int4",
         help="Low-bit format quantization with IPEX-LLM")
+    parser.add_argument('--max-num-batched-tokens',
+                        type=int,
+                        default=10450,
+                        help='maximum number of batched tokens per iteration')
+
     args = parser.parse_args()
     if args.tokenizer is None:
         args.tokenizer = args.model
diff --git a/docker/llm/serving/xpu/docker/Dockerfile b/docker/llm/serving/xpu/docker/Dockerfile
index 325239b0..a3511325 100644
--- a/docker/llm/serving/xpu/docker/Dockerfile
+++ b/docker/llm/serving/xpu/docker/Dockerfile
@@ -12,7 +12,7 @@ RUN cd /llm &&\
     # Install ipex-llm[serving] only will update ipex_llm source code without updating
     # bigdl-core-xe, which will lead to problems
     apt-get update && \
-    apt-get install -y libfabric-dev wrk && \
+    apt-get install -y libfabric-dev wrk libaio-dev && \
     pip install --pre --upgrade ipex-llm[xpu,serving] && \
     pip install transformers==4.37.0 gradio==4.19.2 && \
     # Install vLLM-v2 dependencies
diff --git a/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md b/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
index 193579d8..7c5b5cbc 100644
--- a/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
+++ b/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
@@ -134,6 +134,12 @@ You can tune the service using these four arguments:
 3. `--max-num-batched-token`: Maximum number of batched tokens per iteration.
 4. `--max-num-seq`: Maximum number of sequences per iteration. Default: 256
 
+For longer input prompts, we suggest using `--max-num-batched-token` to restrict the service. The reasoning is that peak GPU memory usage occurs while the first token is being generated, so `--max-num-batched-token` caps the input size processed during first-token generation.
+
+`--max-num-seqs` restricts generation for both the first token and the remaining tokens: it limits the maximum batch size to the value set by `--max-num-seqs`.
+
+When an out-of-memory error occurs, the most straightforward fix is to lower `gpu-memory-utilization`. Alternatively, set `--max-num-batched-token` if peak memory occurs while generating the first token, or `--max-num-seq` if peak memory occurs while generating the remaining tokens.
+
 If the service have been booted successfully, the console will display messages similar to the following:
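
The tuning guidance added to the quickstart maps directly onto the offline engine that the benchmark script drives. Below is a minimal sketch, assuming the `IPEXLLMClass` entry point that appears in the patch and a hypothetical model id; the exact constructor keywords accepted can vary between vLLM and IPEX-LLM releases, so treat the argument names and values as illustrative rather than definitive.

    # Minimal sketch: passing the memory-related knobs discussed in the doc change
    # to the offline engine.  Model id and numeric values are illustrative only.
    from vllm import SamplingParams
    from ipex_llm.vllm.cpu.engine import IPEXLLMClass as LLM  # import path taken from the patch

    llm = LLM(
        model="meta-llama/Llama-2-7b-chat-hf",  # hypothetical model; substitute your own checkpoint
        load_in_low_bit="sym_int4",             # IPEX-LLM low-bit format (see --load-in-low-bit)
        gpu_memory_utilization=0.85,            # first lever to lower when hitting OOM
        max_num_batched_tokens=10450,           # caps the prefill (first-token) batch; matches the new default
        max_num_seqs=256,                       # caps batch size for both first-token and rest-token generation
    )

    # Generate a short completion; greedy decoding keeps the example deterministic.
    params = SamplingParams(temperature=0.0, max_tokens=64)
    for output in llm.generate(["What does IPEX-LLM do?"], params):
        print(output.outputs[0].text)

The same three knobs correspond to the service flags discussed above: lower `gpu_memory_utilization` first, then tighten `max_num_batched_tokens` for first-token (prefill) memory spikes or `max_num_seqs` for rest-token (decode) memory spikes.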