Refine vllm_quickstart doc (#11199)

* refine doc

* refine
Guancheng Fu 2024-06-04 18:46:27 +08:00 committed by GitHub
parent 744042d1b2
commit 3ef4aa98d1
3 changed files with 36 additions and 3 deletions


@@ -76,9 +76,13 @@ def run_vllm(
     enable_prefix_caching: bool,
     gpu_memory_utilization: float = 0.9,
     load_in_low_bit: str = "sym_int4",
+    max_num_batched_tokens: int = 10450,
 ) -> float:
     from vllm import SamplingParams
     from ipex_llm.vllm.cpu.engine import IPEXLLMClass as LLM
+    warm_prompt = "hi " * (1024 - 1)
+    warm_requests = [(warm_prompt, 1024, 1024)
+                      for _ in range(8)]
     llm = LLM(model=model,
               tokenizer=tokenizer,
               quantization=quantization,
@@ -94,6 +98,22 @@ def run_vllm(
               enable_prefix_caching=enable_prefix_caching,
               load_in_low_bit=load_in_low_bit)
+    for prompt, _, output_len in warm_requests:
+        sampling_params = SamplingParams(
+            n=n,
+            temperature=0.0 if use_beam_search else 1.0,
+            top_p=1.0,
+            use_beam_search=use_beam_search,
+            ignore_eos=True,
+            max_tokens=output_len,
+        )
+        llm._add_request(
+            prompt=prompt,
+            prompt_token_ids=None,
+            sampling_params=sampling_params,
+        )
+    llm._run_engine(use_tqdm=True)
     # Add the requests to the engine.
     for prompt, _, output_len in requests:
         sampling_params = SamplingParams(
@@ -216,7 +236,9 @@ def main(args: argparse.Namespace):
             args.tensor_parallel_size, args.seed, args.n, args.use_beam_search,
             args.trust_remote_code, args.dtype, args.max_model_len,
             args.enforce_eager, args.kv_cache_dtype, args.device,
-            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit)
+            args.enable_prefix_caching, args.gpu_memory_utilization, args.load_in_low_bit,
+            args.max_num_batched_tokens)
     elif args.backend == "hf":
         assert args.tensor_parallel_size == 1
         elapsed_time = run_hf(requests, args.model, tokenizer, args.n,
@@ -320,9 +342,14 @@ if __name__ == "__main__":
     parser.add_argument(
         "--load-in-low-bit",
         type=str,
-        choices=["sym_int4", "fp8", "fp16"],
+        choices=["sym_int4", "fp6", "fp8", "fp16"],
         default="sym_int4",
         help="Low-bit format quantization with IPEX-LLM")
+    parser.add_argument('--max-num-batched-tokens',
+                        type=int,
+                        default=10450,
+                        help='maximum number of batched tokens per iteration')
     args = parser.parse_args()
     if args.tokenizer is None:
         args.tokenizer = args.model
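
As an aside for readers who want to reproduce the warm-up idea outside this benchmark script: the same effect can be had with the public vLLM API instead of the private `_add_request`/`_run_engine` helpers used above, so that one-time start-up costs (kernel compilation, memory-pool growth) stay out of the measured throughput. The sketch below is only an illustration; the model name and token counts are placeholders, not values from this commit.

```python
from vllm import LLM, SamplingParams

# Placeholder model; any locally available model works for a warm-up pass.
llm = LLM(model="facebook/opt-125m",
          max_num_batched_tokens=10450)  # same knob the new CLI flag exposes

# A few fixed-length dummy prompts so the timed run starts from warm kernels
# and an already-grown memory pool; the outputs themselves are discarded.
warm_prompt = "hi " * (512 - 1)
warm_params = SamplingParams(n=1,
                             temperature=1.0,
                             top_p=1.0,
                             ignore_eos=True,   # force a fixed output length
                             max_tokens=512)
llm.generate([warm_prompt] * 8, warm_params)
```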


@@ -12,7 +12,7 @@ RUN cd /llm &&\
    # Install ipex-llm[serving] only will update ipex_llm source code without updating
    # bigdl-core-xe, which will lead to problems
    apt-get update && \
-   apt-get install -y libfabric-dev wrk && \
+   apt-get install -y libfabric-dev wrk libaio-dev && \
    pip install --pre --upgrade ipex-llm[xpu,serving] && \
    pip install transformers==4.37.0 gradio==4.19.2 && \
    # Install vLLM-v2 dependencies


@@ -134,6 +134,12 @@ You can tune the service using these four arguments:
 3. `--max-num-batched-tokens`: Maximum number of batched tokens per iteration.
 4. `--max-num-seqs`: Maximum number of sequences per iteration. Default: 256
+For longer input prompts, we suggest using `--max-num-batched-tokens` to restrict the service. The reasoning is that peak GPU memory usage occurs while the first token is being generated, so limiting the batched tokens restricts the input size handled at that stage.
+`--max-num-seqs` restricts generation for both the first token and the remaining tokens: it caps the maximum batch size at the value set by `--max-num-seqs`.
+When an out-of-memory error occurs, the most obvious solution is to reduce `gpu-memory-utilization`. Alternatively, set `--max-num-batched-tokens` if the peak memory occurs while generating the first token, or `--max-num-seqs` if it occurs while generating the remaining tokens.
 If the service has been booted successfully, the console will display messages similar to the following:
 <a href="https://llm-assets.readthedocs.io/en/latest/_images/start-vllm-service.png" target="_blank">
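
To make the relationship between these knobs concrete, here is a minimal sketch of how they map onto the upstream vLLM `EngineArgs`, which is where the scheduler limits described above live. The model name and numeric values are illustrative placeholders, not recommendations from this commit.

```python
from vllm import EngineArgs

# Minimal sketch (placeholder model and values): which tuning knob bounds
# which phase of generation.
engine_args = EngineArgs(
    model="meta-llama/Llama-2-7b-hf",  # placeholder model
    gpu_memory_utilization=0.85,       # fraction of GPU memory vLLM may claim;
                                       # the first lever to lower on OOM
    max_num_batched_tokens=10450,      # caps tokens scheduled per step, i.e.
                                       # bounds prefill (first-token) peak memory
    max_num_seqs=256,                  # caps sequences per step, i.e. bounds
                                       # the decode (rest-token) batch size
)
print(engine_args)
```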