From c9b4cadd81f59470a3001bedf891d9463fde2a72 Mon Sep 17 00:00:00 2001
From: Guancheng Fu <110874468+gc-fu@users.noreply.github.com>
Date: Tue, 18 Jun 2024 16:23:53 +0800
Subject: [PATCH] fix vLLM/docker issues (#11348)

* fix

* fix

* ffix
---
 docker/llm/serving/xpu/docker/Dockerfile                   | 3 ++-
 docker/llm/serving/xpu/docker/benchmark_vllm_throughput.py | 2 +-
 .../source/doc/LLM/Quickstart/vLLM_quickstart.md           | 7 ++++---
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/docker/llm/serving/xpu/docker/Dockerfile b/docker/llm/serving/xpu/docker/Dockerfile
index 7db0bb6a..f1ecf089 100644
--- a/docker/llm/serving/xpu/docker/Dockerfile
+++ b/docker/llm/serving/xpu/docker/Dockerfile
@@ -21,7 +21,8 @@ RUN apt-get update && \
     pip install outlines==0.0.34 --no-deps && \
     pip install interegular cloudpickle diskcache joblib lark nest-asyncio numba scipy && \
     # For Qwen series models support
-    pip install transformers_stream_generator einops tiktoken
+    pip install transformers_stream_generator einops tiktoken && \
+    pip uninstall -y deepspeed
 
 COPY ./vllm_offline_inference.py /llm/
 COPY ./payload-1024.lua /llm/
diff --git a/docker/llm/serving/xpu/docker/benchmark_vllm_throughput.py b/docker/llm/serving/xpu/docker/benchmark_vllm_throughput.py
index a1102f8a..94e04584 100644
--- a/docker/llm/serving/xpu/docker/benchmark_vllm_throughput.py
+++ b/docker/llm/serving/xpu/docker/benchmark_vllm_throughput.py
@@ -345,7 +345,7 @@ if __name__ == "__main__":
     parser.add_argument(
         "--load-in-low-bit",
         type=str,
-        choices=["sym_int4", "fp8", "fp16"],
+        choices=["sym_int4", "fp8", "fp8_e4m3", "fp16", "fp6"],
         default="sym_int4",
         help="Low-bit format quantization with IPEX-LLM")
 
diff --git a/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md b/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
index 7c5b5cbc..71e34834 100644
--- a/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
+++ b/docs/readthedocs/source/doc/LLM/Quickstart/vLLM_quickstart.md
@@ -58,7 +58,7 @@ To run offline inference using vLLM for a quick impression, use the following ex
 .. note::
 
    Please modify the MODEL_PATH in offline_inference.py to use your chosen model.
-   You can try modify load_in_low_bit to different values in **[sym_int4, fp8, fp16]** to use different quantization dtype.
+   You can try modifying load_in_low_bit with different values from **[sym_int4, fp6, fp8, fp8_e4m3, fp16]** to use different quantization dtypes.
 ```
 
 ```bash
@@ -111,6 +111,7 @@ served_model_name="YOUR_MODEL_NAME"
 #  --max-model-len, --max-num-batched-tokens, --max-num-seqs
 #  to acquire the best performance
 
+# Change the value of --load-in-low-bit to one of [fp6, fp8, fp8_e4m3, fp16] to use a different low-bit format
 python -m ipex_llm.vllm.xpu.entrypoints.openai.api_server \
   --served-model-name $served_model_name \
   --port 8000 \
@@ -245,7 +246,7 @@ wget https://raw.githubusercontent.com/intel-analytics/ipex-llm/main/docker/llm/
 
 export MODEL="YOUR_MODEL"
 
-# You can change load-in-low-bit from values in [sym_int4, fp8, fp16]
+# You can change load-in-low-bit to any value in [sym_int4, fp6, fp8, fp8_e4m3, fp16]
 
 python3 ./benchmark_throughput.py \
     --backend vllm \
@@ -272,4 +273,4 @@ The following figure shows the result of benchmarking `Llama-2-7b-chat-hf` using
 .. tip::
 
    To find the best config that fits your workload, you may need to start the service and use tools like `wrk` or `jmeter` to perform a stress tests.
-```
\ No newline at end of file
+```
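
For readers who want to see where the `load_in_low_bit` option mentioned in the patched quickstart note actually lands, below is a minimal offline-inference sketch in the spirit of the `vllm_offline_inference.py` script this image copies into `/llm/`. The `IPEXLLMClass` import path, the constructor arguments, and the sample prompt are assumptions for illustration; the script shipped inside the container is the authoritative version.

```python
# Minimal sketch, assuming the IPEX-LLM wrapper around vllm.LLM exposed as
# IPEXLLMClass -- verify against /llm/vllm_offline_inference.py in the image.
from vllm import SamplingParams
from ipex_llm.vllm.xpu.engine import IPEXLLMClass as LLM

# Sample prompt and sampling settings (illustrative values).
prompts = ["What is the capital of France?"]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

llm = LLM(
    model="MODEL_PATH",          # replace with your chosen model, as the note says
    device="xpu",
    dtype="float16",
    enforce_eager=True,
    load_in_low_bit="sym_int4",  # try any of: sym_int4, fp6, fp8, fp8_e4m3, fp16
)

outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    print(output.outputs[0].text)
```

The same `load_in_low_bit` values are what the patch adds to the `--load-in-low-bit` choices in `benchmark_vllm_throughput.py` and documents for the OpenAI-compatible API server.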