From b30bf7648e2afae75bacc96f7eef097321a89232 Mon Sep 17 00:00:00 2001
From: Xiangyu Tian <109123695+xiangyuT@users.noreply.github.com>
Date: Fri, 21 Jun 2024 13:00:06 +0800
Subject: [PATCH] Fix vLLM CPU api_server params (#11384)

---
 .../src/ipex_llm/vllm/cpu/entrypoints/openai/api_server.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/llm/src/ipex_llm/vllm/cpu/entrypoints/openai/api_server.py b/python/llm/src/ipex_llm/vllm/cpu/entrypoints/openai/api_server.py
index 31991d50..5065f1c1 100644
--- a/python/llm/src/ipex_llm/vllm/cpu/entrypoints/openai/api_server.py
+++ b/python/llm/src/ipex_llm/vllm/cpu/entrypoints/openai/api_server.py
@@ -175,7 +175,9 @@ if __name__ == "__main__":
     served_model_names = [args.model]
     engine_args = AsyncEngineArgs.from_cli_args(args)
     engine = IPEXLLMAsyncLLMEngine.from_engine_args(
-        engine_args, usage_context=UsageContext.OPENAI_API_SERVER)
+        engine_args, usage_context=UsageContext.OPENAI_API_SERVER,
+        load_in_low_bit=args.load_in_low_bit,
+    )
     openai_serving_chat = OpenAIServingChat(engine, served_model_names,
                                             args.response_role,
                                             args.lora_modules,
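
The patch threads the CLI-provided low-bit precision option through to the engine factory, which previously discarded it. Below is a minimal sketch of that pattern, not the IPEX-LLM source: the `--load-in-low-bit` flag, its default value, and the stand-in factory signature are assumptions for illustration only.

```python
# Sketch of forwarding an extra CLI-derived option into an engine factory,
# mirroring the patched call site in api_server.py. Names are hypothetical.
import argparse


def from_engine_args(engine_args, usage_context=None, load_in_low_bit=None):
    """Hypothetical stand-in for IPEXLLMAsyncLLMEngine.from_engine_args."""
    print(f"model={engine_args.model}, context={usage_context}, "
          f"low_bit={load_in_low_bit}")


parser = argparse.ArgumentParser()
parser.add_argument("--model", default="facebook/opt-125m")
parser.add_argument("--load-in-low-bit", default="sym_int4")  # assumed flag name
args = parser.parse_args()

# Mirrors the fixed call: the parsed value is now passed through to the
# engine instead of being silently dropped.
from_engine_args(engine_args=args,
                 usage_context="OPENAI_API_SERVER",
                 load_in_low_bit=args.load_in_low_bit)
```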