Fix vllm api_server v1/models error (#12867)

Wang, Jian4 authored 2025-02-21 11:08:29 +08:00 (committed by GitHub)
parent 8077850452
commit 3ea5389a99


@@ -278,6 +278,10 @@ def base(request: Request) -> OpenAIServing:
     return tokenization(request)
 
 
+def models(request: Request) -> OpenAIServingModels:
+    return request.app.state.openai_serving_models
+
+
 def chat(request: Request) -> Optional[OpenAIServingChat]:
     return request.app.state.openai_serving_chat
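The added models() resolver mirrors the existing chat()/tokenization() helpers: the handler object lives on app.state and is looked up per request. Below is a minimal, self-contained sketch of that pattern; DummyModels and the demo model id are hypothetical stand-ins, not vLLM's actual OpenAIServingModels implementation.

```python
# Minimal sketch of the app.state resolver pattern used in the diff above.
# DummyModels is a hypothetical stand-in for OpenAIServingModels.
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse


class DummyModels:
    async def show_available_models(self):
        # OpenAI-style model list payload (illustrative only).
        return {"object": "list", "data": [{"id": "demo-model"}]}


app = FastAPI()
# The real server populates this during app/engine initialization.
app.state.openai_serving_models = DummyModels()


def models(request: Request) -> DummyModels:
    # Per-request lookup of the handler stored on app.state.
    return request.app.state.openai_serving_models


@app.get("/v1/models")
async def show_available_models(raw_request: Request):
    handler = models(raw_request)
    models_ = await handler.show_available_models()
    return JSONResponse(content=models_)
```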
@@ -345,10 +349,10 @@ async def detokenize(request: DetokenizeRequest, raw_request: Request):
 
 @router.get("/v1/models")
 async def show_available_models(raw_request: Request):
-    handler = base(raw_request)
+    handler = models(raw_request)
 
-    models = await handler.show_available_models()
-    return JSONResponse(content=models.model_dump())
+    models_ = await handler.show_available_models()
+    return JSONResponse(content=models_.model_dump())
 
 @router.get("/version")