Fix vllm api_server v1/models error (#12867)
parent 8077850452
commit 3ea5389a99
1 changed file with 7 additions and 3 deletions
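In short: `show_available_models` previously obtained its handler via `base(raw_request)`; this commit adds a dedicated `models(request)` accessor that returns `request.app.state.openai_serving_models` and uses it instead. The local variable is also renamed from `models` to `models_`: once a module-level `models()` function exists, assigning to `models` anywhere inside the endpoint would make it a local name and the earlier `handler = models(raw_request)` call would raise `UnboundLocalError`.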
@@ -278,6 +278,10 @@ def base(request: Request) -> OpenAIServing:
     return tokenization(request)
 
 
+def models(request: Request) -> OpenAIServingModels:
+    return request.app.state.openai_serving_models
+
+
 def chat(request: Request) -> Optional[OpenAIServingChat]:
     return request.app.state.openai_serving_chat
 
@@ -345,10 +349,10 @@ async def detokenize(request: DetokenizeRequest, raw_request: Request):
 
 @router.get("/v1/models")
 async def show_available_models(raw_request: Request):
-    handler = base(raw_request)
+    handler = models(raw_request)
 
-    models = await handler.show_available_models()
-    return JSONResponse(content=models.model_dump())
+    models_ = await handler.show_available_models()
+    return JSONResponse(content=models_.model_dump())
 
 
 @router.get("/version")
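For reference, a minimal sketch of exercising the fixed endpoint from a client. The host, port, and use of the `requests` library are assumptions for illustration, not part of this commit; it presumes a vLLM OpenAI-compatible API server is already running.

# Minimal sketch: query the /v1/models endpoint that this commit fixes.
# Assumption: a vLLM OpenAI-compatible server is serving at
# http://localhost:8000 (e.g. started via the openai api_server entrypoint).
import requests

resp = requests.get("http://localhost:8000/v1/models")
resp.raise_for_status()

# The endpoint returns an OpenAI-style model list:
# {"object": "list", "data": [{"id": "...", "object": "model", ...}, ...]}
for model in resp.json()["data"]:
    print(model["id"])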