diff --git a/docker/llm/serving/xpu/docker/README.md b/docker/llm/serving/xpu/docker/README.md
index 23d6aff6..d4318025 100644
--- a/docker/llm/serving/xpu/docker/README.md
+++ b/docker/llm/serving/xpu/docker/README.md
@@ -26,7 +26,7 @@ To map the `XPU` into the container, you need to specify `--device=/dev/dri` whe
 
 ```bash
 #/bin/bash
-export DOCKER_IMAGE=intelanalytics/ipex-llm-xpu:2.2.0-SNAPSHOT
+export DOCKER_IMAGE=intelanalytics/ipex-llm-serving-xpu:2.2.0-SNAPSHOT
 
 sudo docker run -itd \
         --net=host \
@@ -34,6 +34,7 @@ sudo docker run -itd \
         --memory="32G" \
         --name=CONTAINER_NAME \
         --shm-size="16g" \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
@@ -71,7 +72,7 @@ By default, the container is configured to automatically start the service when
 
 ```bash
 #/bin/bash
-export DOCKER_IMAGE=intelanalytics/ipex-llm-xpu:2.2.0-SNAPSHOT
+export DOCKER_IMAGE=intelanalytics/ipex-llm-serving-xpu:2.2.0-SNAPSHOT
 
 sudo docker run -itd \
         --net=host \
@@ -110,7 +111,7 @@ If you prefer to manually start the service or need to troubleshoot, you can ove
 
 ```bash
 #/bin/bash
-export DOCKER_IMAGE=intelanalytics/ipex-llm-xpu:2.2.0-SNAPSHOT
+export DOCKER_IMAGE=intelanalytics/ipex-llm-serving-xpu:2.2.0-SNAPSHOT
 
 sudo docker run -itd \
         --net=host \
diff --git a/docs/mddocs/DockerGuides/docker_pytorch_inference_gpu.md b/docs/mddocs/DockerGuides/docker_pytorch_inference_gpu.md
index 1a692fce..a729f08f 100644
--- a/docs/mddocs/DockerGuides/docker_pytorch_inference_gpu.md
+++ b/docs/mddocs/DockerGuides/docker_pytorch_inference_gpu.md
@@ -32,6 +32,7 @@ Start ipex-llm-xpu Docker Container. Choose one of the following commands to sta
         --name=$CONTAINER_NAME \
         --shm-size="16g" \
         -v $MODEL_PATH:/llm/models \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
@@ -52,6 +53,7 @@ Start ipex-llm-xpu Docker Container. Choose one of the following commands to sta
         --shm-size="16g" \
         -v $MODEL_PATH:/llm/llm-models \
         -v /usr/lib/wsl:/usr/lib/wsl \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
diff --git a/docs/mddocs/DockerGuides/docker_run_pytorch_inference_in_vscode.md b/docs/mddocs/DockerGuides/docker_run_pytorch_inference_in_vscode.md
index b6114ebd..46f3414f 100644
--- a/docs/mddocs/DockerGuides/docker_run_pytorch_inference_in_vscode.md
+++ b/docs/mddocs/DockerGuides/docker_run_pytorch_inference_in_vscode.md
@@ -66,6 +66,7 @@ Start ipex-llm-serving-xpu Docker Container. Choose one of the following command
         --name=$CONTAINER_NAME \
         --shm-size="16g" \
         -v $MODEL_PATH:/llm/models \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
@@ -86,6 +87,7 @@ Start ipex-llm-serving-xpu Docker Container. Choose one of the following command
         --shm-size="16g" \
         -v $MODEL_PATH:/llm/llm-models \
         -v /usr/lib/wsl:/usr/lib/wsl \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
diff --git a/docs/mddocs/DockerGuides/fastchat_docker_quickstart.md b/docs/mddocs/DockerGuides/fastchat_docker_quickstart.md
index f2a684e9..5980e88b 100644
--- a/docs/mddocs/DockerGuides/fastchat_docker_quickstart.md
+++ b/docs/mddocs/DockerGuides/fastchat_docker_quickstart.md
@@ -29,6 +29,7 @@ sudo docker run -itd \
         --memory="32G" \
         --name=$CONTAINER_NAME \
         --shm-size="16g" \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
diff --git a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
index 06025d93..d3c113fb 100644
--- a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
+++ b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
@@ -32,6 +32,7 @@ sudo docker run -itd \
         --memory="32G" \
         --name=$CONTAINER_NAME \
         --shm-size="16g" \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
@@ -855,6 +856,7 @@ We can set up model serving using `IPEX-LLM` as backend using FastChat, the foll
         -e http_proxy=... \
         -e https_proxy=... \
         -e no_proxy="127.0.0.1,localhost" \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 
diff --git a/python/llm/dev/benchmark/ceval/README.md b/python/llm/dev/benchmark/ceval/README.md
index 79dbe58e..7f0a6327 100644
--- a/python/llm/dev/benchmark/ceval/README.md
+++ b/python/llm/dev/benchmark/ceval/README.md
@@ -64,6 +64,7 @@ python eval.py \
         -e http_proxy=$HTTP_PROXY \
         -e https_proxy=$HTTPS_PROXY \
         --shm-size="16g" \
+        --entrypoint /bin/bash \
         $DOCKER_IMAGE
 ```
 