diff --git a/docker/llm/README_backup.md b/docker/llm/README_backup.md
index 2b8e73af..e65d634e 100644
--- a/docker/llm/README_backup.md
+++ b/docker/llm/README_backup.md
@@ -42,7 +42,6 @@
 export CONTAINER_NAME=my_container
 export MODEL_PATH=/llm/models[change to your model path]
 docker run -itd \
-        --privileged \
         -p 12345:12345 \
         --cpuset-cpus="0-47" \
         --cpuset-mems="0" \
diff --git a/docker/llm/inference-cpp/README.md b/docker/llm/inference-cpp/README.md
index d5847a7a..13afd4b4 100644
--- a/docker/llm/inference-cpp/README.md
+++ b/docker/llm/inference-cpp/README.md
@@ -165,7 +165,6 @@ docker rm -f $CONTAINER_NAME
 docker run -itd \
         -v open-webui:/app/backend/data \
         -e PORT=8080 \
-        --privileged \
         --network=host \
         --name $CONTAINER_NAME \
         --restart always $DOCKER_IMAGE
diff --git a/docs/mddocs/DockerGuides/docker_cpp_xpu_quickstart.md b/docs/mddocs/DockerGuides/docker_cpp_xpu_quickstart.md
index d57ab55b..f02170b2 100644
--- a/docs/mddocs/DockerGuides/docker_cpp_xpu_quickstart.md
+++ b/docs/mddocs/DockerGuides/docker_cpp_xpu_quickstart.md
@@ -207,7 +207,6 @@ docker rm -f $CONTAINER_NAME
 docker run -itd \
         -v open-webui:/app/backend/data \
         -e PORT=8080 \
-        --privileged \
         --network=host \
         --name $CONTAINER_NAME \
         --restart always $DOCKER_IMAGE
diff --git a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
index 7ebc890a..dd05c68f 100644
--- a/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
+++ b/docs/mddocs/DockerGuides/vllm_docker_quickstart.md
@@ -24,8 +24,8 @@ docker pull intelanalytics/ipex-llm-serving-xpu:latest
 export DOCKER_IMAGE=intelanalytics/ipex-llm-serving-xpu:latest
 export CONTAINER_NAME=ipex-llm-serving-xpu-container
 sudo docker run -itd \
-        --privileged \
         --net=host \
+        --group-add video \
         --device=/dev/dri \
         -v /path/to/models:/llm/models \
         -e no_proxy=localhost,127.0.0.1 \
diff --git a/python/llm/dev/benchmark/ceval/README.md b/python/llm/dev/benchmark/ceval/README.md
index 11863da6..79dbe58e 100644
--- a/python/llm/dev/benchmark/ceval/README.md
+++ b/python/llm/dev/benchmark/ceval/README.md
@@ -55,8 +55,8 @@ python eval.py \
 2. **Start Docker Container**:
    ```bash
    docker run -td \
-          --privileged \
           --net=host \
+          --group-add video \
           --device=/dev/dri \
           --name=$CONTAINER_NAME \
           -v /home/intel/LLM:/llm/models/ \
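
Each hunk makes the same change: drop the blanket `--privileged` flag and grant GPU access explicitly instead. A minimal sketch of the resulting command from the `vllm_docker_quickstart.md` hunk is shown below; the `--name` flag and trailing image argument are assumptions, since the hunk above is truncated after the `no_proxy` line.

```bash
export DOCKER_IMAGE=intelanalytics/ipex-llm-serving-xpu:latest
export CONTAINER_NAME=ipex-llm-serving-xpu-container

# Explicit GPU access replaces --privileged: join the video group and expose /dev/dri.
# The --name and image arguments below are assumed; they are not shown in the hunk.
sudo docker run -itd \
        --net=host \
        --group-add video \
        --device=/dev/dri \
        -v /path/to/models:/llm/models \
        -e no_proxy=localhost,127.0.0.1 \
        --name=$CONTAINER_NAME \
        $DOCKER_IMAGE
```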