diff --git a/docker/llm/inference/cpu/docker/Dockerfile b/docker/llm/inference/cpu/docker/Dockerfile
index 97bb2a0d..1e5143f9 100644
--- a/docker/llm/inference/cpu/docker/Dockerfile
+++ b/docker/llm/inference/cpu/docker/Dockerfile
@@ -13,6 +13,7 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
     ln -s /usr/bin/python3 /usr/bin/python && \
     apt-get install -y python3-pip python3.9-dev python3-wheel python3.9-distutils && \
     pip3 install --no-cache --upgrade requests argparse urllib3 && \
-    pip3 install --pre --upgrade bigdl-llm[all]
+    pip3 install --pre --upgrade bigdl-llm[all] && \
+    pip3 install --pre --upgrade bigdl-nano
 
 ENTRYPOINT ["/bin/bash"]
\ No newline at end of file
diff --git a/docker/llm/inference/cpu/docker/README.md b/docker/llm/inference/cpu/docker/README.md
new file mode 100644
index 00000000..805cfc07
--- /dev/null
+++ b/docker/llm/inference/cpu/docker/README.md
@@ -0,0 +1,34 @@
+## Build/Use BigDL-LLM CPU Image
+
+### Build Image
+```bash
+docker build \
+  --build-arg http_proxy=.. \
+  --build-arg https_proxy=.. \
+  --build-arg no_proxy=.. \
+  --rm --no-cache -t intelanalytics/bigdl-llm-cpu:2.4.0-SNAPSHOT .
+```
+
+
+### Use the image for CPU inference
+
+
+An example of starting the container:
+```bash
+#!/bin/bash
+export DOCKER_IMAGE=intelanalytics/bigdl-llm-cpu:2.4.0-SNAPSHOT
+
+sudo docker run -itd \
+  --net=host \
+  --cpuset-cpus="0-47" \
+  --cpuset-mems="0" \
+  --memory="32G" \
+  --name=CONTAINER_NAME \
+  --shm-size="16g" \
+  $DOCKER_IMAGE
+```
+
+
+After the container boots, you can get a shell inside it with `docker exec`.
+
+To run inference with `BigDL-LLM` on CPU, refer to this [documentation](https://github.com/intel-analytics/BigDL/tree/main/python/llm#cpu-int4).
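The new README mentions attaching to the container with `docker exec` but does not show the invocation. A minimal sketch, assuming the container was started with the placeholder name `CONTAINER_NAME` from the `docker run` example above:

```bash
# Open an interactive shell in the running container
# (CONTAINER_NAME is the placeholder name from the docker run example).
sudo docker exec -it CONTAINER_NAME bash

# Once inside, a quick sanity check that the packages installed by the
# Dockerfile are present:
pip3 show bigdl-llm bigdl-nano
```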