From 9ac950fa527c365df263710654c197a680253149 Mon Sep 17 00:00:00 2001
From: Lilac09 <74996885+Zhengjin-Wang@users.noreply.github.com>
Date: Tue, 26 Sep 2023 13:22:11 +0800
Subject: [PATCH] Add bigdl llm cpu image build (#9047)

* modify Dockerfile

* add README.md

* add README.md

* Modify Dockerfile

* Add bigdl inference cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build
---
 .github/workflows/manually_build.yml             | 29 +++++++++++++++++
 .../workflows/manually_build_for_testing.yml     | 31 +++++++++++++++++++
 docker/llm/inference/cpu/docker/Dockerfile       | 10 ++++--
 3 files changed, 67 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/manually_build.yml b/.github/workflows/manually_build.yml
index 1e70c0a1..3f3741bc 100644
--- a/.github/workflows/manually_build.yml
+++ b/.github/workflows/manually_build.yml
@@ -10,6 +10,7 @@ on:
         type: choice
         options:
         - all
+        - bigdl-llm-cpu
         - bigdl-ppml-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-ref
@@ -54,6 +55,34 @@ permissions:
   packages: write
 
 jobs:
+  bigdl-llm-cpu:
+    if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
+    runs-on: [self-hosted, Shire]
+    steps:
+    - uses: actions/checkout@v3
+    - name: docker login
+      run: |
+        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
+    - name: bigdl-llm-cpu
+      run: |
+        echo "##############################################################"
+        echo "#######           bigdl-llm-cpu           ########"
+        echo "##############################################################"
+        export image=intelanalytics/bigdl-llm-cpu
+        cd docker/llm/inference/cpu/docker
+        sudo docker build \
+          --no-cache=true \
+          --build-arg http_proxy=${HTTP_PROXY} \
+          --build-arg https_proxy=${HTTPS_PROXY} \
+          --build-arg no_proxy=${NO_PROXY} \
+          --build-arg BASE_IMAGE_NAME=${base_image} \
+          --build-arg BASE_IMAGE_TAG=${TAG} \
+          -t ${image}:${TAG} -f ./Dockerfile .
+        sudo docker push ${image}:${TAG}
+        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+        sudo docker push 10.239.45.10/arda/${image}:${TAG}
+        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+
   bigdl-ppml-gramine-base:
     if: ${{ github.event.inputs.artifact == 'bigdl-ppml-gramine-base' || github.event.inputs.artifact == 'all' }}
     runs-on: [self-hosted, Shire]
diff --git a/.github/workflows/manually_build_for_testing.yml b/.github/workflows/manually_build_for_testing.yml
index c64f8201..1b723025 100644
--- a/.github/workflows/manually_build_for_testing.yml
+++ b/.github/workflows/manually_build_for_testing.yml
@@ -14,6 +14,7 @@ on:
         type: choice
         options:
         - all
+        - bigdl-llm-cpu
        - bigdl-ppml-gramine-base
        - bigdl-ppml-trusted-bigdl-llm-gramine-base
        - bigdl-ppml-trusted-bigdl-llm-gramine-ref
@@ -51,6 +52,36 @@ permissions:
   packages: write
 
 jobs:
+  bigdl-llm-cpu:
+    if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
+    runs-on: [self-hosted, Shire]
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event.inputs.sha }}
+    - name: docker login
+      run: |
+        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
+    - name: bigdl-llm-cpu
+      run: |
+        echo "##############################################################"
+        echo "#######           bigdl-llm-cpu           ########"
+        echo "##############################################################"
+        export image=intelanalytics/bigdl-llm-cpu
+        cd docker/llm/inference/cpu/docker
+        sudo docker build \
+          --no-cache=true \
+          --build-arg http_proxy=${HTTP_PROXY} \
+          --build-arg https_proxy=${HTTPS_PROXY} \
+          --build-arg no_proxy=${NO_PROXY} \
+          --build-arg BASE_IMAGE_NAME=${base_image} \
+          --build-arg BASE_IMAGE_TAG=${TAG} \
+          -t ${image}:${TAG} -f ./Dockerfile .
+        sudo docker push ${image}:${TAG}
+        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+        sudo docker push 10.239.45.10/arda/${image}:${TAG}
+        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+
   bigdl-ppml-gramine-base:
     if: ${{ github.event.inputs.artifact == 'bigdl-ppml-gramine-base' || github.event.inputs.artifact == 'all' }}
     runs-on: [self-hosted, Shire]
diff --git a/docker/llm/inference/cpu/docker/Dockerfile b/docker/llm/inference/cpu/docker/Dockerfile
index 9299f898..80747466 100644
--- a/docker/llm/inference/cpu/docker/Dockerfile
+++ b/docker/llm/inference/cpu/docker/Dockerfile
@@ -14,11 +14,15 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
     ln -s /usr/bin/python3 /usr/bin/python && \
     apt-get install -y python3-pip python3.9-dev python3-wheel python3.9-distutils && \
     curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
-    # Install FastChat from source requires PEP 660 support
+# Install FastChat from source requires PEP 660 support
     python3 get-pip.py && \
     rm get-pip.py && \
     pip install --upgrade requests argparse urllib3 && \
+    pip3 install --no-cache-dir --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu && \
     pip install --pre --upgrade bigdl-llm[all] && \
-    pip install --pre --upgrade bigdl-nano
+    pip install --pre --upgrade bigdl-nano && \
+# Download chat.py script
+    wget -P /root https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/portable-executable/chat.py && \
+    export PYTHONUNBUFFERED=1
 
-ENTRYPOINT ["/bin/bash"]
+ENTRYPOINT ["/bin/bash"]
\ No newline at end of file
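
A minimal sketch of exercising the new job end to end, for reference. The
workflow file name and the "artifact" input come from the patch above; the
"tag" input name is an assumption (the job consumes ${TAG}, but its definition
sits outside this diff), and chat.py's command-line options are likewise not
shown here.

    # Trigger the new build job via the GitHub CLI.
    # "tag" is an assumed input name; substitute whatever the workflow defines.
    gh workflow run manually_build.yml \
      -f artifact=bigdl-llm-cpu \
      -f tag=latest

    # Smoke-test the resulting image. ENTRYPOINT is /bin/bash, so this drops
    # into an interactive shell inside the container.
    docker run -it intelanalytics/bigdl-llm-cpu:latest

    # Inside the container: chat.py was fetched to /root by the Dockerfile change.
    python /root/chat.py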