Add bigdl llm cpu image build (#9047)

* modify Dockerfile

* add README.md

* add README.md

* Modify Dockerfile

* Add bigdl inference cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build
Lilac09 authored on 2023-09-26 13:22:11 +08:00, committed by GitHub
commit 9ac950fa52, parent a717352c59
3 changed files with 67 additions and 3 deletions

@@ -10,6 +10,7 @@ on:
         type: choice
         options:
         - all
+        - bigdl-llm-cpu
         - bigdl-ppml-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-ref
@@ -54,6 +55,34 @@ permissions:
   packages: write

 jobs:
+  bigdl-llm-cpu:
+    if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
+    runs-on: [self-hosted, Shire]
+    steps:
+    - uses: actions/checkout@v3
+    - name: docker login
+      run: |
+        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
+    - name: bigdl-llm-cpu
+      run: |
+        echo "##############################################################"
+        echo "####### bigdl-llm-cpu ########"
+        echo "##############################################################"
+        export image=intelanalytics/bigdl-llm-cpu
+        cd docker/llm/inference/cpu/docker
+        sudo docker build \
+          --no-cache=true \
+          --build-arg http_proxy=${HTTP_PROXY} \
+          --build-arg https_proxy=${HTTPS_PROXY} \
+          --build-arg no_proxy=${NO_PROXY} \
+          --build-arg BASE_IMAGE_NAME=${base_image} \
+          --build-arg BASE_IMAGE_TAG=${TAG} \
+          -t ${image}:${TAG} -f ./Dockerfile .
+        sudo docker push ${image}:${TAG}
+        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+        sudo docker push 10.239.45.10/arda/${image}:${TAG}
+        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+
   bigdl-ppml-gramine-base:
     if: ${{ github.event.inputs.artifact == 'bigdl-ppml-gramine-base' || github.event.inputs.artifact == 'all' }}
     runs-on: [self-hosted, Shire]

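This first hunk registers bigdl-llm-cpu as a new choice for the workflow's artifact input and adds a matching job that builds the image, pushes it to Docker Hub and to the private registry at 10.239.45.10/arda, then removes the local tags. As a minimal sketch of how a workflow_dispatch job like this could be kicked off from the GitHub CLI (the workflow file name below is a placeholder, not taken from this diff):

    # Placeholder workflow name; substitute the actual file under .github/workflows/.
    gh workflow run manually_build.yml -f artifact=bigdl-llm-cpu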
@@ -14,6 +14,7 @@ on:
         type: choice
         options:
         - all
+        - bigdl-llm-cpu
         - bigdl-ppml-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-base
         - bigdl-ppml-trusted-bigdl-llm-gramine-ref
@@ -51,6 +52,36 @@ permissions:
   packages: write

 jobs:
+  bigdl-llm-cpu:
+    if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
+    runs-on: [self-hosted, Shire]
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        ref: ${{ github.event.inputs.sha }}
+    - name: docker login
+      run: |
+        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
+    - name: bigdl-llm-cpu
+      run: |
+        echo "##############################################################"
+        echo "####### bigdl-llm-cpu ########"
+        echo "##############################################################"
+        export image=intelanalytics/bigdl-llm-cpu
+        cd docker/llm/inference/cpu/docker
+        sudo docker build \
+          --no-cache=true \
+          --build-arg http_proxy=${HTTP_PROXY} \
+          --build-arg https_proxy=${HTTPS_PROXY} \
+          --build-arg no_proxy=${NO_PROXY} \
+          --build-arg BASE_IMAGE_NAME=${base_image} \
+          --build-arg BASE_IMAGE_TAG=${TAG} \
+          -t ${image}:${TAG} -f ./Dockerfile .
+        sudo docker push ${image}:${TAG}
+        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+        sudo docker push 10.239.45.10/arda/${image}:${TAG}
+        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
+
   bigdl-ppml-gramine-base:
     if: ${{ github.event.inputs.artifact == 'bigdl-ppml-gramine-base' || github.event.inputs.artifact == 'all' }}
     runs-on: [self-hosted, Shire]

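The second workflow adds the same job with one difference: its checkout step pins ref to the workflow's sha input, so the image can be built from an arbitrary commit for testing. Under the same assumption of a placeholder file name, a dispatch against a specific commit might look like:

    # Placeholder workflow name; both inputs are defined by the workflow itself.
    gh workflow run manually_build_for_testing.yml -f artifact=bigdl-llm-cpu -f sha=<commit-sha>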
@@ -14,11 +14,15 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
     ln -s /usr/bin/python3 /usr/bin/python && \
     apt-get install -y python3-pip python3.9-dev python3-wheel python3.9-distutils && \
     curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
-    # Install FastChat from source requires PEP 660 support
+    # Install FastChat from source requires PEP 660 support
     python3 get-pip.py && \
     rm get-pip.py && \
     pip install --upgrade requests argparse urllib3 && \
     pip3 install --no-cache-dir --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu && \
     pip install --pre --upgrade bigdl-llm[all] && \
-    pip install --pre --upgrade bigdl-nano
-ENTRYPOINT ["/bin/bash"]
+    pip install --pre --upgrade bigdl-nano && \
+    # Download chat.py script
+    wget -P /root https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/portable-executable/chat.py && \
+    export PYTHONUNBUFFERED=1
+
+ENTRYPOINT ["/bin/bash"]
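The Dockerfile change chains the bigdl-nano install into the following steps, downloads chat.py into /root, and exports PYTHONUNBUFFERED within the same RUN layer. A minimal local smoke test of the resulting image might look like the following; the local tag is arbitrary, and the chat.py flag is an assumption to verify against the script itself:

    # Build from docker/llm/inference/cpu/docker; BASE_IMAGE_NAME/BASE_IMAGE_TAG
    # build args may be required, as passed by the workflow above.
    docker build -t bigdl-llm-cpu:local -f ./Dockerfile .
    # The image's ENTRYPOINT is /bin/bash, so this drops into a shell:
    docker run -it bigdl-llm-cpu:local
    # Inside the container, the downloaded script is at /root/chat.py, e.g.:
    #   python /root/chat.py --model-path <path-to-model>   # flag assumed; check the script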