Add bigdl llm xpu image build (#9062)

* modify Dockerfile

* add README.md

* add README.md

* Modify Dockerfile

* Add bigdl inference cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build

* Add bigdl llm cpu image build

* Modify Dockerfile

* Add bigdl inference cpu image build

* Add bigdl inference cpu image build

* Add bigdl llm xpu image build
This commit is contained in:
Lilac09 2023-09-26 14:29:03 +08:00 committed by GitHub
parent 9ac950fa52
commit ecee02b34d
3 changed files with 62 additions and 2 deletions

View file

@@ -10,6 +10,7 @@ on:
type: choice
options:
- all
- bigdl-llm-xpu
- bigdl-llm-cpu
- bigdl-ppml-gramine-base
- bigdl-ppml-trusted-bigdl-llm-gramine-base
@@ -55,6 +56,34 @@ permissions:
packages: write
jobs:
# Job: build the intelanalytics/bigdl-llm-xpu Docker image and publish it to
# Docker Hub and to an internal registry (10.239.45.10/arda).
# NOTE(review): leading YAML indentation appears flattened by this diff view;
# in the real workflow file these keys nest under `jobs:`.
bigdl-llm-xpu:
# Gate: run only when this artifact (or 'all') was selected in the
# workflow_dispatch input choice.
if: ${{ github.event.inputs.artifact == 'bigdl-llm-xpu' || github.event.inputs.artifact == 'all' }}
runs-on: [self-hosted, Shire]
steps:
- uses: actions/checkout@v3
- name: docker login
# DOCKERHUB_USERNAME / DOCKERHUB_PASSWORD are read from the environment --
# presumably injected at the workflow or runner level; not visible in this chunk.
run: |
docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
- name: bigdl-llm-xpu
run: |
echo "##############################################################"
echo "####### bigdl-llm-xpu ########"
echo "##############################################################"
export image=intelanalytics/bigdl-llm-xpu
# Build context and Dockerfile live under the XPU inference directory.
cd docker/llm/inference/xpu/docker
# --no-cache forces a fresh build; proxy settings and base-image name/tag
# come from env vars (HTTP_PROXY/HTTPS_PROXY/NO_PROXY, base_image, TAG) --
# their source is outside this chunk; TODO confirm where they are defined.
sudo docker build \
--no-cache=true \
--build-arg http_proxy=${HTTP_PROXY} \
--build-arg https_proxy=${HTTPS_PROXY} \
--build-arg no_proxy=${NO_PROXY} \
--build-arg BASE_IMAGE_NAME=${base_image} \
--build-arg BASE_IMAGE_TAG=${TAG} \
-t ${image}:${TAG} -f ./Dockerfile .
# Push to Docker Hub, re-tag for the internal registry and push there too,
# then force-remove both local tags to free disk on the self-hosted runner.
sudo docker push ${image}:${TAG}
sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
sudo docker push 10.239.45.10/arda/${image}:${TAG}
sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
bigdl-llm-cpu:
if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
runs-on: [self-hosted, Shire]

View file

@@ -14,6 +14,7 @@ on:
type: choice
options:
- all
- bigdl-llm-xpu
- bigdl-llm-cpu
- bigdl-ppml-gramine-base
- bigdl-ppml-trusted-bigdl-llm-gramine-base
@@ -52,6 +53,36 @@ permissions:
packages: write
jobs:
# Job: build the intelanalytics/bigdl-llm-xpu Docker image and publish it to
# Docker Hub and to an internal registry (10.239.45.10/arda). Unlike the
# sibling workflow's job, this one checks out an explicit commit SHA.
# NOTE(review): leading YAML indentation appears flattened by this diff view;
# in the real workflow file these keys nest under `jobs:`.
bigdl-llm-xpu:
# Gate: run only when this artifact (or 'all') was selected in the
# workflow_dispatch input choice.
if: ${{ github.event.inputs.artifact == 'bigdl-llm-xpu' || github.event.inputs.artifact == 'all' }}
runs-on: [self-hosted, Shire]
steps:
- uses: actions/checkout@v3
with:
# Check out the user-supplied commit SHA rather than the default ref.
ref: ${{ github.event.inputs.sha }}
- name: docker login
# DOCKERHUB_USERNAME / DOCKERHUB_PASSWORD are read from the environment --
# presumably injected at the workflow or runner level; not visible in this chunk.
run: |
docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
- name: bigdl-llm-xpu
run: |
echo "##############################################################"
echo "####### bigdl-llm-xpu ########"
echo "##############################################################"
export image=intelanalytics/bigdl-llm-xpu
# Build context and Dockerfile live under the XPU inference directory.
cd docker/llm/inference/xpu/docker
# --no-cache forces a fresh build; proxy settings and base-image name/tag
# come from env vars (HTTP_PROXY/HTTPS_PROXY/NO_PROXY, base_image, TAG) --
# their source is outside this chunk; TODO confirm where they are defined.
sudo docker build \
--no-cache=true \
--build-arg http_proxy=${HTTP_PROXY} \
--build-arg https_proxy=${HTTPS_PROXY} \
--build-arg no_proxy=${NO_PROXY} \
--build-arg BASE_IMAGE_NAME=${base_image} \
--build-arg BASE_IMAGE_TAG=${TAG} \
-t ${image}:${TAG} -f ./Dockerfile .
# Push to Docker Hub, re-tag for the internal registry and push there too,
# then force-remove both local tags to free disk on the self-hosted runner.
sudo docker push ${image}:${TAG}
sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
sudo docker push 10.239.45.10/arda/${image}:${TAG}
sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
bigdl-llm-cpu:
if: ${{ github.event.inputs.artifact == 'bigdl-llm-cpu' || github.event.inputs.artifact == 'all' }}
runs-on: [self-hosted, Shire]

View file

@@ -1,7 +1,7 @@
FROM intel/oneapi-basekit:2023.2.1-devel-ubuntu22.04
ENV http_proxy $HTTP_PROXY
ENV https_proxy $HTTP_PROXY
ARG http_proxy
ARG https_proxy
ENV TZ=Asia/Shanghai