# ipex-llm/.github/workflows/manually_build.yml

name: Manually Build
on:
  # workflow_dispatch:
  #   inputs:
  #     checkout-ref:
  #       description: 'commit id (SHA-1 hash)'
  #       required: true
  #       type: string
  #     artifact:
  #       description: 'select which job to run("all" will make all jobs run)'
  #       required: true
  #       default: 'all'
  #       type: choice
  #       options:
  #         - all
  #         - ipex-llm-cpu
  #         - ipex-llm-xpu
  #         - ipex-llm-inference-cpp-xpu
  #         - ipex-llm-serving-cpu
  #         - ipex-llm-serving-xpu
  #         - ipex-llm-finetune-lora-cpu
  #         - ipex-llm-finetune-qlora-cpu
  #         - ipex-llm-finetune-qlora-cpu-k8s
  #         - ipex-llm-finetune-xpu
  #     tag:
  #       description: 'docker image tag (e.g. 2.1.0-SNAPSHOT)'
  #       required: true
  #       default: '2.1.0-SNAPSHOT'
  #       type: string
  workflow_call:
    inputs:
      checkout-ref:
        description: 'commit id (SHA-1 hash)'
        required: true
        type: string
      artifact:
        description: 'select which job to run ("all" will make all jobs run)'
        required: true
        default: 'all'
        type: string
      tag:
        description: 'docker image tag (e.g. 2.1.0-SNAPSHOT)'
        required: true
        default: '2.1.0-SNAPSHOT'
        type: string
      public:
        description: "whether to push the docker image to the public Docker Hub"
        required: true
        type: boolean
        default: true
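
  # A minimal sketch of how a caller workflow could invoke this reusable workflow
  # through the `workflow_call` trigger above. The caller's file name, trigger and
  # input values below are illustrative assumptions, not part of this repository:
  #
  #   # .github/workflows/example-caller.yml (hypothetical)
  #   jobs:
  #     build-images:
  #       uses: ./.github/workflows/manually_build.yml
  #       with:
  #         checkout-ref: ${{ github.sha }}
  #         artifact: 'ipex-llm-cpu'
  #         tag: '2.1.0-SNAPSHOT'
  #         public: false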
env:
  TAG: ${{ inputs.tag }}
permissions:
  contents: read
jobs:
  ipex-llm-finetune-lora-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-lora-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-lora-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-lora-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-lora-cpu
          cd docker/llm/finetune/lora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-finetune-qlora-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-finetune-qlora-cpu-k8s:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu-k8s' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu-k8s
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu-k8s ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu-k8s
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile.k8s .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-finetune-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-xpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-xpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-xpu
          cd docker/llm/finetune/xpu
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-xpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-xpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-xpu
          cd docker/llm/inference/xpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-inference-cpp-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-inference-cpp-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-inference-cpp-xpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-inference-cpp-xpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-inference-cpp-xpu
          cd docker/llm/inference-cpp/
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-cpu
          cd docker/llm/inference/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-serving-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-serving-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-serving-xpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-serving-xpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-serving-xpu
          cd docker/llm/serving/xpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
  ipex-llm-serving-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-serving-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire, AVX512]
    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ github.event.inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-serving-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-serving-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-serving-cpu
          cd docker/llm/serving/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ github.event.inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi
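
# After a successful build with `public: true`, the images are available on Docker Hub.
# A hypothetical pull command (substitute the tag that was actually built):
#
#   docker pull intelanalytics/ipex-llm-cpu:2.1.0-SNAPSHOT
#
# Builds with `public: false` are pushed to the internal registry instead
# (10.239.45.10/arda/intelanalytics/<image>:<tag>).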