[LLM] Revert compile OS for llm build workflow (#8732)

* use almalinux to build
Authored by xingyuan li on 2023-08-11 17:47:45 +09:00; committed by GitHub
parent f91035c298
commit 1cb8f5abbd
2 changed files with 44 additions and 101 deletions

View file

@@ -10,6 +10,6 @@ runs:
         pip install requests
         bash python/llm/dev/release_default_linux.sh default false
         whl_name=$(ls python/llm/dist)
-        pip install -i https://pypi.python.org/simple "python/llm/dist/${whl_name}[all]"
+        pip install -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]"
         pip install pytest
         bash python/llm/test/run-llm-install-tests.sh
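Note: the only change to this composite action is the added `--force-reinstall`. On a persistent self-hosted runner an earlier run may have left a wheel with the same name and version installed, and a plain `pip install` would treat it as already satisfied and keep the stale copy; forcing the reinstall guarantees the freshly built wheel under python/llm/dist is the one being tested. A minimal sketch of the same idea as a standalone step (step name hypothetical, paths as in the action above):

      - name: Install freshly built wheel
        shell: bash
        run: |
          # reinstall even if a wheel with an identical version is already present
          whl_name=$(ls python/llm/dist)
          pip install --force-reinstall "python/llm/dist/${whl_name}[all]"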

View file

@@ -17,8 +17,19 @@ on:
     paths:
       - ".github/workflows/llm-binary-build.yml"
   workflow_dispatch:
+    inputs:
+      llmcpp-ref:
+        description: 'Ref of llm.cpp code'
+        default: ''
+        required: false
+        type: string
   workflow_call:
+    inputs:
+      llmcpp-ref:
+        description: 'Ref of llm.cpp code'
+        default: ''
+        required: false
+        type: string
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
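Note: the new `llmcpp-ref` input is declared twice because `workflow_dispatch` (manual runs) and `workflow_call` (reuse from another workflow) keep separate input lists. A minimal sketch of a caller passing it through `workflow_call` (the job name and ref value are hypothetical; when the `with:` entry is omitted the input falls back to the declared default ''):

    jobs:
      llm-binary-build:
        uses: ./.github/workflows/llm-binary-build.yml
        with:
          llmcpp-ref: "some-llm-cpp-branch"   # branch, tag, or commit SHA of intel-analytics/llm.cpp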
@@ -34,62 +45,32 @@ jobs:
           name: linux-avxvnni
   linux-build-avxvnni:
-    runs-on: [self-hosted, AVX2, ubuntu-18.04-lts]
+    runs-on: [self-hosted, AVX2, almalinux8]
     needs: check-linux-avxvnni-artifact
     if: needs.check-linux-avxvnni-artifact.outputs.if-exists == 'false'
     steps:
       - name: Set access token
         run: |
           echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV"
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.9"
       - name: Install Build Environment
         shell: bash
         run: |
           export http_proxy=${HTTP_PROXY}
           export https_proxy=${HTTPS_PROXY}
-          add-apt-repository -y ppa:git-core/ppa
-          add-apt-repository -y ppa:ubuntu-toolchain-r/test
-          apt update
-          apt install -y git
-          apt install -y build-essential
-          apt install -y gcc-11 g++-11
-          # install binutils
-          if [[ "$(ld -v | awk '{print $NF}')" != "2.36.1" ]]; then
-            wget http://ftp.gnu.org/gnu/binutils/binutils-2.36.1.tar.gz
-            tar xzf binutils-2.36.1.tar.gz
-            cd binutils-2.36.1
-            ./configure
-            make
-            make install
-            cd ..
-          fi
-          cd ..
-          if [ -f "cmake-3.27.1-linux-x86_64.tar.gz" ]; then
-            actual_sha256=$(sha256sum "cmake-3.27.1-linux-x86_64.tar.gz" | awk '{print $1}')
-            if [ "$actual_sha256" != "9fef63e1cf87cab1153f9433045df2e43c336e462518b0f5e52d2cc91d762cff" ]; then
-              wget https://github.com/Kitware/CMake/releases/download/v3.27.1/cmake-3.27.1-linux-x86_64.tar.gz
-            fi
-          else
-            wget https://github.com/Kitware/CMake/releases/download/v3.27.1/cmake-3.27.1-linux-x86_64.tar.gz
-          fi
-          tar zxf cmake-3.27.1-linux-x86_64.tar.gz
+          yum install -y gcc-toolset-11 cmake git
+          conda remove -n python39 --all -y
+          conda create -n python39 python=3.9 -y
       - uses: actions/checkout@v3
         with:
           repository: "intel-analytics/llm.cpp"
+          ref: ${{ inputs.llmcpp-ref }}
           token: ${{ env.github_access_token }}
           submodules: "recursive"
       - name: Build binary
         shell: bash
         run: |
-          cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
-          $cmake_path -B build
-          $cmake_path --build build --config Release
-        env:
-          CC: gcc-11
-          CXX: g++-11
+          scl enable gcc-toolset-11 "cmake -B build"
+          scl enable gcc-toolset-11 "cmake --build build --config Release"
       - name: Move release binary
         shell: bash
         run: |
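Note on the toolchain change in this hunk: AlmaLinux 8 ships an older system GCC, so instead of the Ubuntu PPA packages, the hand-built binutils 2.36.1, the downloaded CMake tarball, and the CC/CXX overrides, the job now installs the `gcc-toolset-11` software collection plus cmake and git from yum, and wraps each build command in `scl enable gcc-toolset-11 "..."`, which runs it in a shell where the toolset's GCC 11 (and its newer bundled binutils) is first on PATH. A hypothetical sanity-check step, assuming the packages were installed as above:

      - name: Check toolchain versions
        shell: bash
        run: |
          gcc --version                               # system GCC outside the collection
          scl enable gcc-toolset-11 "gcc --version"   # GCC 11 from the toolset
          scl enable gcc-toolset-11 "ld --version"    # binutils bundled with the toolset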
@@ -113,13 +94,10 @@ jobs:
       - name: Build Chatglm
         shell: bash
         run: |
-          cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
+          source activate python39 || conda activate python39
           cd src/chatglm
-          $cmake_path -B build
-          $cmake_path --build build --config Release
-        env:
-          CC: gcc-11
-          CXX: g++-11
+          scl enable gcc-toolset-11 "cmake -B build"
+          scl enable gcc-toolset-11 "cmake --build build --config Release"
       - name: Move Chatglm binaries
         shell: bash
         run: |
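Note: the ChatGLM build previously relied on the Python interpreter provided by actions/setup-python; on the AlmaLinux runner it now activates the python39 conda environment created in the install step. The `source activate python39 || conda activate python39` chain presumably covers both conda setups: `source activate` works on older installs without shell hooks, while `conda activate` requires the conda shell function to have been initialised. A minimal sketch of the pattern (step name hypothetical):

      - name: Activate the python39 environment
        shell: bash
        run: |
          source activate python39 || conda activate python39
          python --version   # expected to report the 3.9 interpreter from the conda env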
@@ -135,6 +113,7 @@ jobs:
         shell: bash
         run: |
           make clean
+          conda remove -n python39 --all -y
   check-linux-avx512-artifact:
     runs-on: ubuntu-latest
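Note: `conda remove -n python39 --all -y` is appended to the clean-up step because the self-hosted runner keeps its state between runs, so the environment created earlier would otherwise linger. A hypothetical variant that also guarantees the teardown runs after a failed build (the `if: always()` guard is an assumption, not part of this commit):

      - name: Clean up
        if: ${{ always() }}
        shell: bash
        run: |
          make clean
          conda remove -n python39 --all -y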
@@ -148,54 +127,32 @@ jobs:
           name: linux-avx512
   linux-build-avx512:
-    runs-on: [self-hosted, AVX512, ubuntu-18.04-lts]
+    runs-on: [self-hosted, AVX512, almalinux8]
     needs: check-linux-avx512-artifact
     if: needs.check-linux-avx512-artifact.outputs.if-exists == 'false'
     steps:
       - name: Set access token
         run: |
           echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV"
-      - name: Update Git
-        shell: bash
-        run: |
-          export http_proxy=${HTTP_PROXY}
-          export https_proxy=${HTTPS_PROXY}
-          add-apt-repository -y ppa:git-core/ppa
-          apt update
-          apt install -y git
-      - uses: actions/checkout@v3
-        with:
-          repository: "intel-analytics/llm.cpp"
-          token: ${{ env.github_access_token }}
-          submodules: "recursive"
       - name: Install Build Environment
         shell: bash
         run: |
           export http_proxy=${HTTP_PROXY}
           export https_proxy=${HTTPS_PROXY}
-          add-apt-repository -y ppa:ubuntu-toolchain-r/test
-          apt update
-          apt install -y build-essential
-          apt install -y gcc-11 g++-11
-          cd ..
-          if [ -f "cmake-3.27.1-linux-x86_64.tar.gz" ]; then
-            actual_sha256=$(sha256sum "cmake-3.27.1-linux-x86_64.tar.gz" | awk '{print $1}')
-            if [ "$actual_sha256" != "9fef63e1cf87cab1153f9433045df2e43c336e462518b0f5e52d2cc91d762cff" ]; then
-              wget https://github.com/Kitware/CMake/releases/download/v3.27.1/cmake-3.27.1-linux-x86_64.tar.gz
-            fi
-          else
-            wget https://github.com/Kitware/CMake/releases/download/v3.27.1/cmake-3.27.1-linux-x86_64.tar.gz
-          fi
-          tar zxf cmake-3.27.1-linux-x86_64.tar.gz
+          yum install -y gcc-toolset-11 cmake git
+          conda remove -n python39 --all -y
+          conda create -n python39 python=3.9 -y
+      - uses: actions/checkout@v3
+        with:
+          repository: "intel-analytics/llm.cpp"
+          ref: ${{ inputs.llmcpp-ref }}
+          token: ${{ env.github_access_token }}
+          submodules: "recursive"
       - name: Build avx512 binary
         shell: bash
         run: |
-          cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
-          $cmake_path -DONLYAVX=OFF -DONLYAVX2=OFF -B build
-          $cmake_path --build build --config Release
-        env:
-          CC: gcc-11
-          CXX: g++-11
+          scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=OFF -B build"
+          scl enable gcc-toolset-11 "cmake --build build --config Release"
       - name: Move avx512 release binary
         shell: bash
         run: |
@@ -211,12 +168,8 @@ jobs:
       - name: Build avx2 binary
         shell: bash
         run: |
-          cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
-          $cmake_path -DONLYAVX=OFF -DONLYAVX2=ON -B build
-          $cmake_path --build build --config Release
-        env:
-          CC: gcc-11
-          CXX: g++-11
+          scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=ON -B build"
+          scl enable gcc-toolset-11 "cmake --build build --config Release"
       - name: Move avx2 release binary
         shell: bash
         run: |
@@ -228,12 +181,8 @@ jobs:
       - name: Build avx binary
         shell: bash
         run: |
-          cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
-          $cmake_path -DONLYAVX=ON -DONLYAVX2=OFF -B build
-          $cmake_path --build build --config Release
-        env:
-          CC: gcc-11
-          CXX: g++-11
+          scl enable gcc-toolset-11 "cmake -DONLYAVX=ON -DONLYAVX2=OFF -B build"
+          scl enable gcc-toolset-11 "cmake --build build --config Release"
       - name: Move avx release binary
         shell: bash
         run: |
@@ -242,16 +191,6 @@ jobs:
           mv build/libllama.so avx_release/libllama_avx.so
           mv build/libgptneox.so avx_release/libgptneox_avx.so
           mv build/libstarcoder.so avx_release/libstarcoder_avx.so
-      # - name: Build Chatglm
-      #   shell: bash
-      #   run: |
-      #     cmake_path="$(pwd)/../cmake-3.27.1-linux-x86_64/bin/cmake"
-      #     cd src/chatglm
-      #     $cmake_path -B build
-      #     $cmake_path --build build --config Release
-      #   env:
-      #     CC: gcc-11
-      #     CXX: g++-11
       - name: Archive avx512 build files
         uses: actions/upload-artifact@v3
         with:
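Note: the avx512 job configures and rebuilds the same llm.cpp tree three times; judging from this diff, the CMake options select the target instruction set (both OFF -> AVX-512, `ONLYAVX2=ON` -> AVX2 only, `ONLYAVX=ON` -> AVX only), and each pass's libraries are renamed before the next pass overwrites build/. A hypothetical consolidation of the three passes into one loop, under the same assumptions:

      - name: Build all ISA variants
        shell: bash
        run: |
          for flags in "-DONLYAVX=OFF -DONLYAVX2=OFF" \
                       "-DONLYAVX=OFF -DONLYAVX2=ON" \
                       "-DONLYAVX=ON -DONLYAVX2=OFF"; do
            scl enable gcc-toolset-11 "cmake $flags -B build"
            scl enable gcc-toolset-11 "cmake --build build --config Release"
            # move/rename the resulting .so files here before the next iteration overwrites build/
          done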
@@ -274,6 +213,7 @@ jobs:
         shell: bash
         run: |
           make clean
+          conda remove -n python39 --all -y
   check-windows-avx2-artifact:
     runs-on: ubuntu-latest
@@ -298,6 +238,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           repository: "intel-analytics/llm.cpp"
+          ref: ${{ inputs.llmcpp-ref }}
           token: ${{ env.github_access_token }}
           submodules: "recursive"
       - name: Add msbuild to PATH
@@ -345,6 +286,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           repository: "intel-analytics/llm.cpp"
+          ref: ${{ inputs.llmcpp-ref }}
           token: ${{ env.github_access_token }}
           submodules: "recursive"
       - name: Add msbuild to PATH
@@ -419,6 +361,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           repository: "intel-analytics/llm.cpp"
+          ref: ${{ inputs.llmcpp-ref }}
           token: ${{ env.github_access_token }}
           submodules: "recursive"
       - name: Add msbuild to PATH
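Note: every checkout of intel-analytics/llm.cpp in this workflow (the Linux and Windows build jobs alike) now takes `ref: ${{ inputs.llmcpp-ref }}`, so one dispatch builds matching binaries from the same llm.cpp revision on every platform. With the default empty string, actions/checkout should fall back to the default branch of the external repository, which keeps push-triggered builds behaving as before. A sketch restating the patched step with that expected behaviour spelled out in a comment:

      - uses: actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}   # '' -> default branch of llm.cpp; otherwise the given branch/tag/SHA
          token: ${{ env.github_access_token }}
          submodules: "recursive"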