diff --git a/.github/actions/llm/cli-test-linux/action.yml b/.github/actions/llm/cli-test-linux/action.yml
deleted file mode 100644
index 4544e61a..00000000
--- a/.github/actions/llm/cli-test-linux/action.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: "llm-cli Flow Verification (Linux)"
-description: "Verify the llm-cli flow on linux"
-
-runs:
-  using: "composite"
-  steps:
-    - name: Test llama llm-cli
-      shell: bash
-      run: |
-        llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
-
-        timeout 30s llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -i -p \
-          'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
-
-        if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
-          exit 1
-        fi
-        rm test.out
-
-    - name: Test gptneox llm-cli
-      shell: bash
-      run: |
-        llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
-
-        timeout 30s llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -i -p \
-          'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
-
-        if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
-          exit 1
-        fi
-        rm test.out
-
-    - name: Test bloom llm-cli
-      shell: bash
-      run: |
-        llm-cli -t $THREAD_NUM -n 256 -x bloom -m $BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
-
-    - name: Test starcoder llm-cli
-      shell: bash
-      run: |
-        llm-cli -t $THREAD_NUM -n 256 -x starcoder -m $STARCODER_INT4_CKPT_PATH -p 'def check_odd('
-
-    # - name: Test chatglm llm-cli
-    #   shell: bash
-    #   run: |
-    #     llm-cli -t $THREAD_NUM -n 256 -x chatglm -m $CHATGLM_INT4_CKPT_PATH -p '你好'
\ No newline at end of file
diff --git a/.github/actions/llm/convert-test/action.yml b/.github/actions/llm/convert-test/action.yml
deleted file mode 100644
index f434520a..00000000
--- a/.github/actions/llm/convert-test/action.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: "IPEX-LLM convert tests"
-description: "IPEX-LLM convert test, including downloading original models"
-
-runs:
-  using: "composite"
-  steps:
-    - name: Download original models (LLaMA)
-      shell: bash
-      run: |
-        if [ ! -d $LLAMA_ORIGIN_PATH ]; then
-          echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..."
-          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR
-        fi
-
-    - name: Download original models (GPT-NeoX)
-      shell: bash
-      run: |
-        if [ ! -d $GPTNEOX_ORIGIN_PATH ]; then
-          echo "Directory $GPTNEOX_ORIGIN_PATH not found. Downloading from FTP server..."
-          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gptneox-7b-redpajama-bf16 -P $ORIGIN_DIR
-        fi
-
-    - name: Download original models (BLOOM)
-      shell: bash
-      run: |
-        if [ ! -d $BLOOM_ORIGIN_PATH ]; then
-          echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..."
-          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloomz-7b1 -P $ORIGIN_DIR
-        fi
-
-    - name: Download original models (StarCoder)
-      shell: bash
-      run: |
-        if [ ! -d $STARCODER_ORIGIN_PATH ]; then
-          echo "Directory $STARCODER_ORIGIN_PATH not found. Downloading from FTP server..."
-          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gpt_bigcode-santacoder -P $ORIGIN_DIR
-        fi
-
-    - name: Convert test
-      shell: bash
-      run: |
-        echo "Running the convert models tests..."
-        python -m pytest -s python/llm/test/convert/test_convert_model.py
diff --git a/.github/actions/llm/download-llm-binary/action.yml b/.github/actions/llm/download-llm-binary/action.yml
deleted file mode 100644
index 8726969c..00000000
--- a/.github/actions/llm/download-llm-binary/action.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Download LLM binary files
-description: Download built binary files from github artifact
-inputs:
-  platform:
-    description: 'Platforms to built on'
-    default: 'Windows,Linux'
-    required: false
-    type: string
-runs:
-  using: "composite"
-  steps:
-    - name: Download all build files
-      uses: actions/download-artifact@v3
-    - name: Move build resources
-      shell: bash
-      run: |
-        rm -rf python/llm/llm-binary || true
-        mkdir -p python/llm/llm-binary
-        if ${{contains(inputs.platform, 'Linux')}}; then
-          mv linux-avx2/* python/llm/llm-binary/
-          mv linux-avx512/* python/llm/llm-binary/
-          mv linux-avxvnni/* python/llm/llm-binary/
-          mv linux-avx/* python/llm/llm-binary/
-          mv linux-amx/* python/llm/llm-binary/
-        fi
-        if ${{contains(inputs.platform, 'Windows')}}; then
-          mv windows-avx2/* python/llm/llm-binary/
-          mv windows-avx-vnni/* python/llm/llm-binary/
-          mv windows-avx/* python/llm/llm-binary/
-          mv windows-npu-level0/* python/llm/llm-binary/
-        fi
-        rm -rf linux-avx2 || true
-        rm -rf linux-avx512 || true
-        rm -rf linux-avxvnni || true
-        rm -rf linux-avx || true
-        rm -rf linux-amx || true
-        rm -rf windows-avx2 || true
-        rm -rf windows-avx-vnni || true
-        rm -rf windows-avx || true
-        rm -rf windows-npu-level0 || true
diff --git a/.github/actions/llm/example-test/action.yml b/.github/actions/llm/example-test/action.yml
deleted file mode 100644
index 8346f64a..00000000
--- a/.github/actions/llm/example-test/action.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-name: 'IPEX-LLM example tests'
-description: 'IPEX-LLM example tests'
-
-runs:
-  using: "composite"
-  steps:
-    - name: Test LLAMA2
-      shell: bash
-      env:
-        INT4_CKPT_DIR: ./llm/ggml-actions/stable
-        LLM_DIR: ./llm
-
-      run: |
-        bash python/llm/dev/test/run-example-tests.sh
\ No newline at end of file
diff --git a/.github/actions/llm/setup-llm-env/action.yml b/.github/actions/llm/setup-llm-env/action.yml
deleted file mode 100644
index 3e10716c..00000000
--- a/.github/actions/llm/setup-llm-env/action.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-name: "Setup IPEX-LLM Env"
-description: "IPEX-LLM installation"
-inputs:
-  extra-dependency:
-    description: "Name of extra dependencies filled in brackets"
-    required: false
-    default: "all"
-runs:
-  using: "composite"
-  steps:
-    - name: Create conda env for llm tests and conduct install tests
-      shell: bash
-      run: |
-        # make sure we install the latest version for bigdl-core-xe related packages
-        pip uninstall bigdl-core-xe -y || true
-        pip uninstall bigdl-core-xe-batch -y || true
-        pip uninstall bigdl-core-xe-addons -y || true
-        pip uninstall bigdl-core-xe-esimd -y || true
-        pip uninstall bigdl-core-xe-21 -y || true
-        pip uninstall bigdl-core-xe-batch-21 -y || true
-        pip uninstall bigdl-core-xe-addons-21 -y || true
-        pip uninstall bigdl-core-xe-esimd-21 -y || true
-        sed -i 's/"bigdl-core-xe==" + CORE_XE_VERSION + "/"bigdl-core-xe/g' python/llm/setup.py
-        sed -i 's/"bigdl-core-xe-batch==" + CORE_XE_VERSION + "/"bigdl-core-xe-batch/g' python/llm/setup.py
-        sed -i 's/"bigdl-core-xe-addons==" + CORE_XE_VERSION + "/"bigdl-core-xe-addons/g' python/llm/setup.py
-        sed -i 's/"bigdl-core-xe-esimd==" + CORE_XE_VERSION + "/"bigdl-core-xe-esimd/g' python/llm/setup.py
-        sed -i 's/"bigdl-core-xe-21==" + 
CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-addons-21==" + CORE_XE_VERSION/"bigdl-core-xe-addons-21"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-esimd-21==" + CORE_XE_VERSION/"bigdl-core-xe-esimd-21"/g' python/llm/setup.py - - pip uninstall bigdl-core-xe-all -y || true - sed -i 's/"bigdl-core-xe-all==" + CORE_XE_VERSION/"bigdl-core-xe-all"/g' python/llm/setup.py - - pip install requests - if [[ ${{ runner.os }} == 'Linux' ]]; then - bash python/llm/dev/release_default_linux.sh default false - elif [[ ${{ runner.os }} == 'Windows' ]]; then - bash python/llm/dev/release_default_windows.sh default false - else - echo "Runner os is not supported!!!!!" - exit 1 - fi - whl_name=$(ls python/llm/dist) - if [[ ${{ inputs.extra-dependency }} == 'xpu_2.0' ]]; then - pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.0]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - pip install pytest expecttest - elif [[ ${{ inputs.extra-dependency }} == 'xpu_2.1' ]]; then - pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.1]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - pip install pytest expecttest - elif [[ ${{ inputs.extra-dependency }} == 'xpu_2.6' ]]; then - pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.6]" --extra-index-url https://download.pytorch.org/whl/test/xpu - pip install pytest - else - if [[ ${{ runner.os }} == 'Linux' ]]; then - pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]" --extra-index-url https://download.pytorch.org/whl/cpu - elif [[ ${{ runner.os }} == 'Windows' ]]; then - pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]" - fi - pip install pytest - bash python/llm/test/run-llm-install-tests.sh - fi diff --git a/.github/workflows/llm-binary-build.yml b/.github/workflows/llm-binary-build.yml deleted file mode 100644 index 2d24212c..00000000 --- a/.github/workflows/llm-binary-build.yml +++ /dev/null @@ -1,511 +0,0 @@ -name: LLM Binary Build - -# Cancel previous runs in the PR when you push new commits -# concurrency: -# group: ${{ github.workflow }}-llm-binary-build-${{ github.event.pull_request.number || github.run_id }} -# cancel-in-progress: false - -permissions: - contents: read - -# Controls when the action will run. 
-on: - # Triggers the workflow on push or pull request events but only for the main branch - # push: - # branches: [main] - # paths: - # - ".github/workflows/llm-binary-build.yml" - # pull_request: - # branches: [main] - # paths: - # - ".github/workflows/llm-binary-build.yml" - # workflow_dispatch: - # inputs: - # llmcpp-ref: - # description: 'Ref of llm.cpp code' - # default: '' - # required: false - # type: string - # platform: - # description: 'Platforms to built on' - # default: '["Windows", "Linux"]' - # required: false - # type: string - workflow_call: - inputs: - llmcpp-ref: - description: 'Ref of llm.cpp code' - default: '' - required: false - type: string - platform: - description: 'Platforms to built on' - default: 'Windows,Linux' - required: false - type: string - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - check-linux-avxvnni-artifact: - if: ${{contains(inputs.platform, 'Linux')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: linux-avxvnni - - linux-build-avxvnni: - runs-on: [self-hosted, AVX2, almalinux8] - needs: check-linux-avxvnni-artifact - if: needs.check-linux-avxvnni-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV" - - name: Install Build Environment - shell: bash - run: | - export http_proxy=${HTTP_PROXY} - export https_proxy=${HTTPS_PROXY} - yum install --nogpgcheck -y gcc-toolset-11 cmake git - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Build binary - shell: bash - run: | - scl enable gcc-toolset-11 "cmake -B build" - scl enable gcc-toolset-11 "cmake --build build --config Release -j" - - name: Move release binary - shell: bash - run: | - mkdir release - mv build/main-bloom release/main-bloom - mv build/libbloom-api.so release/libbloom-api.so - mv build/quantize-bloom release/quantize-bloom - mv build/libbloom.so release/libbloom_avxvnni.so - mv build/main-llama release/main-llama - mv build/libllama-api.so release/libllama-api.so - mv build/quantize-llama release/quantize-llama - mv build/libllama.so release/libllama_avxvnni.so - mv build/main-gptneox release/main-gptneox - mv build/libgptneox-api.so release/libgptneox-api.so - mv build/quantize-gptneox release/quantize-gptneox - mv build/libgptneox.so release/libgptneox_avxvnni.so - mv build/main-starcoder release/main-starcoder - mv build/libstarcoder-api.so release/libstarcoder-api.so - mv build/quantize-starcoder release/quantize-starcoder - mv build/libstarcoder.so release/libstarcoder_avxvnni.so - - name: Archive build files - uses: actions/upload-artifact@v3 - with: - name: linux-avxvnni - path: | - release - - name: Clean up test environment - shell: bash - run: | - make clean - - check-linux-avx512-artifact: - if: ${{contains(inputs.platform, 'Linux')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: linux-avx512 - - linux-build-avx512: - runs-on: [self-hosted, AVX512, almalinux8] - needs: check-linux-avx512-artifact - if: 
needs.check-linux-avx512-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV" - - name: Install Build Environment - shell: bash - run: | - export http_proxy=${HTTP_PROXY} - export https_proxy=${HTTPS_PROXY} - yum install --nogpgcheck -y gcc-toolset-11 cmake git - conda remove -n python39 --all -y - conda create -n python39 python=3.9 -y - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Build avx512 binary - shell: bash - run: | - scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=OFF -B build" - scl enable gcc-toolset-11 "cmake --build build --config Release -j" - - name: Move avx512 release binary - shell: bash - run: | - mkdir avx512_release - mv build/quantize-bloom avx512_release/quantize-bloom_avx512 - mv build/libbloom.so avx512_release/libbloom_avx512.so - mv build/quantize-llama avx512_release/quantize-llama_avx512 - mv build/libllama.so avx512_release/libllama_avx512.so - mv build/quantize-gptneox avx512_release/quantize-gptneox_avx512 - mv build/libgptneox.so avx512_release/libgptneox_avx512.so - mv build/quantize-starcoder avx512_release/quantize-starcoder_avx512 - mv build/libstarcoder.so avx512_release/libstarcoder_avx512.so - - name: Build avx2 binary - shell: bash - run: | - scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=ON -B build" - scl enable gcc-toolset-11 "cmake --build build --config Release -j" - - name: Move avx2 release binary - shell: bash - run: | - mkdir avx2_release - mv build/libbloom.so avx2_release/libbloom_avx2.so - mv build/libllama.so avx2_release/libllama_avx2.so - mv build/libgptneox.so avx2_release/libgptneox_avx2.so - mv build/libstarcoder.so avx2_release/libstarcoder_avx2.so - - name: Build avx binary - shell: bash - run: | - scl enable gcc-toolset-11 "cmake -DONLYAVX=ON -DONLYAVX2=OFF -B build" - scl enable gcc-toolset-11 "cmake --build build --config Release -j" - - name: Move avx release binary - shell: bash - run: | - mkdir avx_release - mv build/libbloom.so avx_release/libbloom_avx.so - mv build/libllama.so avx_release/libllama_avx.so - mv build/libgptneox.so avx_release/libgptneox_avx.so - mv build/libstarcoder.so avx_release/libstarcoder_avx.so - - name: Archive avx512 build files - uses: actions/upload-artifact@v3 - with: - name: linux-avx512 - path: | - avx512_release - - name: Archive avx2 build files - uses: actions/upload-artifact@v3 - with: - name: linux-avx2 - path: | - avx2_release - - name: Archive avx build files - uses: actions/upload-artifact@v3 - with: - name: linux-avx - path: | - avx_release - - name: Clean up test environment - if: ${{ always() }} - shell: bash - run: | - make clean - conda remove -n python39 --all -y - - check-linux-amx-artifact: - if: ${{contains(inputs.platform, 'Linux')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: linux-amx - - linux-build-amx: - runs-on: [self-hosted, amx, almalinux8] - needs: check-linux-amx-artifact - if: needs.check-linux-amx-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV" - - name: Install Build Environment - shell: bash - 
run: | - export http_proxy=${HTTP_PROXY} - export https_proxy=${HTTPS_PROXY} - yum install --nogpgcheck -y gcc-toolset-11 cmake git - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Build amx binary - shell: bash - run: | - scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=OFF -B build" - scl enable gcc-toolset-11 "cmake --build build --config Release -j" - - name: Move amx release binary - shell: bash - run: | - mkdir amx_release - mv build/quantize-bloom amx_release/quantize-bloom_amx - mv build/libbloom.so amx_release/libbloom_amx.so - mv build/quantize-llama amx_release/quantize-llama_amx - mv build/libllama.so amx_release/libllama_amx.so - mv build/quantize-gptneox amx_release/quantize-gptneox_amx - mv build/libgptneox.so amx_release/libgptneox_amx.so - mv build/quantize-starcoder amx_release/quantize-starcoder_amx - mv build/libstarcoder.so amx_release/libstarcoder_amx.so - - name: Archive amx build files - uses: actions/upload-artifact@v3 - with: - name: linux-amx - path: | - amx_release - - name: Clean up test environment - shell: bash - run: | - make clean - - check-windows-avx2-artifact: - if: ${{contains(inputs.platform, 'Windows')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: windows-avx2 - - windows-build-avx2: - runs-on: [self-hosted, Windows, AVX-VNNI-Build] - needs: check-windows-avx2-artifact - if: needs.check-windows-avx2-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.1 - with: - msbuild-architecture: x64 - - name: Add cmake to PATH - uses: ilammy/msvc-dev-cmd@v1 - - name: Build binary - shell: powershell - run: | - cmake . - cmake --build . 
--config Release -j - - name: Archive build files - uses: actions/upload-artifact@v3 - with: - name: windows-avx2 - path: | - build/Release - - check-windows-avx-vnni-artifact: - if: ${{contains(inputs.platform, 'Windows')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: windows-avx-vnni - - windows-build-avx-vnni: - runs-on: [self-hosted, Windows, AVX-VNNI-Build] - needs: check-windows-avx-vnni-artifact - if: needs.check-windows-avx-vnni-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.1 - with: - msbuild-architecture: x64 - - name: Add cmake to PATH - uses: ilammy/msvc-dev-cmd@v1 - - name: Build binary - shell: powershell - run: | - cmake -DAVXVNNI=ON . - cmake --build . --config Release -j - - name: Move release binary - shell: powershell - run: | - if (Test-Path ./release) { rm -r -fo release } - mkdir release - # mv build/Release/main-bloom.exe release/main-bloom_vnni.exe - mv build/Release/quantize-bloom.exe release/quantize-bloom_vnni.exe - mv build/Release/bloom.dll release/libbloom_vnni.dll - - # mv build/Release/main-llama.exe release/main-llama_vnni.exe - mv build/Release/quantize-llama.exe release/quantize-llama_vnni.exe - mv build/Release/llama.dll release/libllama_vnni.dll - - # mv build/Release/main-gptneox.exe release/main-gptneox_vnni.exe - mv build/Release/quantize-gptneox.exe release/quantize-gptneox_vnni.exe - mv build/Release/gptneox.dll release/libgptneox_vnni.dll - - # mv build/Release/main-starcoder.exe release/main-starcoder_vnni.exe - mv build/Release/quantize-starcoder.exe release/quantize-starcoder_vnni.exe - mv build/Release/starcoder.dll release/libstarcoder_vnni.dll - - name: Archive build files - uses: actions/upload-artifact@v3 - with: - name: windows-avx-vnni - path: | - release - - check-windows-avx-artifact: - if: ${{contains(inputs.platform, 'Windows')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: windows-avx - - windows-build-avx: - runs-on: [self-hosted, Windows, AVX-VNNI-Build] - needs: check-windows-avx-artifact - if: needs.check-windows-avx-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.1 - with: - msbuild-architecture: x64 - - name: Add cmake to PATH - uses: ilammy/msvc-dev-cmd@v1 - - name: Build binary - shell: powershell - run: | - cmake -DONLYAVX=ON . - cmake --build . 
--config Release -j - - name: Move release binary - shell: powershell - run: | - if (Test-Path ./release) { rm -r -fo release } - mkdir release - mv build/Release/bloom.dll release/libbloom_avx.dll - - mv build/Release/llama.dll release/libllama_avx.dll - - mv build/Release/gptneox.dll release/libgptneox_avx.dll - - mv build/Release/starcoder.dll release/libstarcoder_avx.dll - - name: Archive build files - uses: actions/upload-artifact@v3 - with: - name: windows-avx - path: | - release - - check-windows-npu-level0-artifact: - if: ${{contains(inputs.platform, 'Windows')}} - runs-on: [Shire] - outputs: - if-exists: ${{steps.check_artifact.outputs.exists}} - steps: - - name: Check if built - id: check_artifact - uses: xSAVIKx/artifact-exists-action@v0 - with: - name: windows-npu-level0 - - windows-build-npu-level0: - runs-on: [self-hosted, Windows, npu-level0] - needs: check-windows-npu-level0-artifact - if: needs.check-windows-npu-level0-artifact.outputs.if-exists == 'false' - steps: - - name: Set access token - run: | - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV - echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: "intel-analytics/llm.cpp" - ref: ${{ inputs.llmcpp-ref }} - token: ${{ env.github_access_token }} - submodules: "recursive" - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v1.1 - with: - msbuild-architecture: x64 - - name: Add cmake to PATH - uses: ilammy/msvc-dev-cmd@v1 - - name: Build binary - shell: cmd - run: | - call "C:\Program Files (x86)\Intel\openvino_2024.4.0\setupvars.bat" - cd bigdl-core-npu-level0 - sed -i "/FetchContent_MakeAvailable(intel_npu_acceleration_library)/s/^/#/" CMakeLists.txt - mkdir build - cd build - cmake .. - cmake --build . --config Release -t pipeline - - name: Move release binary - shell: powershell - run: | - cd bigdl-core-npu-level0 - if (Test-Path ./release) { rm -r -fo release } - mkdir release - mv build/Release/pipeline.dll release/pipeline.dll - - name: Archive build files - uses: actions/upload-artifact@v3 - with: - name: windows-npu-level0 - path: | - bigdl-core-npu-level0/release - - - # to make llm-binary-build optionally skippable - dummy-step: - if: ${{ inputs.platform == 'Dummy' }} - runs-on: ubuntu-latest - steps: - - name: dummy echo - run: | - echo "dummy step" diff --git a/.github/workflows/llm-nightly-test.yml b/.github/workflows/llm-nightly-test.yml deleted file mode 100644 index cd575fdd..00000000 --- a/.github/workflows/llm-nightly-test.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: LLM Nightly Tests - -# Cancel previous runs in the PR when you push new commits -concurrency: - group: ${{ github.workflow }}-llm-nightly-test-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -# Controls when the action will run. 
-on: - # schedule: - # - cron: "00 13 * * *" # GMT time, 13:00 GMT == 21:00 China - # pull_request: - # branches: [main] - # paths: - # - ".github/workflows/llm-nightly-test.yml" - # - ".github/actions/llm/setup-llm-env/action.yml" - # - ".github/actions/llm/remove-llm-env/action.yml" - # - ".github/actions/llm/convert-test/action.yml" - # # Allows you to run this workflow manually from the Actions tab - # workflow_dispatch: - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: true - type: string - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - llm-nightly-convert-test: - needs: llm-cpp-build - strategy: - fail-fast: false - matrix: - include: - - os: windows - instruction: AVX-VNNI-UT - python-version: "3.11" - - os: ubuntu-20.04-lts - instruction: avx512 - python-version: "3.11" - runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"] - env: - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - steps: - - name: Set model directories - shell: bash - run: | - echo "ORIGIN_DIR=$(pwd)/../llm/origin-models" >> "$GITHUB_ENV" - echo "INT4_CKPT_DIR=$(pwd)/../llm/nightly-converted-models" >> "$GITHUB_ENV" - - name: Create model directories - shell: bash - run: | - if [ ! -d $ORIGIN_DIR ]; then - mkdir -p $ORIGIN_DIR - fi - if [ ! -d $INT4_CKPT_DIR ]; then - mkdir -p $INT4_CKPT_DIR - fi - - name: Set environment variables - shell: bash - run: | - echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV" - echo "GPTNEOX_ORIGIN_PATH=${ORIGIN_DIR}/gptneox-7b-redpajama-bf16" >> "$GITHUB_ENV" - echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloomz-7b1" >> "$GITHUB_ENV" - echo "STARCODER_ORIGIN_PATH=${ORIGIN_DIR}/gpt_bigcode-santacoder" >> "$GITHUB_ENV" - - echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_q4_0.bin" >> "$GITHUB_ENV" - echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_gptneox_q4_0.bin" >> "$GITHUB_ENV" - echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_q4_0.bin" >> "$GITHUB_ENV" - echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_starcoder_q4_0.bin" >> "$GITHUB_ENV" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: Install IPEX-LLM - uses: ./.github/actions/llm/setup-llm-env - - - name: Download original models & convert - uses: ./.github/actions/llm/convert-test - - - name: Upload ckpt to ftp - shell: bash - if: runner.os == 'Linux' && github.event_name == 'schedule' - run: | - curl -T $LLAMA_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_llama_7b_q4_0.bin - curl -T $GPTNEOX_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_redpajama_7b_q4_0.bin - curl -T $BLOOM_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_bloom_7b_q4_0.bin - curl -T $STARCODER_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_santacoder_1b_q4_0.bin - - name: Delete ckpt - shell: bash - run: | - rm -rf $LLAMA_INT4_CKPT_PATH - rm -rf 
$GPTNEOX_INT4_CKPT_PATH - rm -rf $BLOOM_INT4_CKPT_PATH - rm -rf $STARCODER_INT4_CKPT_PATH - - llm-unit-tests: - needs: llm-cpp-build - uses: ./.github/workflows/llm_unit_tests.yml - with: - checkout-ref: ${{ inputs.checkout-ref }} - llm-example-test: - needs: llm-cpp-build - uses: ./.github/workflows/llm_example_tests.yml - with: - checkout-ref: ${{ inputs.checkout-ref }} diff --git a/.github/workflows/llm_example_tests.yml b/.github/workflows/llm_example_tests.yml deleted file mode 100644 index a87c539f..00000000 --- a/.github/workflows/llm_example_tests.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: LLM Example Test - -# Cancel previous runs in the PR when you push new commits -concurrency: - group: ${{ github.workflow }}-llm-example-tests-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -# Controls when the action will run. -on: - # schedule: - # - cron: '00 13 * * *' # GMT time, 13:00 GMT == 21:00 China - # pull_request: - # branches: [ main ] - # paths: - # - '.github/workflows/llm_example_tests.yml' - # - '.github/workflows/llm-binary-build.yml' - # - '.github/actions/llm/example-test/action.yml' - # - '.github/actions/llm/setup-llm-env/action.yml' - # - '.github/actions/llm/remove-llm-env/action.yml' - # - '.github/actions/llm/download-llm-binary/action.yml' - # - 'python/llm/dev/test/run-example-tests.sh' - # - 'python/llm/example/**' - # workflow_dispatch: - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: true - type: string - -env: - INT4_CKPT_DIR: ./llm/ggml-actions/stable - LLM_DIR: ./llm - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - llm-example-test: - needs: llm-cpp-build - strategy: - fail-fast: false - matrix: - python-version: ["3.11"] - instruction: ["AVX512"] - runs-on: [ self-hosted, llm,"${{matrix.instruction}}", ubuntu-20.04-lts ] - env: - THREAD_NUM: 24 - steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # actions/checkout@v2 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: Run LLM install (all) test - uses: ./.github/actions/llm/setup-llm-env - env: - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - - - name: Run LLM example test - uses: ./.github/actions/llm/example-test - env: - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - - # - name: Clean up test environment - # uses: ./.github/actions/llm/remove-llm-env - # env: - # ANALYTICS_ZOO_ROOT: ${{ github.workspace }} diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml deleted file mode 100644 index f68d2a74..00000000 --- a/.github/workflows/llm_performance_tests.yml +++ /dev/null @@ -1,1964 +0,0 @@ -name: LLM Performance Test - -# Cancel previous runs in the PR when you push new commits -concurrency: - group: ${{ github.workflow }}-llm-performance-tests-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -# Controls when the action will run. 
-on: - # schedule: - # - cron: "30 16 * * *" # GMT time, 16:30 GMT == 00:30 China - # please uncomment it for PR tests - # pull_request: - # branches: [main] - # paths: - # - ".github/workflows/llm_performance_tests.yml" - # - "python/llm/test/benchmark/**" - # - "python/llm/dev/benchmark/all-in-one/**" - # workflow_dispatch: - # inputs: - # arc: - # description: "If trigger performance test on Arc" - # required: false - # type: boolean - # default: true - # spr: - # description: "If trigger performance test on SPR" - # required: false - # type: boolean - # default: true - # core: - # description: "If trigger performance test on Core" - # required: false - # type: boolean - # default: true - # igpu: - # description: "If trigger performance test on iGPU" - # required: false - # type: boolean - # default: true - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: true - type: string - arc: - description: "If trigger performance test on Arc" - required: false - type: boolean - default: true - spr: - description: "If trigger performance test on SPR" - required: false - type: boolean - default: true - core: - description: "If trigger performance test on Core" - required: false - type: boolean - default: true - mtl: - description: "If trigger performance test on MTL (Windows)" - required: false - type: boolean - default: true - lnl: - description: "If trigger performance test on LNL (Windows)" - required: false - type: boolean - default: true - dgpu: - description: "If trigger performance test on dGPU (Windows)" - required: false - type: boolean - default: true - gpu-pytorch-version: - description: 'PyTorch version used for GPU perf tests' - required: false - type: string - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - with: - platform: ${{ (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main')) && 'Windows,Linux' || 'Dummy' }} - - llm-performance-test-on-arc: - if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.arc ) }} # please comment it for PR tests - needs: llm-cpp-build - strategy: - fail-fast: false - matrix: - python-version: ["3.11"] - runs-on: [self-hosted, llm, perf] - env: - OMP_NUM_THREADS: 16 - THREAD_NUM: 16 - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - CSV_SAVE_PATH: ${{ (github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main'))) && '/mnt/disk1/nightly_perf_gpu/' || '/mnt/disk1/pr_perf_gpu/' }} - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - shell: bash - # pip install transformers_stream_generator for model internlm-chat-7b-8k - # pip install tiktoken for model Qwen-7B-Chat-10-12 - # pip install matplotlib for model Qwen-VL-Chat - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - python -m pip install --upgrade omegaconf - python -m pip install --upgrade pandas - python -m pip install --upgrade einops - python -m pip install --upgrade transformers_stream_generator - python -m pip install --upgrade tiktoken - python -m pip install --upgrade 
matplotlib - - # specific for test on certain commits - - name: Download llm binary - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/download-llm-binary - - - name: Install IPEX-LLM from source - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/setup-llm-env - with: - extra-dependency: "xpu_2.1" - - - name: Install IPEX-LLM from Pypi - if: ${{ github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main')) }} - shell: bash - run: | - test_version_date=`date -d 'yesterday' '+%Y%m%d'` - test_version=2.2.0b$test_version_date - pip install --pre --upgrade ipex-llm[xpu]==$test_version --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - if ! pip show ipex-llm | grep $test_version; then - echo "Did not install ipex-llm with excepted version $test_version" - exit 1 - fi - - - name: Test installed xpu version - shell: bash - run: | - source /opt/intel/oneapi/setvars.sh - python -m pip install --upgrade pytest - python -m pip install --upgrade expecttest - bash python/llm/test/run-llm-install-tests.sh - - - name: Test on xpu(transformers==4.36.2) - shell: bash - run: | - date_for_test_version=$(date -d yesterday +%Y-%m-%d) - sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py - source /opt/intel/oneapi/setvars.sh - export USE_XETLA=OFF - export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - pip install transformers==4.36.2 - cp python/llm/test/benchmark/arc-perf-transformers-436.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - mkdir test_batch1 - mkdir test_batch2 - mkdir test_batch4 - # batch_size 1 - # hide time info - sed -i 's/str(end - st)/"xxxxxx"/g' run.py - # change csv name - sed -i 's/{today}/{today}_test1_batch1/g' run.py - python run.py - mv *.csv test_batch1 - # batch_size 2 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-436-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch1/batch2/g' run.py - python run.py - mv *.csv test_batch2 - # batch_size 4 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-436-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch2/batch4/g' run.py - python run.py - mv *.csv test_batch4 - - - name: Test on xpu(transformers==4.37.0) - shell: bash - run: | - source /opt/intel/oneapi/setvars.sh - export USE_XETLA=OFF - export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - # upgrade for default transformers version - python -m pip install transformers==4.37.0 - # batch_size 1 - cp python/llm/test/benchmark/arc-perf-transformers-437.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/test1_batch4/test2_batch1/g' run.py - python run.py - mv *.csv test_batch1 - # batch_size 2 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-437-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch1/batch2/g' run.py - python run.py - mv *.csv test_batch2 - # batch_size 4 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-437-batch4.yaml 
python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch2/batch4/g' run.py - python run.py - mv *.csv test_batch4 - - - name: Test on xpu(transformers==4.40.0) - shell: bash - run: | - source /opt/intel/oneapi/setvars.sh - export USE_XETLA=OFF - export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - # upgrade transformers for model Qwen/Qwen1.5-MoE-A2.7B-Chat - python -m pip install transformers==4.40.0 - python -m pip install "trl<0.12.0" - # batch_size 1 - cp python/llm/test/benchmark/arc-perf-transformers-440.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/test2_batch4/test3_batch1/g' run.py - python run.py - mv *.csv test_batch1 - - - name: Test on xpu(transformers==4.43.1) - shell: bash - run: | - source /opt/intel/oneapi/setvars.sh - export USE_XETLA=OFF - export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - # upgrade for default transformers version - python -m pip install transformers==4.43.1 - python -m pip install "trl<0.12.0" - # batch_size 1 - cp python/llm/test/benchmark/arc-perf-transformers-443.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/test3_batch1/test4_batch1/g' run.py - python run.py - mv *.csv test_batch1 - # batch_size 2 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-443-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch1/batch2/g' run.py - python run.py - mv *.csv test_batch2 - # batch_size 4 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-443-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch2/batch4/g' run.py - python run.py - mv *.csv test_batch4 - python -m pip uninstall -y trl - - - name: Test on xpu(transformers==4.45.0) - shell: bash - run: | - source /opt/intel/oneapi/setvars.sh - export USE_XETLA=OFF - export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - # upgrade for default transformers version - python -m pip install transformers==4.45.0 - python -m pip install "trl<0.12.0" - python -m pip install accelerate==0.33.0 - # batch_size 1 - cp python/llm/test/benchmark/arc-perf-transformers-445.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/test4_batch4/test5_batch1/g' run.py - python run.py - mv *.csv test_batch1 - # batch_size 2 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-445-batch2.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch1/batch2/g' run.py - python run.py - mv *.csv test_batch2 - # batch_size 4 - cd ../../../../../ - cp python/llm/test/benchmark/arc-perf-transformers-445-batch4.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - # change csv name - sed -i 's/batch2/batch4/g' run.py - python run.py - mv *.csv test_batch4 - python -m pip install accelerate==0.23.0 - python -m pip uninstall -y trl - - - name: Concat csv and generate html - shell: bash - run: | - # batch_size 1 - cd python/llm/dev/benchmark/all-in-one/test_batch1 - python ../../../../test/benchmark/concat_csv.py - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" 
$CSV_SAVE_PATH/batch_size_1 - fi - done - python -m pip install pandas==1.5.3 - cd ../../../../test/benchmark - python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_1 - # batch_size 2 - cd ../../../../ - cd python/llm/dev/benchmark/all-in-one/test_batch2 - python ../../../../test/benchmark/concat_csv.py - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" $CSV_SAVE_PATH/batch_size_2 - fi - done - cd ../../../../test/benchmark - python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_2 - # batch_size 4 - cd ../../../../ - cd python/llm/dev/benchmark/all-in-one/test_batch4 - python ../../../../test/benchmark/concat_csv.py - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" $CSV_SAVE_PATH/batch_size_4 - fi - done - cd ../../../../test/benchmark - python csv_to_html.py -f $CSV_SAVE_PATH/batch_size_4 - - - name: Merge and sort csv files of multiple batches and generate html - shell: bash - run: | - cd python/llm/test/benchmark - mkdir merged_temp - # go through all the files and go to merged_temp - cd ../../dev/benchmark/all-in-one/test_batch1 - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" ../../../../test/benchmark/merged_temp - fi - done - cd ../test_batch2 - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" ../../../../test/benchmark/merged_temp - fi - done - cd ../test_batch4 - for file in *.csv; do - if [[ $file != *test* ]]; then - cp "$file" ../../../../test/benchmark/merged_temp - fi - done - cd ../../../../test/benchmark - python merge_csv_batch.py -f ./merged_temp - cd merged_temp - find . -name "*batch*.csv" -delete - for file in *.csv; do - cp "$file" $CSV_SAVE_PATH/merged - done - cd .. - python csv_to_html.py -f $CSV_SAVE_PATH/merged - rm -r merged_temp - - - name: Update html in parent folder - shell: bash - run: | - cd python/llm/test/benchmark - python update_html_in_parent_folder.py -f $CSV_SAVE_PATH - - - name: Check and upload results to ftp - shell: bash - run: | - # batch_size 1 - cd python/llm/dev/benchmark/all-in-one/test_batch1 - python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436.yaml - python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437.yaml - python ../../../../test/benchmark/check_results.py -c test3 -y ../../../../test/benchmark/arc-perf-transformers-440.yaml - find . -name "*test*.csv" -delete - if [[ ${{ github.event_name }} == "schedule" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - elif [[ ${{ github.event_name }} == "workflow_dispatch" ]] && [[ ${{ inputs.checkout-ref }} == "main" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - fi - cd ../ - rm -r test_batch1 - # batch_size 2 - cd test_batch2 - python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436-batch2.yaml - python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch2.yaml - find . 
-name "*test*.csv" -delete - if [[ ${{ github.event_name }} == "schedule" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - elif [[ ${{ github.event_name }} == "workflow_dispatch" ]] && [[ ${{ inputs.checkout-ref }} == "main" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - fi - cd ../ - rm -r test_batch2 - # batch_size 4 - cd test_batch4 - python ../../../../test/benchmark/check_results.py -c test1 -y ../../../../test/benchmark/arc-perf-transformers-436-batch4.yaml - python ../../../../test/benchmark/check_results.py -c test2 -y ../../../../test/benchmark/arc-perf-transformers-437-batch4.yaml - find . -name "*test*.csv" -delete - if [[ ${{ github.event_name }} == "schedule" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - elif [[ ${{ github.event_name }} == "workflow_dispatch" ]] && [[ ${{ inputs.checkout-ref }} == "main" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/ - fi - cd ../ - rm -r test_batch4 - - - llm-performance-test-on-spr: - if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.spr ) }} # please comment it for PR tests - needs: llm-cpp-build - strategy: - fail-fast: false - matrix: - python-version: ["3.11"] - runs-on: [self-hosted, llm, spr-perf] - env: - OMP_NUM_THREADS: 16 - THREAD_NUM: 16 - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - python -m pip install --upgrade omegaconf - python -m pip install --upgrade pandas - python -m pip install --upgrade einops - python -m pip install --upgrade tiktoken - python -m pip install --upgrade transformers_stream_generator - # specific for test on certain commits - - name: Download llm binary - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/download-llm-binary - - - name: Install IPEX-LLM from source - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/setup-llm-env - - - name: Install IPEX-LLM from Pypi - if: ${{ github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main')) }} - shell: bash - run: | - test_version_date=`date -d 'yesterday' '+%Y%m%d'` - test_version=2.2.0b$test_version_date - pip install --pre --upgrade ipex-llm[all]==$test_version --extra-index-url https://download.pytorch.org/whl/cpu - if ! 
pip show ipex-llm | grep $test_version; then - echo "Did not install ipex-llm with excepted version $test_version" - exit 1 - fi - - - name: Test on cpu - shell: bash - run: | - date_for_test_version=$(date -d yesterday +%Y-%m-%d) - sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py - - mv python/llm/test/benchmark/cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - export http_proxy=${HTTP_PROXY} - export https_proxy=${HTTPS_PROXY} - source ipex-llm-init -t - export OMP_NUM_THREADS=48 - # hide time info - sed -i 's/str(end - st)/"xxxxxx"/g' run.py - python run.py - cp ./*.csv /mnt/disk1/models/nightly_perf_cpu - cd ../../../test/benchmark - python -m pip install pandas==1.5.3 - python csv_to_html.py -f /mnt/disk1/models/nightly_perf_cpu - cd /mnt/disk1/models/nightly_perf_cpu - for f in *.html; do - curl -T "$f" ${LLM_FTP_URL}/llm/nightly_perf/nightly_perf_cpu/ - done - - llm-performance-test-on-core: - if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.core ) }} # please comment it for PR tests - needs: llm-cpp-build - strategy: - fail-fast: false - matrix: - include: - - os: windows - platform: dp - python-version: "3.11" - # - os: windows - # platform: lp - # python-version: "3.11" - runs-on: [self-hosted, "${{ matrix.os }}", llm, perf-core, "${{ matrix.platform }}"] - env: - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - CSV_SAVE_PATH: ${{ (github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main'))) && 'D:/action-runners/nightly_perf_core_' || 'D:/action-runners/pr_perf_core_' }}${{ matrix.platform }}/ - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - python -m pip install --upgrade omegaconf pandas - python -m pip install --upgrade tiktoken einops transformers_stream_generator - - # specific for test on certain commits - - name: Download llm binary - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/download-llm-binary - - - name: Install IPEX-LLM from source - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/setup-llm-env - - - name: Install IPEX-LLM from Pypi - if: ${{ github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main')) }} - shell: bash - run: | - test_version_date=`date -d 'yesterday' '+%Y%m%d'` - test_version=2.2.0b$test_version_date - pip install --pre --upgrade ipex-llm[all]==$test_version - if ! 
pip show ipex-llm | grep $test_version; then - echo "Did not install ipex-llm with excepted version $test_version" - exit 1 - fi - - - - name: Test on core ${{ matrix.platform }} - shell: bash - run: | - date_for_test_version=$(date -d yesterday +%Y-%m-%d) - sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py - - mv python/llm/test/benchmark/core-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml - cd python/llm/dev/benchmark/all-in-one - export http_proxy=${HTTP_PROXY} - export https_proxy=${HTTPS_PROXY} - # hide time info - sed -i 's/str(end - st)/"xxxxxx"/g' run.py - python run.py - cp ./*.csv $CSV_SAVE_PATH - cd ../../../test/benchmark - python -m pip install pandas==1.5.3 - python csv_to_html.py -f $CSV_SAVE_PATH - cd ../../dev/benchmark/all-in-one/ - if [[ ${{ github.event_name }} == "schedule" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/core_${{ matrix.platform }}/ - elif [[ ${{ github.event_name }} == "workflow_dispatch" ]] && [[ ${{ inputs.checkout-ref }} == "main" ]]; then - curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/core_${{ matrix.platform }}/ - fi - - select-gpu-win-test-platform: - if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.mtl ) || ( github.event_name == 'workflow_dispatch' && inputs.lnl ) || ( github.event_name == 'workflow_dispatch' && inputs.dgpu ) }} - needs: llm-cpp-build - runs-on: [self-hosted, Shire] - outputs: - platform: ${{ steps.select-platform.outputs.platform }} - steps: - - name: Select GPU Windows test platform - shell: bash - id: select-platform - run: | - test_platform=() - if [[ ${{ github.event_name }} == "workflow_dispatch" ]]; then - if [ ${{ inputs.mtl }} == "true" ]; then - test_platform+=("\"perf-mtl\"") - fi - if [ ${{ inputs.lnl }} == "true" ]; then - test_platform+=("\"perf-lnl\"") - fi - if [ ${{ inputs.dgpu }} == "true" ]; then - test_platform+=("\"perf-dgpu\"") - fi - printf 'platform=[%s]\n' "$(IFS=','; echo "${test_platform[*]}")" >> "$GITHUB_OUTPUT" - else - echo 'platform=["perf-mtl", "perf-lnl"]' >> "$GITHUB_OUTPUT" - fi - - # TODO: rename igpu specific tests to gpu-win - llm-performance-test-on-gpu-win: - if: ${{ github.event.schedule || ( github.event_name == 'workflow_dispatch' && inputs.mtl ) || ( github.event_name == 'workflow_dispatch' && inputs.lnl ) || ( github.event_name == 'workflow_dispatch' && inputs.dgpu ) }} - needs: select-gpu-win-test-platform - strategy: - fail-fast: false - matrix: - platform: ${{ fromJSON(needs.select-gpu-win-test-platform.outputs.platform) }} - python-version: ["3.11"] - runs-on: [self-hosted, Windows, llm, "${{ matrix.platform }}"] - env: - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - # TODO: Put the ipex-llm related install process for win gpu into a action function - - # specific for test on certain commits - - name: Download llm binary - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - uses: ./.github/actions/llm/download-llm-binary - - - name: Prepare for install ipex-llm from source - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - shell: bash - run: | - if [ ${{ inputs.gpu-pytorch-version }} == "2.6" ]; then - sed -i 's/"bigdl-core-xe-all==" + CORE_XE_VERSION/"bigdl-core-xe-all"/g' python/llm/setup.py - else - if [ ${{ 
matrix.platform }} == "perf-mtl" ]; then - sed -i 's/"bigdl-core-xe-21==" + CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-addons-21==" + CORE_XE_VERSION/"bigdl-core-xe-addons-21"/g' python/llm/setup.py - fi - if [ ${{ matrix.platform }} == "perf-lnl" ] || [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i 's/"bigdl-core-xe-23==" + CORE_XE_VERSION/"bigdl-core-xe-23"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-batch-23==" + CORE_XE_VERSION/"bigdl-core-xe-batch-23"/g' python/llm/setup.py - sed -i 's/"bigdl-core-xe-addons-23==" + CORE_XE_VERSION/"bigdl-core-xe-addons-23"/g' python/llm/setup.py - fi - fi - - - name: Install ipex-llm and other related packages (install from source) - if: ${{ github.event_name == 'workflow_dispatch' && (inputs.checkout-ref != 'main') }} - shell: cmd - run: | - call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y - call conda activate igpu-perf - - pip install --upgrade pip - pip install --upgrade wheel - pip install --upgrade omegaconf pandas - pip install --upgrade tiktoken einops transformers_stream_generator matplotlib - - cd python\llm - python setup.py clean --all bdist_wheel --win - if not exist dist\ipex_llm*.whl (exit /b 1) - for %%i in (dist\ipex_llm*.whl) do set whl_name=%%i - - if "${{ inputs.gpu-pytorch-version }}"=="2.6" ( - pip install --pre --upgrade %whl_name%[xpu_2.6] --extra-index-url https://download.pytorch.org/whl/test/xpu - ) else ( - if "${{ matrix.platform }}"=="perf-mtl" ( - pip install --pre --upgrade %whl_name%[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - ) - if "${{ matrix.platform }}"=="perf-lnl" ( - pip install --pre --upgrade %whl_name%[xpu_lnl] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/ - ) - if "${{ matrix.platform }}"=="perf-dgpu" ( - pip install --pre --upgrade %whl_name%[xpu_lnl] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/ - ) - ) - if %ERRORLEVEL% neq 0 (exit /b 1) - pip list - - call conda deactivate - - - name: Determine desired ipex-llm version - if: ${{ github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main')) }} - shell: bash - run: | - test_version_date=`date -d 'yesterday' '+%Y%m%d'` - test_version=2.2.0b$test_version_date - echo "TEST_VERSION=${test_version}" >> "$GITHUB_ENV" - - - name: Install ipex-llm and other related packages (install from pypi) - if: ${{ github.event.schedule || (github.event_name == 'workflow_dispatch' && (inputs.checkout-ref == 'main')) }} - shell: cmd - run: | - call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y - call conda activate igpu-perf - - pip install --upgrade pip - pip install --upgrade wheel - pip install --upgrade omegaconf pandas - pip install --upgrade tiktoken einops transformers_stream_generator matplotlib - - if "${{ inputs.gpu-pytorch-version }}"=="2.6" ( - pip install --pre --upgrade ipex-llm[xpu_2.6] --extra-index-url https://download.pytorch.org/whl/test/xpu - ) else ( - if "${{ matrix.platform }}"=="perf-mtl" ( - pip install --pre --upgrade ipex-llm[xpu]==%TEST_VERSION% --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - ) - if "${{ matrix.platform }}"=="perf-lnl" ( - pip install --pre --upgrade ipex-llm[xpu_lnl]==%TEST_VERSION% --extra-index-url 
https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/ - ) - if "${{ matrix.platform }}"=="perf-dgpu" ( - pip install --pre --upgrade ipex-llm[xpu_lnl]==%TEST_VERSION% --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/lnl/cn/ - ) - ) - pip show ipex-llm | findstr %TEST_VERSION% - if %ERRORLEVEL% neq 0 ( - echo "Did not install ipex-llm with expected version %TEST_VERSION%" - exit /b 1 - ) - pip list - - call conda deactivate - - - name: Create env for html generation - shell: cmd - run: | - call conda create -n html-gen python=3.11 -y - call conda activate html-gen - - pip install pandas==1.5.3 - pip install Jinja2 - pip install "numpy<2.0.0" - - call conda deactivate - - - name: Set directory envs & fix generated csv date name - shell: bash - run: | - if [[ ${{ github.event_name }} == "schedule" ]]; then - echo "CSV_SAVE_PATH=${CSV_NIGHTLY_PATH}" >> "$GITHUB_ENV" - elif [[ ${{ github.event_name }} == "workflow_dispatch" ]] && [[ ${{ inputs.checkout-ref }} == "main" ]]; then - echo "CSV_SAVE_PATH=${CSV_NIGHTLY_PATH}" >> "$GITHUB_ENV" - else - echo "CSV_SAVE_PATH=${CSV_PR_PATH}" >> "$GITHUB_ENV" - fi - date_for_test_version=$(date -d yesterday +%Y-%m-%d) - echo "LOG_FILE=${date_for_test_version}_output.txt" >> "$GITHUB_ENV" - - sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py - - - name: Add extra warmup for chatglm3-6b int4+fp32 & MiniCPM int4+fp16 int4+fp32 for more stable results - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i '/^\s*result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\ - if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat", "openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:\ - run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming) - ' python/llm/dev/benchmark/all-in-one/run.py - - sed -i '/^\s*result = run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\ - if repo_id in ["openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:\ - run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming) - ' python/llm/dev/benchmark/all-in-one/run.py - - # lnl: 32/1k/2k/3k/4k, dgpu: 32/1k/2k/3k (in temp) - - name: Adjust model list - shell: bash - run: | - if [ ${{ matrix.platform }} == "perf-lnl" ]; then - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml - fi - if [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i "s/- 
'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml - sed -i "s/- 'mistralai\/Mistral-7B-Instruct-v0.2'/# - 'mistralai\/Mistral-7B-Instruct-v0.2'/" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml - sed -i "s/- 'baichuan-inc\/Baichuan2-13B-Chat'/# - 'baichuan-inc\/Baichuan2-13B-Chat'/" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml - sed -i "s/- 'meta-llama\/Llama-2-13b-chat-hf'/# - 'meta-llama\/Llama-2-13b-chat-hf'/" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml - fi - - # 32-32 int4+fp16 - - name: Prepare igpu perf test (32-32 int4+fp16) - shell: bash - run: | - # hide time info - # sed -i 's/str(end - st)/"xxxxxx"/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{api}-results-{today}.csv/32-32-{api}-results-{today}_test1.csv/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml - - - name: Test on igpu (32-32 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - - :: TODO: specify configurations on perf-arc - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (32-32 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_436.yaml - - - name: Test on igpu for transformers 4.36 (32-32 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (32-32 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_438.yaml - - - name: Test on igpu for transformers 4.38 (32-32 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set 
SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (32-32 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml - - - name: Test on igpu for transformers 4.43 (32-32 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (32-32 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_445.yaml - - - name: Test on igpu for transformers 4.45 (32-32 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (32-32 int4+fp16) - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\32-32_int4_fp16\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\32-32_int4_fp16\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\32-32_int4_fp16\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # TODO: create a action function here for different input - # 1024-128 int4+fp16 - - name: Prepare igpu perf test (1024-128 int4+fp16) - shell: bash - run: | - sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test5/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model 
hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml - - - name: Test on igpu (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (1024-128 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_436.yaml - - - name: Test on igpu for transformers 4.36 (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (1024-128 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_438.yaml - - - name: Test on igpu for transformers 4.38 (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (1024-128 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml - - - name: Test on igpu for transformers 4.43 (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 - 
REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (1024-128 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_445.yaml - - - name: Test on igpu for transformers 4.45 (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (1024-128 int4+fp16) - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\1024-128_int4_fp16\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128_int4_fp16\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\1024-128_int4_fp16\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # 2048-256 int4+fp16 - - name: Prepare igpu perf test (2048-256 int4+fp16) - shell: bash - run: | - sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test5/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml - - - name: Test on igpu (2048-256 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (2048-256 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_436.yaml - - - name: Test on igpu for transformers 4.36 (2048-256 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move 
..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (2048-256 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_438.yaml - - - name: Test on igpu for transformers 4.38 (2048-256 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (2048-256 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml - - - name: Test on igpu for transformers 4.43 (2048-256 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (2048-256 int4+fp16) - shell: bash - run: | - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_445.yaml - - - name: Test on igpu for transformers 4.45 (2048-256 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (2048-256 
int4+fp16) - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\2048-256_int4_fp16\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\2048-256_int4_fp16\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\2048-256_int4_fp16\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # 3072-384 int4+fp16 - - name: Prepare igpu perf test (3072-384 int4+fp16) - shell: bash - run: | - sed -i 's/2048-256/3072-384/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test5/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml - - - name: Test on igpu (3072-384 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (3072-384 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_436.yaml - - - name: Test on igpu for transformers 4.36 (3072-384 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (3072-384 int4+fp16) - shell: bash - run: | - if [ ${{ matrix.platform }} == "perf-mtl" ] || [ ${{ matrix.platform }} == "perf-lnl" ]; then - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - fi - if [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - fi - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_438.yaml - - - name: Test on igpu for transformers 4.38 (3072-384 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move 
..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - if "${{ matrix.platform }}"=="perf-mtl" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - ) - if "${{ matrix.platform }}"=="perf-lnl" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - ) - if "${{ matrix.platform }}"=="perf-dgpu" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - ) - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (3072-384 int4+fp16) - shell: bash - run: | - if [ ${{ matrix.platform }} == "perf-mtl" ] || [ ${{ matrix.platform }} == "perf-lnl" ]; then - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - fi - if [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - fi - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml - - - name: Test on igpu for transformers 4.43 (3072-384 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - if "${{ matrix.platform }}"=="perf-mtl" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - ) - if "${{ matrix.platform }}"=="perf-lnl" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - ) - if "${{ matrix.platform }}"=="perf-dgpu" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - ) - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (3072-384 int4+fp16) - shell: bash - run: | - if [ ${{ matrix.platform }} == "perf-mtl" ] || [ ${{ matrix.platform }} == "perf-lnl" ]; then - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - fi - if [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - fi - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_445.yaml - - - name: Test on igpu for transformers 4.45 (3072-384 int4+fp16) - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - if "${{ matrix.platform }}"=="perf-mtl" ( - python 
..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - ) - if "${{ matrix.platform }}"=="perf-lnl" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - ) - if "${{ matrix.platform }}"=="perf-dgpu" ( - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - ) - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (3072-384 int4+fp16) - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\3072-384_int4_fp16\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\3072-384_int4_fp16\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\3072-384_int4_fp16\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # 4096-512 int4+fp16 - - name: Prepare igpu perf test (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: bash - run: | - sed -i 's/3072-384/4096-512/g' python/llm/dev/benchmark/all-in-one/run.py - if [ ${{ matrix.platform }} == "perf-mtl" ] || [ ${{ matrix.platform }} == "perf-lnl" ]; then - sed -i 's/{today}_test5/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - fi - if [ ${{ matrix.platform }} == "perf-dgpu" ]; then - sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - fi - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml - - - name: Test on igpu (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_438.yaml - - - name: Test on igpu for transformers 4.38 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python 
..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml - - - name: Test on igpu for transformers 4.43 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_445.yaml - - - name: Test on igpu for transformers 4.45 (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (4096-512 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' || matrix.platform == 'perf-lnl' }} - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\4096-512_int4_fp16\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\4096-512_int4_fp16\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\4096-512_int4_fp16\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # load_low_bit 1024-128 int4+fp16 - - name: Prepare igpu perf test (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/4096-512/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to 
your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml - - - name: Test on igpu (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_436.yaml - - - name: Test on igpu for transformers 4.36 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_438.yaml - - - name: Test on igpu for transformers 4.38 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml - - - name: Test on igpu for 
transformers 4.43 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_445.yaml - - - name: Test on igpu for transformers 4.45 (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (load_low_bit 1024-128 int4+fp16) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # 1024-128 - - name: Prepare igpu perf test (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test5/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml - - - name: Test on igpu (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.37.0 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - REM for llava - set TRANSFORMERS_OFFLINE=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python 
..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test1 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.36 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_436.yaml - - - name: Test on igpu for transformers 4.36 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.36.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_436.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.38 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_438.yaml - - - name: Test on igpu for transformers 4.38 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.38.2 - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_438.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 - if %ERRORLEVEL% neq 0 (exit /b 1) - - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.43 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_443.yaml - - - name: Test on igpu for transformers 4.43 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.43.1 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_443.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - call conda deactivate - - - name: Prepare igpu perf test for transformers 4.45 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: bash - run: | - sed -i 's/{today}_test4/{today}_test5/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" 
python/llm/test/benchmark/igpu-perf/1024-128_445.yaml - - - name: Test on igpu for transformers 4.45 (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate igpu-perf - pip install transformers==4.45.0 - pip install accelerate==0.33.0 - pip install "trl<0.12.0" - - set SYCL_CACHE_PERSISTENT=1 - set SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 - - cd python\llm\dev\benchmark\all-in-one - move ..\..\..\test\benchmark\igpu-perf\1024-128_445.yaml config.yaml - set PYTHONIOENCODING=utf-8 - python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 - REM if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test5 - if %ERRORLEVEL% neq 0 (exit /b 1) - - pip uninstall trl -y - pip install accelerate==0.23.0 - call conda deactivate - - - name: Concat csv and generate html (1024-128) - if: ${{ matrix.platform == 'perf-mtl' }} - shell: cmd - run: | - call conda activate html-gen - - cd python\llm\dev\benchmark\all-in-one - python ..\..\..\test\benchmark\concat_csv.py - if %ERRORLEVEL% neq 0 (exit /b 1) - del /q *test*.csv - move *.csv %CSV_SAVE_PATH%\1024-128\ - cd ..\..\..\test\benchmark - python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128\ - if %ERRORLEVEL% neq 0 (exit /b 1) - move %CSV_SAVE_PATH%\1024-128\*.html %CSV_SAVE_PATH% - - call conda deactivate - - # TODO: avoid duplicated code - - name: Upload results to ftp - if: ${{ always() }} - shell: cmd - run: | - cd %CSV_SAVE_PATH% - IF "${{ github.event_name }}"=="schedule" ( - IF "${{ matrix.platform }}"=="perf-mtl" ( - for %%f in (*.html) do ( - curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% - ) - ) - IF "${{ matrix.platform }}"=="perf-lnl" ( - for %%f in (*.html) do ( - curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% - ) - ) - ) - IF "${{ github.event_name }}"=="workflow_dispatch" ( - IF "${{ inputs.checkout-ref }}"=="main" ( - IF "${{ matrix.platform }}"=="perf-mtl" ( - for %%f in (*.html) do ( - curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% - ) - ) - IF "${{ matrix.platform }}"=="perf-lnl" ( - for %%f in (*.html) do ( - curl -T "%%f" %FTP_IGPU_NIGHTLY_PERF_PATH% - ) - ) - ) - ) - # for test on machine when encountering error - # - name: Remove conda env - # if: ${{ always() }} - # shell: cmd - # run: | - # call conda env remove -n igpu-perf -y diff --git a/.github/workflows/llm_unit_tests.yml b/.github/workflows/llm_unit_tests.yml deleted file mode 100644 index e89224cb..00000000 --- a/.github/workflows/llm_unit_tests.yml +++ /dev/null @@ -1,495 +0,0 @@ -name: LLM Unit Tests - -# Cancel previous runs in the PR when you push new commits -concurrency: - group: ${{ github.workflow }}-llm-unittest-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -# Controls when the action will run. 
-on: - # Triggers the workflow on push or pull request events but only for the main branch - # push: - # branches: [main] - # paths: - # - "python/llm/**" - # - ".github/workflows/llm_unit_tests.yml" - # - ".github/workflows/llm-binary-build.yml" - # - ".github/actions/llm/setup-llm-env/action.yml" - # - ".github/actions/llm/remove-llm-env/action.yml" - # - ".github/actions/llm/cli-test-linux/action.yml" - # - ".github/actions/llm/cli-test-windows/action.yml" - # - ".github/actions/llm/download-llm-binary/action.yml" - # pull_request: - # branches: [main] - # paths: - # - "python/llm/**" - # - ".github/workflows/llm_unit_tests.yml" - # - ".github/workflows/llm-binary-build.yml" - # - ".github/actions/llm/setup-llm-env/action.yml" - # - ".github/actions/llm/remove-llm-env/action.yml" - # - ".github/actions/llm/cli-test-linux/action.yml" - # - ".github/actions/llm/cli-test-windows/action.yml" - # - ".github/actions/llm/download-llm-binary/action.yml" - # workflow_dispatch: - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: true - type: string - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - setup-python-version: - runs-on: ubuntu-latest - outputs: - python-version: ${{ steps.setup-python-version.outputs.python-version }} - steps: - - name: setup-python-version - id: setup-python-version - run: | - if [ ${{ github.event_name }} == 'schedule' ]; then - python_version='["3.9", "3.10", "3.11"]' - else - python_version='["3.11"]' - fi - list=$(echo ${python_version} | jq -c) - echo "python-version=${list}" >> "$GITHUB_OUTPUT" - llm-unit-test: - needs: [setup-python-version, llm-cpp-build] - strategy: - fail-fast: false - matrix: - os: [windows, ubuntu-20.04-lts] - python-version: ${{ fromJson(needs.setup-python-version.outputs.python-version) }} - include: - - os: windows - instruction: AVX-VNNI-UT - - os: ubuntu-20.04-lts - instruction: avx512 - runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"] - env: - THREAD_NUM: 24 - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - steps: - - name: Set model directories - shell: bash - run: | - echo "DATASET_DIR=${{ github.workspace }}/../llm/datasets" >> "$GITHUB_ENV" - echo "ORIGIN_DIR=${{ github.workspace }}/../llm/origin-models" >> "$GITHUB_ENV" - echo "INT4_CKPT_DIR=${{ github.workspace }}/../llm/converted-models" >> "$GITHUB_ENV" - - name: Create model directories - shell: bash - run: | - if [ ! -d $DATASET_DIR ]; then - mkdir -p $DATASET_DIR - fi - if [ ! -d $ORIGIN_DIR ]; then - mkdir -p $ORIGIN_DIR - fi - if [ ! 
-d $INT4_CKPT_DIR ]; then - mkdir -p $INT4_CKPT_DIR - fi - - name: Set environment variables - shell: bash - run: | - echo "SPEECH_DATASET_PATH=${DATASET_DIR}/librispeech_asr_dummy" >> "$GITHUB_ENV" - echo "COMMON_VOICE_PATH=${DATASET_DIR}/common_voice" >> "$GITHUB_ENV" - - echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV" - echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloom-7b1" >> "$GITHUB_ENV" - echo "ORIGINAL_CHATGLM2_6B_PATH=${ORIGIN_DIR}/chatglm2-6b" >> "$GITHUB_ENV" - echo "ORIGINAL_CODESHELL_7B_PATH=${ORIGIN_DIR}/CodeShell-7B-Chat" >> "$GITHUB_ENV" - echo "ORIGINAL_WHISPER_TINY_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV" - echo "MISTRAL_ORIGIN_PATH=${ORIGIN_DIR}/Mistral-7B-v0.1" >> "$GITHUB_ENV" - echo "LLAMA2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Llama-2-7b-chat-hf" >> "$GITHUB_ENV" - echo "VICUNA_7B_1_3_ORIGIN_PATH=${ORIGIN_DIR}/vicuna-7b-v1.3" >> "$GITHUB_ENV" - - echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_7b_q4_0.bin" >> "$GITHUB_ENV" - echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_redpajama_7b_q4_0.bin" >> "$GITHUB_ENV" - echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_7b_q4_0.bin" >> "$GITHUB_ENV" - echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_santacoder_1b_q4_0.bin" >> "$GITHUB_ENV" - echo "CHATGLM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/chatglm2-6b-q4_0.bin" >> "$GITHUB_ENV" - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - - # May remove later - pip uninstall sentence-transformers -y || true - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: Run LLM install (all) test - uses: ./.github/actions/llm/setup-llm-env - - - name: Download ckpt & original models - shell: bash - run: | - if [ ! -e $LLAMA_INT4_CKPT_PATH ]; then - echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..." - echo "wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR" - wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR - fi - if [ ! -e $GPTNEOX_INT4_CKPT_PATH ]; then - echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..." - wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_redpajama_7b_q4_0.bin -P $INT4_CKPT_DIR - fi - if [ ! -e $BLOOM_INT4_CKPT_PATH ]; then - echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..." - wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_bloom_7b_q4_0.bin -P $INT4_CKPT_DIR - fi - if [ ! -e $STARCODER_INT4_CKPT_PATH ]; then - echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..." - wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin -P $INT4_CKPT_DIR - fi - # if [ ! -e $CHATGLM_INT4_CKPT_PATH ]; then - # echo "Directory $CHATGLM_INT4_CKPT_PATH not found. Downloading from FTP server..." - # wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/chatglm2-6b-q4_0.bin -P $INT4_CKPT_DIR - # fi - if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then - echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..." 
- echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR - fi - if [ ! -d $ORIGINAL_CODESHELL_7B_PATH ]; then - echo "Directory $ORIGINAL_CODESHELL_7B_PATH not found. Downloading from FTP server..." - echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/CodeShell-7B-Chat -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/CodeShell-7B-Chat -P $ORIGIN_DIR - fi - if [ ! -d $ORIGINAL_WHISPER_TINY_PATH ]; then - echo "Directory $ORIGINAL_WHISPER_TINY_PATH not found. Downloading from FTP server..." - echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR - fi - if [ ! -d $MISTRAL_ORIGIN_PATH ]; then - echo "Directory $MISTRAL_ORIGIN_PATH not found. Downloading from FTP server..." - echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-v0.1 -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-v0.1 -P $ORIGIN_DIR - fi - if [ ! -d $LLAMA_ORIGIN_PATH ]; then - echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..." - echo "wget --no-verbose $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR - fi - if [ ! -d $BLOOM_ORIGIN_PATH ]; then - echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..." - echo "wget --no-verbose $LLM_FTP_URL/llm/bloom-7b1 -P $ORIGIN_DIR" - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloom-7b1 -P $ORIGIN_DIR - fi - if [ ! -d $SPEECH_DATASET_PATH ]; then - echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..." - echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR" - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR - fi - if [ ! -d $COMMON_VOICE_PATH ]; then - echo "Directory $COMMON_VOICE_PATH not found. Downloading from FTP server..." - echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR" - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR - fi - if [ ! -d $LLAMA2_7B_ORIGIN_PATH ]; then - echo "Directory $LLAMA2_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Llama-2-7b-chat-hf -P $ORIGIN_DIR - fi - if [ ! -d $VICUNA_7B_1_3_ORIGIN_PATH ]; then - echo "Directory $VICUNA_7B_1_3_ORIGIN_PATH not found. Downloading from FTP server..." 
- wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/vicuna-7b-v1.3 -P $ORIGIN_DIR - fi - - - name: Run LLM cli test (Linux) - if: runner.os == 'Linux' - uses: ./.github/actions/llm/cli-test-linux - - - name: Setup Python Path - if: runner.os == 'Windows' - shell: bash - run: | - # Get Python interpreter path - python_path=$(python -c 'import sys; print(sys.executable)') - python_dir=$(dirname "$python_path") - scripts_dir="$python_dir/Scripts" - - # Set environment variables - echo "PYTHON_DIR=$python_dir" >> $GITHUB_ENV - echo "SCRIPTS_DIR=$scripts_dir" >> $GITHUB_ENV - - - name: Run LLM cli test (Windows) - if: runner.os == 'Windows' - shell: powershell - run: | - # Retrieve environment variables - $pythonDir = $env:PYTHON_DIR - $scriptsDir = $env:SCRIPTS_DIR - - # Update PATH - $env:PATH = "$pythonDir;$scriptsDir;$env:PATH" - - # Run tests - llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x llama -m $env:LLAMA_INT4_CKPT_PATH -p 'Once upon a time,' - llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x gptneox -m $env:GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,' - llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x bloom -m $env:BLOOM_INT4_CKPT_PATH -p 'Once upon a time,' - # llm-cli.ps1 -t $env:THREAD_NUM -x starcoder -m $env:STARCODER_INT4_CKPT_PATH -p 'def check_odd(' - - - name: Run LLM inference test - shell: bash - run: | - python -m pip install einops datasets librosa openai-whisper - bash python/llm/test/run-llm-inference-tests.sh - - name: Run LLM langchain test - shell: bash - run: | - pip install -U langchain==0.0.184 - pip install -U chromadb==0.3.25 - pip install -U pandas==2.0.3 - bash python/llm/test/run-llm-langchain-tests.sh - - name: Run LLM llamaindex test - shell: bash - run: | - pip install "llama-index-readers-file<0.2.0" - pip install "llama-index-vector-stores-postgres<0.2.0" - pip install "llama-index-embeddings-huggingface<0.3.0" - pip install transformers==4.36.2 - pip install "pydantic>=2.0.0" - bash python/llm/test/run-llm-llamaindex-tests.sh - - name: Run sentence-transformers uninstallation - if: ${{ always() }} - shell: bash - run: | - pip uninstall sentence-transformers -y || true - - llm-unit-test-on-arc: - needs: [setup-python-version, llm-cpp-build] - strategy: - fail-fast: false - matrix: - runner: ['arc-ut', 'arc-ut-win'] - pytorch-version: ['2.1'] - python-version: ${{ fromJson(needs.setup-python-version.outputs.python-version) }} - runs-on: [self-hosted, llm, "${{ matrix.runner }}"] - env: - # OMP_NUM_THREADS: 16 - # THREAD_NUM: 16 - ANALYTICS_ZOO_ROOT: ${{ github.workspace }} - steps: - - name: Set environment variables - shell: bash - run: | - echo "DATASET_DIR=${ORIGIN_DIR}/../datasets" >> "$GITHUB_ENV" - echo "YAHMA_ALPACA_CLEANED_PATH=${ORIGIN_DIR}/../datasets/yahma_alpaca_cleaned" >> "$GITHUB_ENV" - echo "SPEECH_DATASET_PATH=${ORIGIN_DIR}/../datasets/librispeech_asr_dummy" >> "$GITHUB_ENV" - - echo "LLAMA2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Llama-2-7b-chat-hf" >> "$GITHUB_ENV" - echo "CHATGLM2_6B_ORIGIN_PATH=${ORIGIN_DIR}/chatglm2-6b" >> "$GITHUB_ENV" - echo "FALCON_7B_ORIGIN_PATH=${ORIGIN_DIR}/falcon-7b-instruct-with-patch" >> "$GITHUB_ENV" - echo "MPT_7B_ORIGIN_PATH=${ORIGIN_DIR}/mpt-7b-chat" >> "$GITHUB_ENV" - echo "WHISPER_TINY_ORIGIN_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV" - echo "MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH=${ORIGIN_DIR}/Mistral-7B-Instruct-v0.1" >> "$GITHUB_ENV" - echo "BAICHUAN2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Baichuan2-7B-Chat" >> "$GITHUB_ENV" - echo "QWEN_7B_ORIGIN_PATH=${ORIGIN_DIR}/Qwen-7B-Chat" >> "$GITHUB_ENV" - echo 
"VICUNA_7B_1_3_ORIGIN_PATH=${ORIGIN_DIR}/vicuna-7b-v1.3" >> "$GITHUB_ENV" - - name: Checkout repo - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade wheel - python -m pip install --upgrade notebook - - # May remove later - pip uninstall sentence-transformers -y || true - - # On Windows, we need to add "Python3_ROOT_DIR/bin" to path to make libuv work - if [[ "$RUNNER_OS" == "Windows" ]]; then - echo $Python3_ROOT_DIR'\bin\' - echo $Python3_ROOT_DIR'\bin\' >> $GITHUB_PATH - fi - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: Install IPEX-LLM for xpu - uses: ./.github/actions/llm/setup-llm-env - with: - extra-dependency: "xpu_${{ matrix.pytorch-version }}" - - - name: Test installed xpu version - shell: bash - run: | - # Specific oneapi position on arc ut test machines - if [[ "$RUNNER_OS" == "Linux" ]]; then - if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then - source /opt/intel/oneapi/setvars.sh - elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then - source /home/arda/intel/oneapi/setvars.sh - fi - fi - bash python/llm/test/run-llm-install-tests.sh - - - name: Download LLMs and datasets - shell: bash - run: | - if [ ! -d $LLAMA2_7B_ORIGIN_PATH ]; then - echo "Directory $LLAMA2_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Llama-2-7b-chat-hf -P $ORIGIN_DIR - fi - if [ ! -d $CHATGLM2_6B_ORIGIN_PATH ]; then - echo "Directory $CHATGLM2_6B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR - fi - if [ ! -d $FALCON_7B_ORIGIN_PATH ]; then - echo "Directory $FALCON_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/falcon-7b-instruct-with-patch -P $ORIGIN_DIR - fi - if [ ! -d $MPT_7B_ORIGIN_PATH ]; then - echo "Directory $MPT_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/mpt-7b-chat -P $ORIGIN_DIR - fi - if [ ! -d $WHISPER_TINY_ORIGIN_PATH ]; then - echo "Directory $WHISPER_TINY_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR - fi - if [ ! -d $DATASET_DIR ]; then - mkdir -p $DATASET_DIR - fi - if [ ! -d $YAHMA_ALPACA_CLEANED_PATH ]; then - echo "Directory $YAHMA_ALPACA_CLEANED_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/yahma_alpaca_cleaned -P $DATASET_DIR - fi - if [ ! -d $SPEECH_DATASET_PATH ]; then - echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR - fi - if [ ! -d $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH ]; then - echo "Directory $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-Instruct-v0.1 -P $ORIGIN_DIR - fi - if [ ! 
-d $QWEN_7B_ORIGIN_PATH ]; then - echo "Directory $QWEN_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Qwen-7B-Chat -P $ORIGIN_DIR - fi - if [ ! -d $BAICHUAN2_7B_ORIGIN_PATH ]; then - echo "Directory $BAICHUAN2_7B_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/Baichuan2-7B-Chat -P $ORIGIN_DIR - fi - if [ ! -d $VICUNA_7B_1_3_ORIGIN_PATH ]; then - echo "Directory $VICUNA_7B_1_3_ORIGIN_PATH not found. Downloading from FTP server..." - wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/vicuna-7b-v1.3 -P $ORIGIN_DIR - fi - - - name: Run LLM inference test - shell: bash - run: | - # Specific oneapi position on arc ut test machines - if [[ "$RUNNER_OS" == "Linux" ]]; then - if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then - source /opt/intel/oneapi/setvars.sh - elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then - source /home/arda/intel/oneapi/setvars.sh - fi - fi - python -m pip install datasets librosa soundfile einops tiktoken transformers_stream_generator - - bash python/llm/test/run-llm-inference-tests-gpu.sh - - - name: Run LLM example tests - shell: bash - run: | - python -m pip uninstall datasets -y - python -m pip install transformers==4.36.0 datasets peft==0.10.0 - python -m pip install bitsandbytes scipy - # Specific oneapi position on arc ut test machines - if [[ "$RUNNER_OS" == "Linux" ]]; then - if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then - source /opt/intel/oneapi/setvars.sh - elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then - source /home/arda/intel/oneapi/setvars.sh - fi - fi - bash python/llm/test/run-llm-example-tests-gpu.sh - - - name: Get Langchain version - shell: bash - id: get_langchain_version - run: | - pip install langchain - LANGCHAIN_VERSION=$(pip show langchain | grep Version | cut -d " " -f 2) - LANGCHAIN_REF="langchain==$LANGCHAIN_VERSION" - echo "langchain_ver=$LANGCHAIN_REF" >> $GITHUB_OUTPUT - - - name: Checkout Langchain repo - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 - with: - repository: "langchain-ai/langchain" - ref: ${{ join(steps.get_langchain_version.outputs.*, '\n') }} - path: langchain_upstream - - - name: Run LLM langchain GPU test - shell: bash - run: | - pip install -U langchain==0.0.184 - pip install -U chromadb==0.3.25 - pip install -U pandas==2.0.3 - # Specific oneapi position on arc ut test machines - if [[ "$RUNNER_OS" == "Linux" ]]; then - if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then - source /opt/intel/oneapi/setvars.sh - elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then - source /home/arda/intel/oneapi/setvars.sh - fi - fi - bash python/llm/test/run-llm-langchain-tests-gpu.sh - - pip install -U langchain - pip install -U langchain-community - bash python/llm/test/run-langchain-upstream-tests.sh - - - name: Run LLM llamaindex GPU test - shell: bash - run: | - pip install "llama-index-readers-file<0.2.0" - pip install "llama-index-vector-stores-postgres<0.2.0" - pip install "llama-index-embeddings-huggingface<0.3.0" - # Specific oneapi position on arc ut test machines - if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then - pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - if [[ "$RUNNER_OS" == "Linux" ]]; then - source /opt/intel/oneapi/setvars.sh - fi - elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then - pip install --pre 
--upgrade ipex-llm[xpu_2.0] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/ - if [[ "$RUNNER_OS" == "Linux" ]]; then - source /home/arda/intel/oneapi/setvars.sh - fi - fi - pip install transformers==4.36.2 - pip install "pydantic>=2.0.0" - bash python/llm/test/run-llm-llamaindex-tests-gpu.sh - - name: Run sentence-transformers uninstallation - if: ${{ always() }} - shell: bash - run: | - pip uninstall sentence-transformers -y || true diff --git a/.github/workflows/manually_build.yml b/.github/workflows/manually_build.yml deleted file mode 100644 index 30306a7e..00000000 --- a/.github/workflows/manually_build.yml +++ /dev/null @@ -1,400 +0,0 @@ -name: Manually Build - -on: - # workflow_dispatch: - # inputs: - # checkout-ref: - # description: 'commit id (SHA-1 hash)' - # required: true - # type: string - # artifact: - # description: 'select which job to run("all" will make all jobs run)' - # required: true - # default: 'all' - # type: choice - # options: - # - all - # - ipex-llm-cpu - # - ipex-llm-xpu - # - ipex-llm-inference-cpp-xpu - # - ipex-llm-serving-cpu - # - ipex-llm-serving-xpu - # - ipex-llm-finetune-lora-cpu - # - ipex-llm-finetune-qlora-cpu - # - ipex-llm-finetune-qlora-cpu-k8s - # - ipex-llm-finetune-xpu - # tag: - # description: 'docker image tag (e.g. 2.2.0-SNAPSHOT)' - # required: true - # default: '2.2.0-SNAPSHOT' - # type: string - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: true - type: string - artifact: - description: 'select which job to run("all" will make all jobs run)' - required: true - default: 'all' - type: string - tag: - description: 'docker image tag (e.g. 2.2.0-SNAPSHOT)' - required: true - default: '2.2.0-SNAPSHOT' - type: string - public: - description: "if the docker image push to public docker hub" - required: true - type: boolean - default: true - -env: - TAG: ${{ inputs.tag }} - -permissions: - contents: read - -jobs: - ipex-llm-finetune-lora-cpu: - if: ${{ inputs.artifact == 'ipex-llm-finetune-lora-cpu' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-lora-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-lora-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-lora-cpu - cd docker/llm/finetune/lora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-finetune-qlora-cpu: - if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-qlora-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-qlora-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-qlora-cpu - cd docker/llm/finetune/qlora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-finetune-qlora-cpu-k8s: - if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu-k8s' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-qlora-cpu-k8s - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-qlora-cpu-k8s ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-qlora-cpu-k8s - cd docker/llm/finetune/qlora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile.k8s . 
- # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-finetune-xpu: - if: ${{ inputs.artifact == 'ipex-llm-finetune-xpu' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-xpu - cd docker/llm/finetune/xpu - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-xpu: - if: ${{ inputs.artifact == 'ipex-llm-xpu' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-xpu - cd docker/llm/inference/xpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-inference-cpp-xpu: - if: ${{ inputs.artifact == 'ipex-llm-inference-cpp-xpu' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-inference-cpp-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-inference-cpp-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-inference-cpp-xpu - cd docker/llm/inference-cpp/ - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-cpu: - if: ${{ inputs.artifact == 'ipex-llm-cpu' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-cpu - cd docker/llm/inference/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-serving-xpu: - if: ${{ inputs.artifact == 'ipex-llm-serving-xpu' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-serving-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-serving-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-serving-xpu - cd docker/llm/serving/xpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - - ipex-llm-serving-cpu: - if: ${{ inputs.artifact == 'ipex-llm-serving-cpu' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire, AVX512] - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-serving-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-serving-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-serving-cpu - cd docker/llm/serving/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- # push docker image to public hub - if [[ "${{ inputs.public }}" == "true" ]]; then - sudo docker push ${image}:${TAG} - # tag 'latest' - sudo docker tag ${image}:${TAG} ${image}:latest - sudo docker push ${image}:latest - sudo docker rmi -f ${image}:${TAG} ${image}:latest - else - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - fi - diff --git a/.github/workflows/manually_build_for_testing.yml b/.github/workflows/manually_build_for_testing.yml deleted file mode 100644 index c4ada1cc..00000000 --- a/.github/workflows/manually_build_for_testing.yml +++ /dev/null @@ -1,317 +0,0 @@ -name: Manually Build For Testing - -on: - workflow_dispatch: - inputs: - sha: - description: 'commit id (SHA-1 hash)' - required: true - type: string - artifact: - description: 'select which job to run("all" will make all jobs run)' - required: true - default: 'all' - type: choice - options: - - all - - ipex-llm-cpu - - ipex-llm-xpu - - ipex-llm-inference-cpp-xpu - - ipex-llm-serving-cpu - - ipex-llm-serving-xpu - - ipex-llm-serving-xpu-tgi - - ipex-llm-finetune-lora-cpu - - ipex-llm-finetune-qlora-cpu - - ipex-llm-finetune-qlora-cpu-k8s - - ipex-llm-finetune-xpu - tag: - description: 'docker image tag (e.g. test)' - required: true - default: 'test' - type: string - -env: - TAG: ${{ github.event.inputs.tag }} - -permissions: - contents: read - -jobs: - ipex-llm-finetune-lora-cpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-lora-cpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-lora-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-lora-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-lora-cpu - cd docker/llm/finetune/lora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-finetune-qlora-cpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-qlora-cpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-qlora-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-qlora-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-qlora-cpu - cd docker/llm/finetune/qlora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-finetune-qlora-cpu-k8s: - if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu-k8s' || inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-qlora-cpu-k8s - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-qlora-cpu-k8s ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-qlora-cpu-k8s - cd docker/llm/finetune/qlora/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile.k8s . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-finetune-xpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-xpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-finetune-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-finetune-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-finetune-xpu - cd docker/llm/finetune/xpu - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-xpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-xpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-xpu - cd docker/llm/inference/xpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-serving-xpu-tgi: - if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-xpu-tgi' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-serving-xpu-tgi - run: | - echo "##############################################################" - echo "####### ipex-llm-serving-xpu-tgi ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-serving-xpu-tgi - cd docker/llm/serving/xpu-tgi - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-inference-cpp-xpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-inference-cpp-xpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-inference-cpp-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-inference-cpp-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-inference-cpp-xpu - cd docker/llm/inference-cpp/ - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-cpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-cpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-cpu - cd docker/llm/inference/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-serving-xpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-xpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-serving-xpu - run: | - echo "##############################################################" - echo "####### ipex-llm-serving-xpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-serving-xpu - cd docker/llm/serving/xpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . - sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - - ipex-llm-serving-cpu: - if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-cpu' || github.event.inputs.artifact == 'all' }} - runs-on: [self-hosted, Shire, AVX512] - - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - ref: ${{ github.event.inputs.sha }} - - name: docker login - run: | - docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD} - - name: ipex-llm-serving-cpu - run: | - echo "##############################################################" - echo "####### ipex-llm-serving-cpu ########" - echo "##############################################################" - export image=intelanalytics/ipex-llm-serving-cpu - cd docker/llm/serving/cpu/docker - sudo docker build \ - --no-cache=true \ - --build-arg http_proxy=${HTTP_PROXY} \ - --build-arg https_proxy=${HTTPS_PROXY} \ - --build-arg no_proxy=${NO_PROXY} \ - -t ${image}:${TAG} -f ./Dockerfile . 
- sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - sudo docker push 10.239.45.10/arda/${image}:${TAG} - sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG} - diff --git a/.github/workflows/release-ipex-llm.yaml b/.github/workflows/release-ipex-llm.yaml deleted file mode 100644 index 272aa1a8..00000000 --- a/.github/workflows/release-ipex-llm.yaml +++ /dev/null @@ -1,56 +0,0 @@ -name: Release IPEX-LLM Pypi - -on: - workflow_dispatch: - inputs: - version: - description: 'ipex-llm version (e.g. 2.2.0b1)' - required: true - default: '2.2.0b0' - type: string - -permissions: - contents: read - -jobs: - - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - - ipex-llm-release: - if: ${{ github.event_name == 'workflow_dispatch' }} - runs-on: [self-hosted, Bree] - needs: llm-cpp-build - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - - - name: set release version - env: - DEFAULT_VERSION: '2.2.0b0' - run: | - echo "RELEASE_VERSION=${{ github.event.inputs.version || env.DEFAULT_VERSION }}" >> $GITHUB_ENV - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: '3.7.15' - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install build - pip install wheel - pip install twine - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: Build package - run: | - echo ${RELEASE_VERSION} - - ## windows ## - bash python/llm/dev/release_default_windows.sh ${RELEASE_VERSION} true - - ## linux ## - bash python/llm/dev/release_default_linux.sh ${RELEASE_VERSION} true diff --git a/.github/workflows/release-pypi.yml b/.github/workflows/release-pypi.yml deleted file mode 100644 index e6ac2662..00000000 --- a/.github/workflows/release-pypi.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Nightly Release - -on: - # pull_request: - # branches: [ main ] - # paths: - # - '.github/workflows/nightly_build.yml' - # schedule: - # - cron: '00 15 * * *' # GMT time, 15:00 GMT == 23:00 China - # workflow_dispatch: - workflow_call: - inputs: - checkout-ref: - description: 'ref for checking out, including branch, tag or SHA' - required: false - type: string - release-version: - description: 'ipex-llm version (e.g. 
2.2.0b1)' - required: false - type: string - schedule-event: - description: 'whether it is triggered by schedule event' - required: true - type: boolean -permissions: - contents: read - -jobs: - - llm-cpp-build: - uses: ./.github/workflows/llm-binary-build.yml - - ipex-llm-build: - # python build can only be published once a day, please do not publish it manually - # if: ${{ github.event.schedule || github.event_name == 'workflow_dispatch' }} - runs-on: [self-hosted, Bree] - needs: llm-cpp-build - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3 - with: - repository: 'intel-analytics/ipex-llm' - ref: ${{ inputs.checkout-ref }} - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: '3.7.15' - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install build - pip install wheel - pip install twine - - - name: Download llm binary - uses: ./.github/actions/llm/download-llm-binary - - - name: set release version - run: | - if [[ "${{ inputs.schedule-event }}" == "true" ]]; then - export TIMESTAMP=`date '+%Y%m%d'` - export PYPI_VERSION=2.2.0 - export RELEASE_VERSION=${PYPI_VERSION}b${TIMESTAMP} - else - export RELEASE_VERSION=${{ inputs.release-version }} - fi - echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV - - - name: Build package - run: | - echo ${RELEASE_VERSION} - - ## windows ## - bash python/llm/dev/release_default_windows.sh ${RELEASE_VERSION} true - - ## linux ## - bash python/llm/dev/release_default_linux.sh ${RELEASE_VERSION} true
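
Note on the version stamping recorded in the deleted release-pypi.yml above: the nightly build number is a fixed base version with a date suffix, while a manual release takes the version passed in as a workflow input. Below is a minimal standalone sketch of that scheme, assuming the same 2.2.0 base and %Y%m%d stamp as the deleted workflow; the positional arguments and the surrounding release_default_*.sh scripts are illustrative placeholders, not part of the original file.

    #!/usr/bin/env bash
    # Sketch of the nightly version stamping used by the deleted workflow:
    # a scheduled run appends today's date to the base PyPI version,
    # a manual run uses the explicitly supplied version instead.
    set -euo pipefail

    SCHEDULE_EVENT=${1:-true}   # "true" for the nightly (scheduled) path
    INPUT_VERSION=${2:-}        # explicit version for manual runs, e.g. 2.2.0b1

    if [[ "$SCHEDULE_EVENT" == "true" ]]; then
        TIMESTAMP=$(date '+%Y%m%d')
        PYPI_VERSION=2.2.0
        RELEASE_VERSION="${PYPI_VERSION}b${TIMESTAMP}"
    else
        RELEASE_VERSION="$INPUT_VERSION"
    fi

    # In the workflow this value was exported via $GITHUB_ENV and then
    # passed to the platform-specific release scripts.
    echo "RELEASE_VERSION=${RELEASE_VERSION}"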