Remove Migrated Workflows to Avoid Duplication and Confusion (#12801)
* Delete .github/actions/llm directory
* Delete .github/workflows/release-ipex-llm.yaml
* Delete .github/workflows/llm-nightly-test.yml
* Delete .github/workflows/llm_unit_tests.yml
* Delete .github/workflows/llm-binary-build.yml
* Delete .github/workflows/llm_example_tests.yml
* Delete .github/workflows/llm_performance_tests.yml
* Delete .github/workflows/manually_build.yml
* Delete .github/workflows/manually_build_for_testing.yml
* Delete .github/workflows/release-pypi.yml
parent eb2df5ed70
commit 2701a9d1e3

14 changed files with 0 additions and 4233 deletions
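Since remaining workflows may still reference the deleted files by path, a repository-wide search is a quick sanity check that the removal leaves nothing dangling. A hypothetical one-liner (not part of this commit), run from the repository root:

    grep -rn --include='*.yml' -e 'llm-binary-build' -e 'llm_unit_tests' -e 'actions/llm/' .github/ || echo 'no remaining references'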
.github/actions/llm/cli-test-linux/action.yml (vendored, 46 deletions)
@@ -1,46 +0,0 @@
name: "llm-cli Flow Verification (Linux)"
description: "Verify the llm-cli flow on linux"

runs:
  using: "composite"
  steps:
    - name: Test llama llm-cli
      shell: bash
      run: |
        llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'

        timeout 30s llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -i -p \
        'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true

        if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
          exit 1
        fi
        rm test.out

    - name: Test gptneox llm-cli
      shell: bash
      run: |
        llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'

        timeout 30s llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -i -p \
        'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true

        if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
          exit 1
        fi
        rm test.out

    - name: Test bloom llm-cli
      shell: bash
      run: |
        llm-cli -t $THREAD_NUM -n 256 -x bloom -m $BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'

    - name: Test starcoder llm-cli
      shell: bash
      run: |
        llm-cli -t $THREAD_NUM -n 256 -x starcoder -m $STARCODER_INT4_CKPT_PATH -p 'def check_odd('

    # - name: Test chatglm llm-cli
    #   shell: bash
    #   run: |
    #     llm-cli -t $THREAD_NUM -n 256 -x chatglm -m $CHATGLM_INT4_CKPT_PATH -p '你好'
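This composite action assumes THREAD_NUM and the *_INT4_CKPT_PATH variables are already present in the environment (llm_unit_tests.yml below exports them). A minimal hypothetical caller step, sketched for illustration only:

    - name: Run llm-cli tests (Linux)
      uses: ./.github/actions/llm/cli-test-linux
      env:
        THREAD_NUM: 24   # the checkpoint path variables would be set in earlier steps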
.github/actions/llm/convert-test/action.yml (vendored, 43 deletions)
@@ -1,43 +0,0 @@
name: "IPEX-LLM convert tests"
description: "IPEX-LLM convert test, including downloading original models"

runs:
  using: "composite"
  steps:
    - name: Download original models (LLaMA)
      shell: bash
      run: |
        if [ ! -d $LLAMA_ORIGIN_PATH ]; then
          echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..."
          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR
        fi

    - name: Download original models (GPT-NeoX)
      shell: bash
      run: |
        if [ ! -d $GPTNEOX_ORIGIN_PATH ]; then
          echo "Directory $GPTNEOX_ORIGIN_PATH not found. Downloading from FTP server..."
          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gptneox-7b-redpajama-bf16 -P $ORIGIN_DIR
        fi

    - name: Download original models (BLOOM)
      shell: bash
      run: |
        if [ ! -d $BLOOM_ORIGIN_PATH ]; then
          echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..."
          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloomz-7b1 -P $ORIGIN_DIR
        fi

    - name: Download original models (StarCoder)
      shell: bash
      run: |
        if [ ! -d $STARCODER_ORIGIN_PATH ]; then
          echo "Directory $STARCODER_ORIGIN_PATH not found. Downloading from FTP server..."
          wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gpt_bigcode-santacoder -P $ORIGIN_DIR
        fi

    - name: Convert test
      shell: bash
      run: |
        echo "Running the convert models tests..."
        python -m pytest -s python/llm/test/convert/test_convert_model.py
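The four download steps above repeat the same download-if-missing guard. A hypothetical consolidated step (a sketch only; model names and variables are taken from the steps above, the fetch_model helper is invented here):

    - name: Download original models
      shell: bash
      run: |
        # download a model directory from the FTP mirror only if it is absent locally
        fetch_model() {
          local dir="$1" name="$2"
          if [ ! -d "$dir" ]; then
            echo "Directory $dir not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 "$LLM_FTP_URL/llm/$name" -P "$ORIGIN_DIR"
          fi
        }
        fetch_model "$LLAMA_ORIGIN_PATH" llama-7b-hf
        fetch_model "$GPTNEOX_ORIGIN_PATH" gptneox-7b-redpajama-bf16
        fetch_model "$BLOOM_ORIGIN_PATH" bloomz-7b1
        fetch_model "$STARCODER_ORIGIN_PATH" gpt_bigcode-santacoder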
.github/actions/llm/download-llm-binary/action.yml (40 deletions)
@@ -1,40 +0,0 @@
name: Download LLM binary files
description: Download built binary files from github artifact
inputs:
  platform:
    description: 'Platforms to built on'
    default: 'Windows,Linux'
    required: false
    type: string
runs:
  using: "composite"
  steps:
    - name: Download all build files
      uses: actions/download-artifact@v3
    - name: Move build resources
      shell: bash
      run: |
        rm -rf python/llm/llm-binary || true
        mkdir -p python/llm/llm-binary
        if ${{contains(inputs.platform, 'Linux')}}; then
          mv linux-avx2/* python/llm/llm-binary/
          mv linux-avx512/* python/llm/llm-binary/
          mv linux-avxvnni/* python/llm/llm-binary/
          mv linux-avx/* python/llm/llm-binary/
          mv linux-amx/* python/llm/llm-binary/
        fi
        if ${{contains(inputs.platform, 'Windows')}}; then
          mv windows-avx2/* python/llm/llm-binary/
          mv windows-avx-vnni/* python/llm/llm-binary/
          mv windows-avx/* python/llm/llm-binary/
          mv windows-npu-level0/* python/llm/llm-binary/
        fi
        rm -rf linux-avx2 || true
        rm -rf linux-avx512 || true
        rm -rf linux-avxvnni || true
        rm -rf linux-avx || true
        rm -rf linux-amx || true
        rm -rf windows-avx2 || true
        rm -rf windows-avx-vnni || true
        rm -rf windows-avx || true
        rm -rf windows-npu-level0 || true
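A hypothetical caller step (sketch): the platform input controls which artifact directories the action expects to move, so it should match the platform value passed to llm-binary-build.yml:

    - name: Download llm binary
      uses: ./.github/actions/llm/download-llm-binary
      with:
        platform: 'Linux'   # only the linux-* artifacts are moved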
.github/actions/llm/example-test/action.yml (vendored, 14 deletions)
@@ -1,14 +0,0 @@
name: 'IPEX-LLM example tests'
description: 'IPEX-LLM example tests'

runs:
  using: "composite"
  steps:
    - name: Test LLAMA2
      shell: bash
      env:
        INT4_CKPT_DIR: ./llm/ggml-actions/stable
        LLM_DIR: ./llm

      run: |
        bash python/llm/dev/test/run-example-tests.sh
.github/actions/llm/setup-llm-env/action.yml (vendored, 62 deletions)
@@ -1,62 +0,0 @@
name: "Setup IPEX-LLM Env"
description: "IPEX-LLM installation"
inputs:
  extra-dependency:
    description: "Name of extra dependencies filled in brackets"
    required: false
    default: "all"
runs:
  using: "composite"
  steps:
    - name: Create conda env for llm tests and conduct install tests
      shell: bash
      run: |
        # make sure we install the latest version for bigdl-core-xe related packages
        pip uninstall bigdl-core-xe -y || true
        pip uninstall bigdl-core-xe-batch -y || true
        pip uninstall bigdl-core-xe-addons -y || true
        pip uninstall bigdl-core-xe-esimd -y || true
        pip uninstall bigdl-core-xe-21 -y || true
        pip uninstall bigdl-core-xe-batch-21 -y || true
        pip uninstall bigdl-core-xe-addons-21 -y || true
        pip uninstall bigdl-core-xe-esimd-21 -y || true
        sed -i 's/"bigdl-core-xe==" + CORE_XE_VERSION + "/"bigdl-core-xe/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-batch==" + CORE_XE_VERSION + "/"bigdl-core-xe-batch/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-addons==" + CORE_XE_VERSION + "/"bigdl-core-xe-addons/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-esimd==" + CORE_XE_VERSION + "/"bigdl-core-xe-esimd/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-21==" + CORE_XE_VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-batch-21==" + CORE_XE_VERSION/"bigdl-core-xe-batch-21"/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-addons-21==" + CORE_XE_VERSION/"bigdl-core-xe-addons-21"/g' python/llm/setup.py
        sed -i 's/"bigdl-core-xe-esimd-21==" + CORE_XE_VERSION/"bigdl-core-xe-esimd-21"/g' python/llm/setup.py

        pip uninstall bigdl-core-xe-all -y || true
        sed -i 's/"bigdl-core-xe-all==" + CORE_XE_VERSION/"bigdl-core-xe-all"/g' python/llm/setup.py

        pip install requests
        if [[ ${{ runner.os }} == 'Linux' ]]; then
          bash python/llm/dev/release_default_linux.sh default false
        elif [[ ${{ runner.os }} == 'Windows' ]]; then
          bash python/llm/dev/release_default_windows.sh default false
        else
          echo "Runner os is not supported!!!!!"
          exit 1
        fi
        whl_name=$(ls python/llm/dist)
        if [[ ${{ inputs.extra-dependency }} == 'xpu_2.0' ]]; then
          pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.0]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
          pip install pytest expecttest
        elif [[ ${{ inputs.extra-dependency }} == 'xpu_2.1' ]]; then
          pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.1]" --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
          pip install pytest expecttest
        elif [[ ${{ inputs.extra-dependency }} == 'xpu_2.6' ]]; then
          pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[xpu_2.6]" --extra-index-url https://download.pytorch.org/whl/test/xpu
          pip install pytest
        else
          if [[ ${{ runner.os }} == 'Linux' ]]; then
            pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]" --extra-index-url https://download.pytorch.org/whl/cpu
          elif [[ ${{ runner.os }} == 'Windows' ]]; then
            pip install --upgrade --pre -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]"
          fi
          pip install pytest
          bash python/llm/test/run-llm-install-tests.sh
        fi
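A hypothetical caller step (sketch): extra-dependency selects which wheel extra and package index the install branches above use:

    - name: Install IPEX-LLM
      uses: ./.github/actions/llm/setup-llm-env
      with:
        extra-dependency: "xpu_2.1"   # one of: all (default), xpu_2.0, xpu_2.1, xpu_2.6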
.github/workflows/llm-binary-build.yml (vendored, 511 deletions)
@@ -1,511 +0,0 @@
name: LLM Binary Build

# Cancel previous runs in the PR when you push new commits
# concurrency:
#   group: ${{ github.workflow }}-llm-binary-build-${{ github.event.pull_request.number || github.run_id }}
#   cancel-in-progress: false

permissions:
  contents: read

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the main branch
  # push:
  #   branches: [main]
  #   paths:
  #     - ".github/workflows/llm-binary-build.yml"
  # pull_request:
  #   branches: [main]
  #   paths:
  #     - ".github/workflows/llm-binary-build.yml"
  # workflow_dispatch:
  #   inputs:
  #     llmcpp-ref:
  #       description: 'Ref of llm.cpp code'
  #       default: ''
  #       required: false
  #       type: string
  #     platform:
  #       description: 'Platforms to built on'
  #       default: '["Windows", "Linux"]'
  #       required: false
  #       type: string
  workflow_call:
    inputs:
      llmcpp-ref:
        description: 'Ref of llm.cpp code'
        default: ''
        required: false
        type: string
      platform:
        description: 'Platforms to built on'
        default: 'Windows,Linux'
        required: false
        type: string

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  check-linux-avxvnni-artifact:
    if: ${{contains(inputs.platform, 'Linux')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: linux-avxvnni

  linux-build-avxvnni:
    runs-on: [self-hosted, AVX2, almalinux8]
    needs: check-linux-avxvnni-artifact
    if: needs.check-linux-avxvnni-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV"
      - name: Install Build Environment
        shell: bash
        run: |
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          yum install --nogpgcheck -y gcc-toolset-11 cmake git
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Build binary
        shell: bash
        run: |
          scl enable gcc-toolset-11 "cmake -B build"
          scl enable gcc-toolset-11 "cmake --build build --config Release -j"
      - name: Move release binary
        shell: bash
        run: |
          mkdir release
          mv build/main-bloom release/main-bloom
          mv build/libbloom-api.so release/libbloom-api.so
          mv build/quantize-bloom release/quantize-bloom
          mv build/libbloom.so release/libbloom_avxvnni.so
          mv build/main-llama release/main-llama
          mv build/libllama-api.so release/libllama-api.so
          mv build/quantize-llama release/quantize-llama
          mv build/libllama.so release/libllama_avxvnni.so
          mv build/main-gptneox release/main-gptneox
          mv build/libgptneox-api.so release/libgptneox-api.so
          mv build/quantize-gptneox release/quantize-gptneox
          mv build/libgptneox.so release/libgptneox_avxvnni.so
          mv build/main-starcoder release/main-starcoder
          mv build/libstarcoder-api.so release/libstarcoder-api.so
          mv build/quantize-starcoder release/quantize-starcoder
          mv build/libstarcoder.so release/libstarcoder_avxvnni.so
      - name: Archive build files
        uses: actions/upload-artifact@v3
        with:
          name: linux-avxvnni
          path: |
            release
      - name: Clean up test environment
        shell: bash
        run: |
          make clean

  check-linux-avx512-artifact:
    if: ${{contains(inputs.platform, 'Linux')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: linux-avx512

  linux-build-avx512:
    runs-on: [self-hosted, AVX512, almalinux8]
    needs: check-linux-avx512-artifact
    if: needs.check-linux-avx512-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV"
      - name: Install Build Environment
        shell: bash
        run: |
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          yum install --nogpgcheck -y gcc-toolset-11 cmake git
          conda remove -n python39 --all -y
          conda create -n python39 python=3.9 -y
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Build avx512 binary
        shell: bash
        run: |
          scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=OFF -B build"
          scl enable gcc-toolset-11 "cmake --build build --config Release -j"
      - name: Move avx512 release binary
        shell: bash
        run: |
          mkdir avx512_release
          mv build/quantize-bloom avx512_release/quantize-bloom_avx512
          mv build/libbloom.so avx512_release/libbloom_avx512.so
          mv build/quantize-llama avx512_release/quantize-llama_avx512
          mv build/libllama.so avx512_release/libllama_avx512.so
          mv build/quantize-gptneox avx512_release/quantize-gptneox_avx512
          mv build/libgptneox.so avx512_release/libgptneox_avx512.so
          mv build/quantize-starcoder avx512_release/quantize-starcoder_avx512
          mv build/libstarcoder.so avx512_release/libstarcoder_avx512.so
      - name: Build avx2 binary
        shell: bash
        run: |
          scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=ON -B build"
          scl enable gcc-toolset-11 "cmake --build build --config Release -j"
      - name: Move avx2 release binary
        shell: bash
        run: |
          mkdir avx2_release
          mv build/libbloom.so avx2_release/libbloom_avx2.so
          mv build/libllama.so avx2_release/libllama_avx2.so
          mv build/libgptneox.so avx2_release/libgptneox_avx2.so
          mv build/libstarcoder.so avx2_release/libstarcoder_avx2.so
      - name: Build avx binary
        shell: bash
        run: |
          scl enable gcc-toolset-11 "cmake -DONLYAVX=ON -DONLYAVX2=OFF -B build"
          scl enable gcc-toolset-11 "cmake --build build --config Release -j"
      - name: Move avx release binary
        shell: bash
        run: |
          mkdir avx_release
          mv build/libbloom.so avx_release/libbloom_avx.so
          mv build/libllama.so avx_release/libllama_avx.so
          mv build/libgptneox.so avx_release/libgptneox_avx.so
          mv build/libstarcoder.so avx_release/libstarcoder_avx.so
      - name: Archive avx512 build files
        uses: actions/upload-artifact@v3
        with:
          name: linux-avx512
          path: |
            avx512_release
      - name: Archive avx2 build files
        uses: actions/upload-artifact@v3
        with:
          name: linux-avx2
          path: |
            avx2_release
      - name: Archive avx build files
        uses: actions/upload-artifact@v3
        with:
          name: linux-avx
          path: |
            avx_release
      - name: Clean up test environment
        if: ${{ always() }}
        shell: bash
        run: |
          make clean
          conda remove -n python39 --all -y

  check-linux-amx-artifact:
    if: ${{contains(inputs.platform, 'Linux')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: linux-amx

  linux-build-amx:
    runs-on: [self-hosted, amx, almalinux8]
    needs: check-linux-amx-artifact
    if: needs.check-linux-amx-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=${GITHUB_ACCESS_TOKEN}" >> "$GITHUB_ENV"
      - name: Install Build Environment
        shell: bash
        run: |
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          yum install --nogpgcheck -y gcc-toolset-11 cmake git
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Build amx binary
        shell: bash
        run: |
          scl enable gcc-toolset-11 "cmake -DONLYAVX=OFF -DONLYAVX2=OFF -B build"
          scl enable gcc-toolset-11 "cmake --build build --config Release -j"
      - name: Move amx release binary
        shell: bash
        run: |
          mkdir amx_release
          mv build/quantize-bloom amx_release/quantize-bloom_amx
          mv build/libbloom.so amx_release/libbloom_amx.so
          mv build/quantize-llama amx_release/quantize-llama_amx
          mv build/libllama.so amx_release/libllama_amx.so
          mv build/quantize-gptneox amx_release/quantize-gptneox_amx
          mv build/libgptneox.so amx_release/libgptneox_amx.so
          mv build/quantize-starcoder amx_release/quantize-starcoder_amx
          mv build/libstarcoder.so amx_release/libstarcoder_amx.so
      - name: Archive amx build files
        uses: actions/upload-artifact@v3
        with:
          name: linux-amx
          path: |
            amx_release
      - name: Clean up test environment
        shell: bash
        run: |
          make clean

  check-windows-avx2-artifact:
    if: ${{contains(inputs.platform, 'Windows')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: windows-avx2

  windows-build-avx2:
    runs-on: [self-hosted, Windows, AVX-VNNI-Build]
    needs: check-windows-avx2-artifact
    if: needs.check-windows-avx2-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Add msbuild to PATH
        uses: microsoft/setup-msbuild@v1.1
        with:
          msbuild-architecture: x64
      - name: Add cmake to PATH
        uses: ilammy/msvc-dev-cmd@v1
      - name: Build binary
        shell: powershell
        run: |
          cmake .
          cmake --build . --config Release -j
      - name: Archive build files
        uses: actions/upload-artifact@v3
        with:
          name: windows-avx2
          path: |
            build/Release

  check-windows-avx-vnni-artifact:
    if: ${{contains(inputs.platform, 'Windows')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: windows-avx-vnni

  windows-build-avx-vnni:
    runs-on: [self-hosted, Windows, AVX-VNNI-Build]
    needs: check-windows-avx-vnni-artifact
    if: needs.check-windows-avx-vnni-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Add msbuild to PATH
        uses: microsoft/setup-msbuild@v1.1
        with:
          msbuild-architecture: x64
      - name: Add cmake to PATH
        uses: ilammy/msvc-dev-cmd@v1
      - name: Build binary
        shell: powershell
        run: |
          cmake -DAVXVNNI=ON .
          cmake --build . --config Release -j
      - name: Move release binary
        shell: powershell
        run: |
          if (Test-Path ./release) { rm -r -fo release }
          mkdir release
          # mv build/Release/main-bloom.exe release/main-bloom_vnni.exe
          mv build/Release/quantize-bloom.exe release/quantize-bloom_vnni.exe
          mv build/Release/bloom.dll release/libbloom_vnni.dll

          # mv build/Release/main-llama.exe release/main-llama_vnni.exe
          mv build/Release/quantize-llama.exe release/quantize-llama_vnni.exe
          mv build/Release/llama.dll release/libllama_vnni.dll

          # mv build/Release/main-gptneox.exe release/main-gptneox_vnni.exe
          mv build/Release/quantize-gptneox.exe release/quantize-gptneox_vnni.exe
          mv build/Release/gptneox.dll release/libgptneox_vnni.dll

          # mv build/Release/main-starcoder.exe release/main-starcoder_vnni.exe
          mv build/Release/quantize-starcoder.exe release/quantize-starcoder_vnni.exe
          mv build/Release/starcoder.dll release/libstarcoder_vnni.dll
      - name: Archive build files
        uses: actions/upload-artifact@v3
        with:
          name: windows-avx-vnni
          path: |
            release

  check-windows-avx-artifact:
    if: ${{contains(inputs.platform, 'Windows')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: windows-avx

  windows-build-avx:
    runs-on: [self-hosted, Windows, AVX-VNNI-Build]
    needs: check-windows-avx-artifact
    if: needs.check-windows-avx-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Add msbuild to PATH
        uses: microsoft/setup-msbuild@v1.1
        with:
          msbuild-architecture: x64
      - name: Add cmake to PATH
        uses: ilammy/msvc-dev-cmd@v1
      - name: Build binary
        shell: powershell
        run: |
          cmake -DONLYAVX=ON .
          cmake --build . --config Release -j
      - name: Move release binary
        shell: powershell
        run: |
          if (Test-Path ./release) { rm -r -fo release }
          mkdir release
          mv build/Release/bloom.dll release/libbloom_avx.dll

          mv build/Release/llama.dll release/libllama_avx.dll

          mv build/Release/gptneox.dll release/libgptneox_avx.dll

          mv build/Release/starcoder.dll release/libstarcoder_avx.dll
      - name: Archive build files
        uses: actions/upload-artifact@v3
        with:
          name: windows-avx
          path: |
            release

  check-windows-npu-level0-artifact:
    if: ${{contains(inputs.platform, 'Windows')}}
    runs-on: [Shire]
    outputs:
      if-exists: ${{steps.check_artifact.outputs.exists}}
    steps:
      - name: Check if built
        id: check_artifact
        uses: xSAVIKx/artifact-exists-action@v0
        with:
          name: windows-npu-level0

  windows-build-npu-level0:
    runs-on: [self-hosted, Windows, npu-level0]
    needs: check-windows-npu-level0-artifact
    if: needs.check-windows-npu-level0-artifact.outputs.if-exists == 'false'
    steps:
      - name: Set access token
        run: |
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN" >> $env:GITHUB_ENV
          echo "github_access_token=$env:GITHUB_ACCESS_TOKEN"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: "intel-analytics/llm.cpp"
          ref: ${{ inputs.llmcpp-ref }}
          token: ${{ env.github_access_token }}
          submodules: "recursive"
      - name: Add msbuild to PATH
        uses: microsoft/setup-msbuild@v1.1
        with:
          msbuild-architecture: x64
      - name: Add cmake to PATH
        uses: ilammy/msvc-dev-cmd@v1
      - name: Build binary
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\openvino_2024.4.0\setupvars.bat"
          cd bigdl-core-npu-level0
          sed -i "/FetchContent_MakeAvailable(intel_npu_acceleration_library)/s/^/#/" CMakeLists.txt
          mkdir build
          cd build
          cmake ..
          cmake --build . --config Release -t pipeline
      - name: Move release binary
        shell: powershell
        run: |
          cd bigdl-core-npu-level0
          if (Test-Path ./release) { rm -r -fo release }
          mkdir release
          mv build/Release/pipeline.dll release/pipeline.dll
      - name: Archive build files
        uses: actions/upload-artifact@v3
        with:
          name: windows-npu-level0
          path: |
            bigdl-core-npu-level0/release

  # to make llm-binary-build optionally skippable
  dummy-step:
    if: ${{ inputs.platform == 'Dummy' }}
    runs-on: ubuntu-latest
    steps:
      - name: dummy echo
        run: |
          echo "dummy step"
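Because every build job above is gated on contains(inputs.platform, ...), a caller can skip binary builds entirely by passing a platform string that matches neither OS; the dummy-step job exists so the reusable workflow still has something to run. A hypothetical caller (sketch):

    jobs:
      llm-cpp-build:
        uses: ./.github/workflows/llm-binary-build.yml
        with:
          platform: 'Dummy'   # matches no build job; only dummy-step runs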
.github/workflows/llm-nightly-test.yml (vendored, 125 deletions)
@@ -1,125 +0,0 @@
name: LLM Nightly Tests

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-nightly-test-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true

permissions:
  contents: read

# Controls when the action will run.
on:
  # schedule:
  #   - cron: "00 13 * * *" # GMT time, 13:00 GMT == 21:00 China
  # pull_request:
  #   branches: [main]
  #   paths:
  #     - ".github/workflows/llm-nightly-test.yml"
  #     - ".github/actions/llm/setup-llm-env/action.yml"
  #     - ".github/actions/llm/remove-llm-env/action.yml"
  #     - ".github/actions/llm/convert-test/action.yml"
  # # Allows you to run this workflow manually from the Actions tab
  # workflow_dispatch:
  workflow_call:
    inputs:
      checkout-ref:
        description: 'ref for checking out, including branch, tag or SHA'
        required: true
        type: string

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml
  llm-nightly-convert-test:
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: windows
            instruction: AVX-VNNI-UT
            python-version: "3.11"
          - os: ubuntu-20.04-lts
            instruction: avx512
            python-version: "3.11"
    runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"]
    env:
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
    steps:
      - name: Set model directories
        shell: bash
        run: |
          echo "ORIGIN_DIR=$(pwd)/../llm/origin-models" >> "$GITHUB_ENV"
          echo "INT4_CKPT_DIR=$(pwd)/../llm/nightly-converted-models" >> "$GITHUB_ENV"
      - name: Create model directories
        shell: bash
        run: |
          if [ ! -d $ORIGIN_DIR ]; then
            mkdir -p $ORIGIN_DIR
          fi
          if [ ! -d $INT4_CKPT_DIR ]; then
            mkdir -p $INT4_CKPT_DIR
          fi
      - name: Set environment variables
        shell: bash
        run: |
          echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV"
          echo "GPTNEOX_ORIGIN_PATH=${ORIGIN_DIR}/gptneox-7b-redpajama-bf16" >> "$GITHUB_ENV"
          echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloomz-7b1" >> "$GITHUB_ENV"
          echo "STARCODER_ORIGIN_PATH=${ORIGIN_DIR}/gpt_bigcode-santacoder" >> "$GITHUB_ENV"

          echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_q4_0.bin" >> "$GITHUB_ENV"
          echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_gptneox_q4_0.bin" >> "$GITHUB_ENV"
          echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_q4_0.bin" >> "$GITHUB_ENV"
          echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_starcoder_q4_0.bin" >> "$GITHUB_ENV"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Install IPEX-LLM
        uses: ./.github/actions/llm/setup-llm-env

      - name: Download original models & convert
        uses: ./.github/actions/llm/convert-test

      - name: Upload ckpt to ftp
        shell: bash
        if: runner.os == 'Linux' && github.event_name == 'schedule'
        run: |
          curl -T $LLAMA_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_llama_7b_q4_0.bin
          curl -T $GPTNEOX_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_redpajama_7b_q4_0.bin
          curl -T $BLOOM_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_bloom_7b_q4_0.bin
          curl -T $STARCODER_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_santacoder_1b_q4_0.bin
      - name: Delete ckpt
        shell: bash
        run: |
          rm -rf $LLAMA_INT4_CKPT_PATH
          rm -rf $GPTNEOX_INT4_CKPT_PATH
          rm -rf $BLOOM_INT4_CKPT_PATH
          rm -rf $STARCODER_INT4_CKPT_PATH

  llm-unit-tests:
    needs: llm-cpp-build
    uses: ./.github/workflows/llm_unit_tests.yml
    with:
      checkout-ref: ${{ inputs.checkout-ref }}
  llm-example-test:
    needs: llm-cpp-build
    uses: ./.github/workflows/llm_example_tests.yml
    with:
      checkout-ref: ${{ inputs.checkout-ref }}
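Note that llm-nightly-test.yml is itself only reusable (its sole trigger is workflow_call). A hypothetical top-level caller (sketch only; the actual scheduling now lives in whichever workflow these files were migrated to):

    jobs:
      nightly:
        uses: ./.github/workflows/llm-nightly-test.yml
        with:
          checkout-ref: main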
.github/workflows/llm_example_tests.yml (vendored, 82 deletions)
@@ -1,82 +0,0 @@
name: LLM Example Test

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-example-tests-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true

permissions:
  contents: read

# Controls when the action will run.
on:
  # schedule:
  #   - cron: '00 13 * * *' # GMT time, 13:00 GMT == 21:00 China
  # pull_request:
  #   branches: [ main ]
  #   paths:
  #     - '.github/workflows/llm_example_tests.yml'
  #     - '.github/workflows/llm-binary-build.yml'
  #     - '.github/actions/llm/example-test/action.yml'
  #     - '.github/actions/llm/setup-llm-env/action.yml'
  #     - '.github/actions/llm/remove-llm-env/action.yml'
  #     - '.github/actions/llm/download-llm-binary/action.yml'
  #     - 'python/llm/dev/test/run-example-tests.sh'
  #     - 'python/llm/example/**'
  # workflow_dispatch:
  workflow_call:
    inputs:
      checkout-ref:
        description: 'ref for checking out, including branch, tag or SHA'
        required: true
        type: string

env:
  INT4_CKPT_DIR: ./llm/ggml-actions/stable
  LLM_DIR: ./llm

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml
  llm-example-test:
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.11"]
        instruction: ["AVX512"]
    runs-on: [ self-hosted, llm,"${{matrix.instruction}}", ubuntu-20.04-lts ]
    env:
      THREAD_NUM: 24
    steps:
      - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # actions/checkout@v2
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env
        env:
          ANALYTICS_ZOO_ROOT: ${{ github.workspace }}

      - name: Run LLM example test
        uses: ./.github/actions/llm/example-test
        env:
          ANALYTICS_ZOO_ROOT: ${{ github.workspace }}

      # - name: Clean up test environment
      #   uses: ./.github/actions/llm/remove-llm-env
      #   env:
      #     ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
.github/workflows/llm_performance_tests.yml (vendored, 1964 deletions)
File diff suppressed because it is too large.
										495
									
								
								.github/workflows/llm_unit_tests.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										495
									
								
								.github/workflows/llm_unit_tests.yml
									
									
									
									
										vendored
									
									
								
							| 
						 | 
				
			
			@ -1,495 +0,0 @@
name: LLM Unit Tests

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-unittest-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true

permissions:
  contents: read

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the main branch
  # push:
  #   branches: [main]
  #   paths:
  #     - "python/llm/**"
  #     - ".github/workflows/llm_unit_tests.yml"
  #     - ".github/workflows/llm-binary-build.yml"
  #     - ".github/actions/llm/setup-llm-env/action.yml"
  #     - ".github/actions/llm/remove-llm-env/action.yml"
  #     - ".github/actions/llm/cli-test-linux/action.yml"
  #     - ".github/actions/llm/cli-test-windows/action.yml"
  #     - ".github/actions/llm/download-llm-binary/action.yml"
  # pull_request:
  #   branches: [main]
  #   paths:
  #     - "python/llm/**"
  #     - ".github/workflows/llm_unit_tests.yml"
  #     - ".github/workflows/llm-binary-build.yml"
  #     - ".github/actions/llm/setup-llm-env/action.yml"
  #     - ".github/actions/llm/remove-llm-env/action.yml"
  #     - ".github/actions/llm/cli-test-linux/action.yml"
  #     - ".github/actions/llm/cli-test-windows/action.yml"
  #     - ".github/actions/llm/download-llm-binary/action.yml"
  # workflow_dispatch:
  workflow_call:
    inputs:
      checkout-ref:
        description: 'ref for checking out, including branch, tag or SHA'
        required: true
        type: string

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml
  setup-python-version:
    runs-on: ubuntu-latest
    outputs:
      python-version: ${{ steps.setup-python-version.outputs.python-version }}
    steps:
      - name: setup-python-version
        id: setup-python-version
        run: |
          if [ ${{ github.event_name }} == 'schedule' ]; then
            python_version='["3.9", "3.10", "3.11"]'
          else
            python_version='["3.11"]'
          fi
          list=$(echo ${python_version} | jq -c)
          echo "python-version=${list}" >> "$GITHUB_OUTPUT"
  llm-unit-test:
    needs: [setup-python-version, llm-cpp-build]
    strategy:
      fail-fast: false
      matrix:
        os: [windows, ubuntu-20.04-lts]
        python-version: ${{ fromJson(needs.setup-python-version.outputs.python-version) }}
        include:
          - os: windows
            instruction: AVX-VNNI-UT
          - os: ubuntu-20.04-lts
            instruction: avx512
    runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"]
    env:
      THREAD_NUM: 24
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
    steps:
      - name: Set model directories
        shell: bash
        run: |
          echo "DATASET_DIR=${{ github.workspace }}/../llm/datasets" >> "$GITHUB_ENV"
          echo "ORIGIN_DIR=${{ github.workspace }}/../llm/origin-models" >> "$GITHUB_ENV"
          echo "INT4_CKPT_DIR=${{ github.workspace }}/../llm/converted-models" >> "$GITHUB_ENV"
      - name: Create model directories
        shell: bash
        run: |
          if [ ! -d $DATASET_DIR ]; then
            mkdir -p $DATASET_DIR
          fi
          if [ ! -d $ORIGIN_DIR ]; then
            mkdir -p $ORIGIN_DIR
          fi
          if [ ! -d $INT4_CKPT_DIR ]; then
            mkdir -p $INT4_CKPT_DIR
          fi
      - name: Set environment variables
        shell: bash
        run: |
          echo "SPEECH_DATASET_PATH=${DATASET_DIR}/librispeech_asr_dummy" >> "$GITHUB_ENV"
          echo "COMMON_VOICE_PATH=${DATASET_DIR}/common_voice" >> "$GITHUB_ENV"

          echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV"
          echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloom-7b1" >> "$GITHUB_ENV"
          echo "ORIGINAL_CHATGLM2_6B_PATH=${ORIGIN_DIR}/chatglm2-6b" >> "$GITHUB_ENV"
          echo "ORIGINAL_CODESHELL_7B_PATH=${ORIGIN_DIR}/CodeShell-7B-Chat" >> "$GITHUB_ENV"
          echo "ORIGINAL_WHISPER_TINY_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV"
          echo "MISTRAL_ORIGIN_PATH=${ORIGIN_DIR}/Mistral-7B-v0.1" >> "$GITHUB_ENV"
          echo "LLAMA2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Llama-2-7b-chat-hf" >> "$GITHUB_ENV"
          echo "VICUNA_7B_1_3_ORIGIN_PATH=${ORIGIN_DIR}/vicuna-7b-v1.3" >> "$GITHUB_ENV"

          echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_7b_q4_0.bin" >> "$GITHUB_ENV"
          echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_redpajama_7b_q4_0.bin" >> "$GITHUB_ENV"
          echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_7b_q4_0.bin" >> "$GITHUB_ENV"
          echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_santacoder_1b_q4_0.bin" >> "$GITHUB_ENV"
          echo "CHATGLM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/chatglm2-6b-q4_0.bin" >> "$GITHUB_ENV"
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel

          # May remove later
          pip uninstall sentence-transformers -y || true

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env

      - name: Download ckpt & original models
        shell: bash
        run: |
          if [ ! -e $LLAMA_INT4_CKPT_PATH ]; then
            echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..."
            echo "wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR"
            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR
          fi
          if [ ! -e $GPTNEOX_INT4_CKPT_PATH ]; then
            echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..."
            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_redpajama_7b_q4_0.bin -P $INT4_CKPT_DIR
          fi
          if [ ! -e $BLOOM_INT4_CKPT_PATH ]; then
            echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..."
            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_bloom_7b_q4_0.bin -P $INT4_CKPT_DIR
          fi
          if [ ! -e $STARCODER_INT4_CKPT_PATH ]; then
            echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..."
            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin -P $INT4_CKPT_DIR
          fi
          # if [ ! -e $CHATGLM_INT4_CKPT_PATH ]; then
          #   echo "Directory $CHATGLM_INT4_CKPT_PATH not found. Downloading from FTP server..."
          #   wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/chatglm2-6b-q4_0.bin -P $INT4_CKPT_DIR
          # fi
          if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then
            echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR
          fi
          if [ ! -d $ORIGINAL_CODESHELL_7B_PATH ]; then
            echo "Directory $ORIGINAL_CODESHELL_7B_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/CodeShell-7B-Chat -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/CodeShell-7B-Chat -P $ORIGIN_DIR
          fi
          if [ ! -d $ORIGINAL_WHISPER_TINY_PATH ]; then
            echo "Directory $ORIGINAL_WHISPER_TINY_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR
          fi
          if [ ! -d $MISTRAL_ORIGIN_PATH ]; then
            echo "Directory $MISTRAL_ORIGIN_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-v0.1 -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-v0.1 -P $ORIGIN_DIR
          fi
          if [ ! -d $LLAMA_ORIGIN_PATH ]; then
            echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..."
            echo "wget --no-verbose $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR
          fi
          if [ ! -d $BLOOM_ORIGIN_PATH ]; then
            echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..."
            echo "wget --no-verbose $LLM_FTP_URL/llm/bloom-7b1 -P $ORIGIN_DIR"
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloom-7b1 -P $ORIGIN_DIR
          fi
          if [ ! -d $SPEECH_DATASET_PATH ]; then
            echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR"
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR
          fi
          if [ ! -d $COMMON_VOICE_PATH ]; then
            echo "Directory $COMMON_VOICE_PATH not found. Downloading from FTP server..."
            echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR"
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR
          fi
          if [ ! -d $LLAMA2_7B_ORIGIN_PATH ]; then
            echo "Directory $LLAMA2_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Llama-2-7b-chat-hf -P $ORIGIN_DIR
          fi
          if [ ! -d $VICUNA_7B_1_3_ORIGIN_PATH ]; then
            echo "Directory $VICUNA_7B_1_3_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/vicuna-7b-v1.3 -P $ORIGIN_DIR
          fi

      - name: Run LLM cli test (Linux)
        if: runner.os == 'Linux'
        uses: ./.github/actions/llm/cli-test-linux

      - name: Setup Python Path
        if: runner.os == 'Windows'
        shell: bash
        run: |
          # Get Python interpreter path
          python_path=$(python -c 'import sys; print(sys.executable)')
          python_dir=$(dirname "$python_path")
          scripts_dir="$python_dir/Scripts"

          # Set environment variables
          echo "PYTHON_DIR=$python_dir" >> $GITHUB_ENV
          echo "SCRIPTS_DIR=$scripts_dir" >> $GITHUB_ENV

      - name: Run LLM cli test (Windows)
        if: runner.os == 'Windows'
        shell: powershell
        run: |
          # Retrieve environment variables
          $pythonDir = $env:PYTHON_DIR
          $scriptsDir = $env:SCRIPTS_DIR

          # Update PATH
          $env:PATH = "$pythonDir;$scriptsDir;$env:PATH"

          # Run tests
          llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x llama -m $env:LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
          llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x gptneox -m $env:GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
          llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x bloom -m $env:BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
          # llm-cli.ps1 -t $env:THREAD_NUM -x starcoder -m $env:STARCODER_INT4_CKPT_PATH -p 'def check_odd('

      - name: Run LLM inference test
        shell: bash
        run: |
          python -m pip install einops datasets librosa openai-whisper
          bash python/llm/test/run-llm-inference-tests.sh
      - name: Run LLM langchain test
        shell: bash
        run: |
          pip install -U langchain==0.0.184
          pip install -U chromadb==0.3.25
          pip install -U pandas==2.0.3
          bash python/llm/test/run-llm-langchain-tests.sh
      - name: Run LLM llamaindex test
        shell: bash
        run: |
          pip install "llama-index-readers-file<0.2.0"
          pip install "llama-index-vector-stores-postgres<0.2.0"
          pip install "llama-index-embeddings-huggingface<0.3.0"
          pip install transformers==4.36.2
          pip install "pydantic>=2.0.0"
          bash python/llm/test/run-llm-llamaindex-tests.sh
      - name: Run sentence-transformers uninstallation
        if: ${{ always() }}
        shell: bash
        run: |
          pip uninstall sentence-transformers -y || true

  llm-unit-test-on-arc:
    needs: [setup-python-version, llm-cpp-build]
    strategy:
      fail-fast: false
      matrix:
        runner: ['arc-ut', 'arc-ut-win']
        pytorch-version: ['2.1']
        python-version: ${{ fromJson(needs.setup-python-version.outputs.python-version) }}
    runs-on: [self-hosted, llm, "${{ matrix.runner }}"]
    env:
      # OMP_NUM_THREADS: 16
      # THREAD_NUM: 16
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
    steps:
      - name: Set environment variables
        shell: bash
        run: |
          echo "DATASET_DIR=${ORIGIN_DIR}/../datasets" >> "$GITHUB_ENV"
          echo "YAHMA_ALPACA_CLEANED_PATH=${ORIGIN_DIR}/../datasets/yahma_alpaca_cleaned" >> "$GITHUB_ENV"
          echo "SPEECH_DATASET_PATH=${ORIGIN_DIR}/../datasets/librispeech_asr_dummy" >> "$GITHUB_ENV"

          echo "LLAMA2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Llama-2-7b-chat-hf" >> "$GITHUB_ENV"
          echo "CHATGLM2_6B_ORIGIN_PATH=${ORIGIN_DIR}/chatglm2-6b" >> "$GITHUB_ENV"
          echo "FALCON_7B_ORIGIN_PATH=${ORIGIN_DIR}/falcon-7b-instruct-with-patch" >> "$GITHUB_ENV"
          echo "MPT_7B_ORIGIN_PATH=${ORIGIN_DIR}/mpt-7b-chat" >> "$GITHUB_ENV"
          echo "WHISPER_TINY_ORIGIN_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV"
          echo "MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH=${ORIGIN_DIR}/Mistral-7B-Instruct-v0.1" >> "$GITHUB_ENV"
          echo "BAICHUAN2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Baichuan2-7B-Chat" >> "$GITHUB_ENV"
          echo "QWEN_7B_ORIGIN_PATH=${ORIGIN_DIR}/Qwen-7B-Chat" >> "$GITHUB_ENV"
          echo "VICUNA_7B_1_3_ORIGIN_PATH=${ORIGIN_DIR}/vicuna-7b-v1.3" >> "$GITHUB_ENV"
      - name: Checkout repo
        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          python -m pip install --upgrade notebook

          # May remove later
          pip uninstall sentence-transformers -y || true

          # On Windows, we need to add "Python3_ROOT_DIR/bin" to path to make libuv work
          if [[ "$RUNNER_OS" == "Windows" ]]; then
            echo $Python3_ROOT_DIR'\bin\'
            echo $Python3_ROOT_DIR'\bin\' >> $GITHUB_PATH
          fi

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Install IPEX-LLM for xpu
        uses: ./.github/actions/llm/setup-llm-env
        with:
          extra-dependency: "xpu_${{ matrix.pytorch-version }}"

      - name: Test installed xpu version
        shell: bash
        run: |
          # Specific oneapi position on arc ut test machines
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
              source /opt/intel/oneapi/setvars.sh
            elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
              source /home/arda/intel/oneapi/setvars.sh
            fi
          fi
          bash python/llm/test/run-llm-install-tests.sh

      - name: Download LLMs and datasets
        shell: bash
        run: |
          if [ ! -d $LLAMA2_7B_ORIGIN_PATH ]; then
            echo "Directory $LLAMA2_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Llama-2-7b-chat-hf -P $ORIGIN_DIR
          fi
          if [ ! -d $CHATGLM2_6B_ORIGIN_PATH ]; then
            echo "Directory $CHATGLM2_6B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/chatglm2-6b -P $ORIGIN_DIR
          fi
          if [ ! -d $FALCON_7B_ORIGIN_PATH ]; then
            echo "Directory $FALCON_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/falcon-7b-instruct-with-patch -P $ORIGIN_DIR
          fi
          if [ ! -d $MPT_7B_ORIGIN_PATH ]; then
            echo "Directory $MPT_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/mpt-7b-chat -P $ORIGIN_DIR
          fi
          if [ ! -d $WHISPER_TINY_ORIGIN_PATH ]; then
            echo "Directory $WHISPER_TINY_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR
          fi
          if [ ! -d $DATASET_DIR ]; then
            mkdir -p $DATASET_DIR
          fi
          if [ ! -d $YAHMA_ALPACA_CLEANED_PATH ]; then
            echo "Directory $YAHMA_ALPACA_CLEANED_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/yahma_alpaca_cleaned -P $DATASET_DIR
          fi
          if [ ! -d $SPEECH_DATASET_PATH ]; then
            echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR
          fi
          if [ ! -d $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH ]; then
            echo "Directory $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-Instruct-v0.1 -P $ORIGIN_DIR
          fi
          if [ ! -d $QWEN_7B_ORIGIN_PATH ]; then
            echo "Directory $QWEN_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Qwen-7B-Chat -P $ORIGIN_DIR
          fi
          if [ ! -d $BAICHUAN2_7B_ORIGIN_PATH ]; then
            echo "Directory $BAICHUAN2_7B_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/updated_for_4.36/Baichuan2-7B-Chat -P $ORIGIN_DIR
          fi
          if [ ! -d $VICUNA_7B_1_3_ORIGIN_PATH ]; then
            echo "Directory $VICUNA_7B_1_3_ORIGIN_PATH not found. Downloading from FTP server..."
            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/vicuna-7b-v1.3 -P $ORIGIN_DIR
          fi

      - name: Run LLM inference test
        shell: bash
        run: |
          # Specific oneapi position on arc ut test machines
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
              source /opt/intel/oneapi/setvars.sh
            elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
              source /home/arda/intel/oneapi/setvars.sh
            fi
          fi
          python -m pip install datasets librosa soundfile einops tiktoken transformers_stream_generator

          bash python/llm/test/run-llm-inference-tests-gpu.sh

      - name: Run LLM example tests
        shell: bash
        run: |
          python -m pip uninstall datasets -y
          python -m pip install transformers==4.36.0 datasets peft==0.10.0
          python -m pip install bitsandbytes scipy
          # Specific oneapi position on arc ut test machines
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
              source /opt/intel/oneapi/setvars.sh
            elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
              source /home/arda/intel/oneapi/setvars.sh
            fi
          fi
          bash python/llm/test/run-llm-example-tests-gpu.sh

      - name: Get Langchain version
        shell: bash
        id: get_langchain_version
        run: |
          pip install langchain
          LANGCHAIN_VERSION=$(pip show langchain | grep Version | cut -d " " -f 2)
          LANGCHAIN_REF="langchain==$LANGCHAIN_VERSION"
          echo "langchain_ver=$LANGCHAIN_REF" >> $GITHUB_OUTPUT

      - name: Checkout Langchain repo
        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
        with:
          repository: "langchain-ai/langchain"
          ref: ${{ join(steps.get_langchain_version.outputs.*, '\n') }}
          path: langchain_upstream

      - name: Run LLM langchain GPU test
        shell: bash
        run: |
          pip install -U langchain==0.0.184
          pip install -U chromadb==0.3.25
          pip install -U pandas==2.0.3
          # Specific oneapi position on arc ut test machines
          if [[ "$RUNNER_OS" == "Linux" ]]; then
            if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
              source /opt/intel/oneapi/setvars.sh
            elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
              source /home/arda/intel/oneapi/setvars.sh
            fi
          fi
          bash python/llm/test/run-llm-langchain-tests-gpu.sh

          pip install -U langchain
          pip install -U langchain-community
          bash python/llm/test/run-langchain-upstream-tests.sh

      - name: Run LLM llamaindex GPU test
        shell: bash
        run: |
          pip install "llama-index-readers-file<0.2.0"
          pip install "llama-index-vector-stores-postgres<0.2.0"
          pip install "llama-index-embeddings-huggingface<0.3.0"
          # Specific oneapi position on arc ut test machines
          if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
            pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
            if [[ "$RUNNER_OS" == "Linux" ]]; then
              source /opt/intel/oneapi/setvars.sh
            fi
          elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
            pip install --pre --upgrade ipex-llm[xpu_2.0] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
            if [[ "$RUNNER_OS" == "Linux" ]]; then
              source /home/arda/intel/oneapi/setvars.sh
            fi
          fi
          pip install transformers==4.36.2
          pip install "pydantic>=2.0.0"
          bash python/llm/test/run-llm-llamaindex-tests-gpu.sh
      - name: Run sentence-transformers uninstallation
        if: ${{ always() }}
        shell: bash
        run: |
          pip uninstall sentence-transformers -y || true
.github/workflows/manually_build.yml (vendored)
@@ -1,400 +0,0 @@
name: Manually Build

on:
  # workflow_dispatch:
  #   inputs:
  #     checkout-ref:
  #       description: 'commit id (SHA-1 hash)'
  #       required: true
  #       type: string
  #     artifact:
  #       description: 'select which job to run("all" will make all jobs run)'
  #       required: true
  #       default: 'all'
  #       type: choice
  #       options:
  #       - all
  #       - ipex-llm-cpu
  #       - ipex-llm-xpu
  #       - ipex-llm-inference-cpp-xpu
  #       - ipex-llm-serving-cpu
  #       - ipex-llm-serving-xpu
  #       - ipex-llm-finetune-lora-cpu
  #       - ipex-llm-finetune-qlora-cpu
  #       - ipex-llm-finetune-qlora-cpu-k8s
  #       - ipex-llm-finetune-xpu
  #     tag:
  #       description: 'docker image tag (e.g. 2.2.0-SNAPSHOT)'
  #       required: true
  #       default: '2.2.0-SNAPSHOT'
  #       type: string
  workflow_call:
    inputs:
      checkout-ref:
        description: 'ref for checking out, including branch, tag or SHA'
        required: true
        type: string
      artifact:
        description: 'select which job to run("all" will make all jobs run)'
        required: true
        default: 'all'
        type: string
      tag:
        description: 'docker image tag (e.g. 2.2.0-SNAPSHOT)'
        required: true
        default: '2.2.0-SNAPSHOT'
        type: string
      public:
        description: "if the docker image push to public docker hub"
        required: true
        type: boolean
        default: true

env:
  TAG: ${{ inputs.tag }}

permissions:
  contents: read

jobs:
  ipex-llm-finetune-lora-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-lora-cpu' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-finetune-lora-cpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-finetune-lora-cpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-finetune-lora-cpu
        cd docker/llm/finetune/lora/cpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-finetune-qlora-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu' }}
    runs-on: [self-hosted, Shire]

    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          # push docker image to public hub
          if [[ "${{ inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi

  ipex-llm-finetune-qlora-cpu-k8s:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu-k8s' }}
    runs-on: [self-hosted, Shire]

    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          repository: 'intel-analytics/ipex-llm'
          ref: ${{ inputs.checkout-ref }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu-k8s
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu-k8s ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu-k8s
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile.k8s .
          # push docker image to public hub
          if [[ "${{ inputs.public }}" == "true" ]]; then
            sudo docker push ${image}:${TAG}
            # tag 'latest'
            sudo docker tag ${image}:${TAG} ${image}:latest
            sudo docker push ${image}:latest
            sudo docker rmi -f ${image}:${TAG} ${image}:latest
          else
            sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
            sudo docker push 10.239.45.10/arda/${image}:${TAG}
            sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          fi

  ipex-llm-finetune-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-xpu' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-finetune-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-finetune-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-finetune-xpu
        cd docker/llm/finetune/xpu
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-xpu
        cd docker/llm/inference/xpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-inference-cpp-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-inference-cpp-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-inference-cpp-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-inference-cpp-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-inference-cpp-xpu
        cd docker/llm/inference-cpp/
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-cpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-cpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-cpu
        cd docker/llm/inference/cpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-serving-xpu:
    if: ${{ inputs.artifact == 'ipex-llm-serving-xpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-serving-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-serving-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-serving-xpu
        cd docker/llm/serving/xpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi

  ipex-llm-serving-cpu:
    if: ${{ inputs.artifact == 'ipex-llm-serving-cpu' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire, AVX512]
    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-serving-cpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-serving-cpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-serving-cpu
        cd docker/llm/serving/cpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        # push docker image to public hub
        if [[ "${{ inputs.public }}" == "true" ]]; then
          sudo docker push ${image}:${TAG}
          # tag 'latest'
          sudo docker tag ${image}:${TAG} ${image}:latest
          sudo docker push ${image}:latest
          sudo docker rmi -f ${image}:${TAG} ${image}:latest
        else
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        fi
.github/workflows/manually_build_for_testing.yml (vendored)
@@ -1,317 +0,0 @@
name: Manually Build For Testing

on:
  workflow_dispatch:
    inputs:
      sha:
        description: 'commit id (SHA-1 hash)'
        required: true
        type: string
      artifact:
        description: 'select which job to run("all" will make all jobs run)'
        required: true
        default: 'all'
        type: choice
        options:
        - all
        - ipex-llm-cpu
        - ipex-llm-xpu
        - ipex-llm-inference-cpp-xpu
        - ipex-llm-serving-cpu
        - ipex-llm-serving-xpu
        - ipex-llm-serving-xpu-tgi
        - ipex-llm-finetune-lora-cpu
        - ipex-llm-finetune-qlora-cpu
        - ipex-llm-finetune-qlora-cpu-k8s
        - ipex-llm-finetune-xpu
      tag:
        description: 'docker image tag (e.g. test)'
        required: true
        default: 'test'
        type: string

env:
  TAG: ${{ github.event.inputs.tag }}

permissions:
  contents: read

jobs:
  ipex-llm-finetune-lora-cpu:
    if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-lora-cpu' || github.event.inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        ref: ${{ github.event.inputs.sha }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-finetune-lora-cpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-finetune-lora-cpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-finetune-lora-cpu
        cd docker/llm/finetune/lora/cpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}

  ipex-llm-finetune-qlora-cpu:
    if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-qlora-cpu' || github.event.inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.sha }}
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile .
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}

  ipex-llm-finetune-qlora-cpu-k8s:
    if: ${{ inputs.artifact == 'ipex-llm-finetune-qlora-cpu-k8s' || inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      - name: docker login
        run: |
          docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - name: ipex-llm-finetune-qlora-cpu-k8s
        run: |
          echo "##############################################################"
          echo "####### ipex-llm-finetune-qlora-cpu-k8s ########"
          echo "##############################################################"
          export image=intelanalytics/ipex-llm-finetune-qlora-cpu-k8s
          cd docker/llm/finetune/qlora/cpu/docker
          sudo docker build \
            --no-cache=true \
            --build-arg http_proxy=${HTTP_PROXY} \
            --build-arg https_proxy=${HTTPS_PROXY} \
            --build-arg no_proxy=${NO_PROXY} \
            -t ${image}:${TAG} -f ./Dockerfile.k8s .
          sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
          sudo docker push 10.239.45.10/arda/${image}:${TAG}
          sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}

  ipex-llm-finetune-xpu:
    if: ${{ github.event.inputs.artifact == 'ipex-llm-finetune-xpu' || github.event.inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        ref: ${{ github.event.inputs.sha }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-finetune-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-finetune-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-finetune-xpu
        cd docker/llm/finetune/xpu
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}

  ipex-llm-xpu:
    if: ${{ github.event.inputs.artifact == 'ipex-llm-xpu' || github.event.inputs.artifact == 'all' }}
    runs-on: [self-hosted, Shire]

    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        ref: ${{ github.event.inputs.sha }}
    - name: docker login
      run: |
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
    - name: ipex-llm-xpu
      run: |
        echo "##############################################################"
        echo "####### ipex-llm-xpu ########"
        echo "##############################################################"
        export image=intelanalytics/ipex-llm-xpu
        cd docker/llm/inference/xpu/docker
        sudo docker build \
          --no-cache=true \
          --build-arg http_proxy=${HTTP_PROXY} \
          --build-arg https_proxy=${HTTPS_PROXY} \
          --build-arg no_proxy=${NO_PROXY} \
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
 | 
			
		||||
  ipex-llm-serving-xpu-tgi:
 | 
			
		||||
    if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-xpu-tgi' || github.event.inputs.artifact == 'all' }}
 | 
			
		||||
    runs-on: [self-hosted, Shire]
 | 
			
		||||
    
 | 
			
		||||
    steps:
 | 
			
		||||
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
 | 
			
		||||
      with:
 | 
			
		||||
        ref: ${{ github.event.inputs.sha }}
 | 
			
		||||
    - name: docker login
 | 
			
		||||
      run: |
 | 
			
		||||
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 | 
			
		||||
    - name: ipex-llm-serving-xpu-tgi
 | 
			
		||||
      run: |
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        echo "####### ipex-llm-serving-xpu-tgi ########"
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        export image=intelanalytics/ipex-llm-serving-xpu-tgi
 | 
			
		||||
        cd docker/llm/serving/xpu-tgi
 | 
			
		||||
        sudo docker build \
 | 
			
		||||
          --no-cache=true \
 | 
			
		||||
          --build-arg http_proxy=${HTTP_PROXY} \
 | 
			
		||||
          --build-arg https_proxy=${HTTPS_PROXY} \
 | 
			
		||||
          --build-arg no_proxy=${NO_PROXY} \
 | 
			
		||||
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
 | 
			
		||||
  ipex-llm-inference-cpp-xpu:
 | 
			
		||||
    if: ${{ github.event.inputs.artifact == 'ipex-llm-inference-cpp-xpu' || github.event.inputs.artifact == 'all' }}
 | 
			
		||||
    runs-on: [self-hosted, Shire]
 | 
			
		||||
    
 | 
			
		||||
    steps:
 | 
			
		||||
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
 | 
			
		||||
      with:
 | 
			
		||||
        ref: ${{ github.event.inputs.sha }}
 | 
			
		||||
    - name: docker login
 | 
			
		||||
      run: |
 | 
			
		||||
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 | 
			
		||||
    - name: ipex-llm-inference-cpp-xpu
 | 
			
		||||
      run: |
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        echo "####### ipex-llm-inference-cpp-xpu ########"
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        export image=intelanalytics/ipex-llm-inference-cpp-xpu
 | 
			
		||||
        cd docker/llm/inference-cpp/
 | 
			
		||||
        sudo docker build \
 | 
			
		||||
          --no-cache=true \
 | 
			
		||||
          --build-arg http_proxy=${HTTP_PROXY} \
 | 
			
		||||
          --build-arg https_proxy=${HTTPS_PROXY} \
 | 
			
		||||
          --build-arg no_proxy=${NO_PROXY} \
 | 
			
		||||
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
 | 
			
		||||
  ipex-llm-cpu:
 | 
			
		||||
    if: ${{ github.event.inputs.artifact == 'ipex-llm-cpu' || github.event.inputs.artifact == 'all' }}
 | 
			
		||||
    runs-on: [self-hosted, Shire]
 | 
			
		||||
 | 
			
		||||
    steps:
 | 
			
		||||
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
 | 
			
		||||
      with:
 | 
			
		||||
        ref: ${{ github.event.inputs.sha }}
 | 
			
		||||
    - name: docker login
 | 
			
		||||
      run: |
 | 
			
		||||
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 | 
			
		||||
    - name: ipex-llm-cpu
 | 
			
		||||
      run: |
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        echo "####### ipex-llm-cpu ########"
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        export image=intelanalytics/ipex-llm-cpu
 | 
			
		||||
        cd docker/llm/inference/cpu/docker
 | 
			
		||||
        sudo docker build \
 | 
			
		||||
          --no-cache=true \
 | 
			
		||||
          --build-arg http_proxy=${HTTP_PROXY} \
 | 
			
		||||
          --build-arg https_proxy=${HTTPS_PROXY} \
 | 
			
		||||
          --build-arg no_proxy=${NO_PROXY} \
 | 
			
		||||
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
 | 
			
		||||
  ipex-llm-serving-xpu:
 | 
			
		||||
    if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-xpu' || github.event.inputs.artifact == 'all' }}
 | 
			
		||||
    runs-on: [self-hosted, Shire]
 | 
			
		||||
    
 | 
			
		||||
    steps:
 | 
			
		||||
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
 | 
			
		||||
      with:
 | 
			
		||||
        ref: ${{ github.event.inputs.sha }}
 | 
			
		||||
    - name: docker login
 | 
			
		||||
      run: |
 | 
			
		||||
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 | 
			
		||||
    - name: ipex-llm-serving-xpu
 | 
			
		||||
      run: |
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        echo "####### ipex-llm-serving-xpu ########"
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        export image=intelanalytics/ipex-llm-serving-xpu
 | 
			
		||||
        cd docker/llm/serving/xpu/docker
 | 
			
		||||
        sudo docker build \
 | 
			
		||||
          --no-cache=true \
 | 
			
		||||
          --build-arg http_proxy=${HTTP_PROXY} \
 | 
			
		||||
          --build-arg https_proxy=${HTTPS_PROXY} \
 | 
			
		||||
          --build-arg no_proxy=${NO_PROXY} \
 | 
			
		||||
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
 | 
			
		||||
  ipex-llm-serving-cpu:
 | 
			
		||||
    if: ${{ github.event.inputs.artifact == 'ipex-llm-serving-cpu' || github.event.inputs.artifact == 'all' }}
 | 
			
		||||
    runs-on: [self-hosted, Shire, AVX512]
 | 
			
		||||
 | 
			
		||||
    steps:
 | 
			
		||||
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
 | 
			
		||||
      with:
 | 
			
		||||
        ref: ${{ github.event.inputs.sha }}
 | 
			
		||||
    - name: docker login
 | 
			
		||||
      run: |
 | 
			
		||||
        docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
 | 
			
		||||
    - name: ipex-llm-serving-cpu
 | 
			
		||||
      run: |
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        echo "####### ipex-llm-serving-cpu ########"
 | 
			
		||||
        echo "##############################################################"
 | 
			
		||||
        export image=intelanalytics/ipex-llm-serving-cpu
 | 
			
		||||
        cd docker/llm/serving/cpu/docker
 | 
			
		||||
        sudo docker build \
 | 
			
		||||
          --no-cache=true \
 | 
			
		||||
          --build-arg http_proxy=${HTTP_PROXY} \
 | 
			
		||||
          --build-arg https_proxy=${HTTPS_PROXY} \
 | 
			
		||||
          --build-arg no_proxy=${NO_PROXY} \
 | 
			
		||||
          -t ${image}:${TAG} -f ./Dockerfile .
 | 
			
		||||
        sudo docker tag ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker push 10.239.45.10/arda/${image}:${TAG}
 | 
			
		||||
        sudo docker rmi -f ${image}:${TAG} 10.239.45.10/arda/${image}:${TAG}
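
Every job in the workflow above repeats the same build-and-mirror sequence: build the image behind the proxy, retag it for the internal registry, push, then delete the local copies to reclaim disk. A minimal standalone sketch of that pattern follows; IMAGE, TAG, and REGISTRY are placeholder values standing in for the workflow's environment, not names from the source.

    #!/usr/bin/env bash
    # Sketch of the per-job docker sequence (placeholder names, not the workflow itself)
    set -euo pipefail
    IMAGE=intelanalytics/example-image    # placeholder for ${image}
    TAG=latest                            # placeholder for ${TAG}
    REGISTRY=registry.example.com/arda    # placeholder for the internal registry
    # Build from scratch, passing the proxy settings through to the build.
    sudo docker build --no-cache=true \
      --build-arg http_proxy="${HTTP_PROXY:-}" \
      --build-arg https_proxy="${HTTPS_PROXY:-}" \
      --build-arg no_proxy="${NO_PROXY:-}" \
      -t "${IMAGE}:${TAG}" -f ./Dockerfile .
    sudo docker tag "${IMAGE}:${TAG}" "${REGISTRY}/${IMAGE}:${TAG}"     # retag for the mirror
    sudo docker push "${REGISTRY}/${IMAGE}:${TAG}"                      # publish to the mirror
    sudo docker rmi -f "${IMAGE}:${TAG}" "${REGISTRY}/${IMAGE}:${TAG}"  # free local disk
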

.github/workflows/release-ipex-llm.yaml (vendored; 56 lines deleted)
@@ -1,56 +0,0 @@
name: Release IPEX-LLM Pypi

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'ipex-llm version (e.g. 2.2.0b1)'
        required: true
        default: '2.2.0b0'
        type: string

permissions:
  contents: read

jobs:

  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml

  ipex-llm-release:
    if: ${{ github.event_name == 'workflow_dispatch' }}
    runs-on: [self-hosted, Bree]
    needs: llm-cpp-build
    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3

    - name: set release version
      env:
        DEFAULT_VERSION: '2.2.0b0'
      run: |
        echo "RELEASE_VERSION=${{ github.event.inputs.version || env.DEFAULT_VERSION }}" >> $GITHUB_ENV

    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.7.15'

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build
        pip install wheel
        pip install twine

    - name: Download llm binary
      uses: ./.github/actions/llm/download-llm-binary

    - name: Build package
      run: |
        echo ${RELEASE_VERSION}

        ## windows ##
        bash python/llm/dev/release_default_windows.sh ${RELEASE_VERSION} true

        ## linux ##
        bash python/llm/dev/release_default_linux.sh ${RELEASE_VERSION} true
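
The `set release version` step above relies on the GitHub Actions `||` expression to fall back to DEFAULT_VERSION when the workflow is dispatched without a version input. A rough shell equivalent, with INPUT_VERSION as a hypothetical stand-in for github.event.inputs.version:

    # INPUT_VERSION is a hypothetical stand-in for github.event.inputs.version
    RELEASE_VERSION="${INPUT_VERSION:-2.2.0b0}"                 # fall back to the default
    echo "RELEASE_VERSION=${RELEASE_VERSION}" >> "$GITHUB_ENV"  # expose it to later steps
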

.github/workflows/release-pypi.yml (vendored; 78 lines deleted)
@@ -1,78 +0,0 @@
name: Nightly Release

on:
  # pull_request:
  #   branches: [ main ]
  #   paths:
  #     - '.github/workflows/nightly_build.yml'
  # schedule:
  #   - cron: '00 15 * * *'  # GMT time, 15:00 GMT == 23:00 China
  # workflow_dispatch:
  workflow_call:
    inputs:
      checkout-ref:
        description: 'ref for checking out, including branch, tag or SHA'
        required: false
        type: string
      release-version:
        description: 'ipex-llm version (e.g. 2.2.0b1)'
        required: false
        type: string
      schedule-event:
        description: 'whether it is triggered by schedule event'
        required: true
        type: boolean
permissions:
  contents: read

jobs:

  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml

  ipex-llm-build:
    # python build can only be published once a day, please do not publish it manually
    # if: ${{ github.event.schedule || github.event_name == 'workflow_dispatch' }}
    runs-on: [self-hosted, Bree]
    needs: llm-cpp-build
    steps:
    - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
      with:
        repository: 'intel-analytics/ipex-llm'
        ref: ${{ inputs.checkout-ref }}

    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.7.15'

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build
        pip install wheel
        pip install twine

    - name: Download llm binary
      uses: ./.github/actions/llm/download-llm-binary

    - name: set release version
      run: |
        if [[ "${{ inputs.schedule-event }}" == "true" ]]; then
          export TIMESTAMP=`date '+%Y%m%d'`
          export PYPI_VERSION=2.2.0
          export RELEASE_VERSION=${PYPI_VERSION}b${TIMESTAMP}
        else
          export RELEASE_VERSION=${{ inputs.release-version }}
        fi
        echo "RELEASE_VERSION=${RELEASE_VERSION}" >> $GITHUB_ENV

    - name: Build package
      run: |
        echo ${RELEASE_VERSION}

        ## windows ##
        bash python/llm/dev/release_default_windows.sh ${RELEASE_VERSION} true

        ## linux ##
        bash python/llm/dev/release_default_linux.sh ${RELEASE_VERSION} true
			
		||||
		Loading…
	
		Reference in a new issue