[LLM] Merge windows & linux nightly test (#8756)
* fix download statement
* add check before build wheel
* use curl to upload files
* windows unittest won't upload converted model
* split llm-cli test into windows & linux versions
* update tempdir create way
* fix nightly converted model name
* windows llm-cli starcoder test temporarily disabled
* remove taskset dependency
* rename llm_unit_tests_linux to llm_unit_tests
This commit is contained in:

parent dcadd09154
commit c94bdd3791

7 changed files with 249 additions and 180 deletions
				
			
		
							
								
								
									
.github/actions/llm/setup-llm-env/action.yml (9 changes)
				
			
@@ -8,7 +8,14 @@ runs:
       shell: bash
       run: |
         pip install requests
-        bash python/llm/dev/release_default_linux.sh default false
+        if [[ ${{ runner.os }} == 'Linux' ]]; then
+          bash python/llm/dev/release_default_linux.sh default false
+        elif [[ ${{ runner.os }} == 'Windows' ]]; then
+          bash python/llm/dev/release_default_windows.sh default false
+        else
+          echo "Runner os is not supported!!!!!"
+          exit 1
+        fi
         whl_name=$(ls python/llm/dist)
         pip install -i https://pypi.python.org/simple --force-reinstall "python/llm/dist/${whl_name}[all]"
         pip install pytest
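The hunk above makes the wheel-build step OS-aware by branching on `${{ runner.os }}` inside the bash step. A minimal standalone sketch of the same dispatch, assuming the `RUNNER_OS` environment variable that the Actions runner exports (the script paths come from the diff; everything else here is illustrative, not part of the commit):

#!/usr/bin/env bash
# RUNNER_OS is set by the Actions runner to "Linux", "Windows", or "macOS".
set -e
case "$RUNNER_OS" in
  Linux)   bash python/llm/dev/release_default_linux.sh default false ;;
  Windows) bash python/llm/dev/release_default_windows.sh default false ;;
  *)       echo "Runner OS $RUNNER_OS is not supported" >&2; exit 1 ;;
esac

The `case` statement is used here instead of the diff's `if/elif` chain purely for compactness; the behavior is the same.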
				
			
@@ -29,15 +29,15 @@ jobs:
       - name: Set model directories
         shell: bash
         run: |
-          echo "ORIGIN_DIR=$(pwd)/../llm/origin-models" >> "$GITHUB_ENV"
-          echo "INT4_CKPT_DIR=$(pwd)/../llm/converted-models" >> "$GITHUB_ENV"
+          echo "ORIGIN_DIR=${{ github.workspace }}/../llm/origin-models" >> "$GITHUB_ENV"
+          echo "INT4_CKPT_DIR=${{ github.workspace }}/../llm/converted-models" >> "$GITHUB_ENV"
       - name: Create model directories
         shell: bash
         run: |
-          if [ ! -d $ORIGIN_DIR];then
+          if [ ! -d $ORIGIN_DIR ]; then
             mkdir -p $ORIGIN_DIR
           fi
-          if [ ! -d $INT4_CKPT_DIR];then
+          if [ ! -d $INT4_CKPT_DIR ]; then
             mkdir -p $INT4_CKPT_DIR
           fi
       - name: Set environment variables
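The hunk above and the nightly workflow below both derive model paths inside steps that append to `$GITHUB_ENV`. For context: `$GITHUB_ENV` points at a file the runner re-reads between steps, so `KEY=value` lines appended to it become environment variables in every subsequent step. A sketch of the mechanism (the paths are the ones from the diff; `GITHUB_WORKSPACE` is the environment-variable counterpart of the `${{ github.workspace }}` expression):

# Persist derived paths for later steps. Unlike $(pwd), GITHUB_WORKSPACE
# does not depend on which directory a previous step left the shell in.
echo "ORIGIN_DIR=${GITHUB_WORKSPACE}/../llm/origin-models" >> "$GITHUB_ENV"
echo "INT4_CKPT_DIR=${GITHUB_WORKSPACE}/../llm/converted-models" >> "$GITHUB_ENV"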
							
								
								
									
.github/workflows/llm-nightly-test.yml (80 changes)
				
			
@@ -23,34 +23,55 @@ on:
 jobs:
   llm-cpp-build:
     uses: ./.github/workflows/llm-binary-build.yml
-  llm-nightly-convert-test-avx512:
-    runs-on: [self-hosted, llm, AVX512, ubuntu-20.04-lts]
+  llm-nightly-convert-test:
     needs: llm-cpp-build
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9"]
+        include:
+          - os: windows
+            instruction: avx2
+            python-version: "3.9"
+          - os: ubuntu-20.04-lts
+            instruction: avx512
+            python-version: "3.9"
+    runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"]
     env:
-      ORIGIN_DIR: ./llm/models
-      LLAMA_ORIGIN_PATH: ./llm/models/llama-7b-hf
-      GPTNEOX_ORIGIN_PATH: ./llm/models/gptneox-7b-redpajama-bf16
-      BLOOM_ORIGIN_PATH: ./llm/models/bloomz-7b1
-      STARCODER_ORIGIN_PATH: ./llm/models/gpt_bigcode-santacoder
-
-      INT4_CKPT_DIR: ./llm/ggml-actions/nightly
-      LLAMA_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_llama_q4_0.bin
-      GPTNEOX_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_gptneox_q4_0.bin
-      BLOOM_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_bloom_q4_0.bin
-      STARCODER_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_starcoder_q4_0.bin
-
       ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
     steps:
+      - name: Set model directories
+        shell: bash
+        run: |
+          echo "ORIGIN_DIR=$(pwd)/../llm/origin-models" >> "$GITHUB_ENV"
+          echo "INT4_CKPT_DIR=$(pwd)/../llm/nightly-converted-models" >> "$GITHUB_ENV"
+      - name: Create model directories
+        shell: bash
+        run: |
+          if [ ! -d $ORIGIN_DIR ]; then
+            mkdir -p $ORIGIN_DIR
+          fi
+          if [ ! -d $INT4_CKPT_DIR ]; then
+            mkdir -p $INT4_CKPT_DIR
+          fi
+      - name: Set environment variables
+        shell: bash
+        run: |
+          echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV"
+          echo "GPTNEOX_ORIGIN_PATH=${ORIGIN_DIR}/gptneox-7b-redpajama-bf16" >> "$GITHUB_ENV"
+          echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloomz-7b1" >> "$GITHUB_ENV"
+          echo "STARCODER_ORIGIN_PATH=${ORIGIN_DIR}/gpt_bigcode-santacoder" >> "$GITHUB_ENV"
+
+          echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_q4_0.bin" >> "$GITHUB_ENV"
+          echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_gptneox_q4_0.bin" >> "$GITHUB_ENV"
+          echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_q4_0.bin" >> "$GITHUB_ENV"
+          echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_starcoder_q4_0.bin" >> "$GITHUB_ENV"
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install dependencies
         shell: bash
         run: |
           python -m pip install --upgrade pip
           python -m pip install --upgrade setuptools==58.0.4
				
			
@@ -66,19 +87,24 @@ jobs:
         uses: ./.github/actions/llm/convert-test

       - name: Upload ckpt to ftp
         shell: bash
+        if: runner.os == 'Linux' && github.event_name == 'schedule'
         run: |
-          apt-get update && apt install -y tnftp
-          tnftp -u ${LLM_FTP_URL}/${INT4_CKPT_DIR:1}/bigdl_llm_llama_7b_q4_0.bin $LLAMA_INT4_CKPT_PATH
-          tnftp -u ${LLM_FTP_URL}/${INT4_CKPT_DIR:1}/bigdl_llm_redpajama_7b_q4_0.bin $GPTNEOX_INT4_CKPT_PATH
-          tnftp -u ${LLM_FTP_URL}/${INT4_CKPT_DIR:1}/bigdl_llm_bloom_7b_q4_0.bin $BLOOM_INT4_CKPT_PATH
-          tnftp -u ${LLM_FTP_URL}/${INT4_CKPT_DIR:1}/bigdl_llm_santacoder_1b_q4_0.bin $STARCODER_INT4_CKPT_PATH
+          curl -T $LLAMA_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_llama_7b_q4_0.bin
+          curl -T $GPTNEOX_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_redpajama_7b_q4_0.bin
+          curl -T $BLOOM_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_bloom_7b_q4_0.bin
+          curl -T $STARCODER_INT4_CKPT_PATH ${LLM_FTP_URL}/llm/ggml-actions/nightly/bigdl_llm_santacoder_1b_q4_0.bin
+      - name: Delete ckpt
+        shell: bash
+        run: |
+          rm -rf $LLAMA_INT4_CKPT_PATH
+          rm -rf $GPTNEOX_INT4_CKPT_PATH
+          rm -rf $BLOOM_INT4_CKPT_PATH
+          rm -rf $STARCODER_INT4_CKPT_PATH
+
-  llm-inference-test-on-linux:
-    needs: llm-nightly-convert-test-avx512
-    uses: ./.github/workflows/llm_unit_tests_linux.yml
-  llm-inference-test-on-windows:
-    needs: llm-nightly-convert-test-avx512
-    uses: ./.github/workflows/llm-nightly-test-windows.yml
+  llm-unit-tests:
+    needs: llm-cpp-build
+    uses: ./.github/workflows/llm_unit_tests.yml
   llm-example-test:
-    needs: llm-nightly-convert-test-avx512
+    needs: llm-cpp-build
     uses: ./.github/workflows/llm_example_tests.yml
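The upload step above drops tnftp, which had to be apt-installed on every run and is unavailable on the Windows runner, in favor of curl, which both runners already ship. `curl -T` reads a local file and stores it at the given remote URL. A sketch with a placeholder host, since the real `LLM_FTP_URL` is supplied by the runner environment and never appears in the diff:

# Upload one converted checkpoint; -T means "store this local file at
# the given remote path". ftp://example.com is a placeholder host.
curl -T "$LLAMA_INT4_CKPT_PATH" "ftp://example.com/llm/ggml-actions/nightly/bigdl_llm_llama_7b_q4_0.bin"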
							
								
								
									
.github/workflows/llm_unit_tests.yml (new file, 171 additions)
				
			
@@ -0,0 +1,171 @@
+name: LLM Unit Tests
+
+# Cancel previous runs in the PR when you push new commits
+concurrency:
+  group: ${{ github.workflow }}-llm-unittest-${{ github.event.pull_request.number || github.run_id }}
+  cancel-in-progress: true
+
+# Controls when the action will run.
+on:
+  # Triggers the workflow on push or pull request events but only for the main branch
+  push:
+    branches: [main]
+    paths:
+      - "python/llm/**"
+      - ".github/workflows/llm_unit_tests.yml"
+      - ".github/workflows/llm-binary-build.yml"
+      - ".github/actions/llm/setup-llm-env/action.yml"
+      - ".github/actions/llm/remove-llm-env/action.yml"
+      - ".github/actions/llm/cli-test-linux/action.yml"
+      - ".github/actions/llm/cli-test-windows/action.yml"
+      - ".github/actions/llm/download-llm-binary/action.yml"
+  pull_request:
+    branches: [main]
+    paths:
+      - "python/llm/**"
+      - ".github/workflows/llm_unit_tests.yml"
+      - ".github/workflows/llm-binary-build.yml"
+      - ".github/actions/llm/setup-llm-env/action.yml"
+      - ".github/actions/llm/remove-llm-env/action.yml"
+      - ".github/actions/llm/cli-test-linux/action.yml"
+      - ".github/actions/llm/cli-test-windows/action.yml"
+      - ".github/actions/llm/download-llm-binary/action.yml"
+  workflow_dispatch:
+  workflow_call:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  llm-cpp-build:
+    uses: ./.github/workflows/llm-binary-build.yml
+  llm-unit-test:
+    needs: llm-cpp-build
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: windows
+            instruction: avx2
+            python-version: "3.9"
+          - os: ubuntu-20.04-lts
+            instruction: avx512
+            python-version: "3.9"
+    runs-on: [self-hosted, llm, "${{matrix.instruction}}", "${{matrix.os}}"]
+    env:
+      THREAD_NUM: 24
+      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
+    steps:
+      - name: Set model directories
+        shell: bash
+        run: |
+          echo "DATASET_DIR=${{ github.workspace }}/../llm/datasets" >> "$GITHUB_ENV"
+          echo "ORIGIN_DIR=${{ github.workspace }}/../llm/origin-models" >> "$GITHUB_ENV"
+          echo "INT4_CKPT_DIR=${{ github.workspace }}/../llm/converted-models" >> "$GITHUB_ENV"
+      - name: Create model directories
+        shell: bash
+        run: |
+          if [ ! -d $DATASET_DIR ]; then
+            mkdir -p $DATASET_DIR
+          fi
+          if [ ! -d $ORIGIN_DIR ]; then
+            mkdir -p $ORIGIN_DIR
+          fi
+          if [ ! -d $INT4_CKPT_DIR ]; then
+            mkdir -p $INT4_CKPT_DIR
+          fi
+      - name: Set environment variables
+        shell: bash
+        run: |
+          echo "SPEECH_DATASET_PATH=${DATASET_DIR}/librispeech_asr_dummy" >> "$GITHUB_ENV"
+
+          echo "ORIGINAL_CHATGLM2_6B_PATH=${ORIGIN_DIR}/chatglm2-6b" >> "$GITHUB_ENV"
+          echo "ORIGINAL_REPLIT_CODE_PATH=${ORIGIN_DIR}/replit-code-v1-3b" >> "$GITHUB_ENV"
+          echo "ORIGINAL_WHISPER_TINY_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV"
+
+          echo "LLAMA_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_llama_7b_q4_0.bin" >> "$GITHUB_ENV"
+          echo "GPTNEOX_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_redpajama_7b_q4_0.bin" >> "$GITHUB_ENV"
+          echo "BLOOM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_bloom_7b_q4_0.bin" >> "$GITHUB_ENV"
+          echo "STARCODER_INT4_CKPT_PATH=${INT4_CKPT_DIR}/bigdl_llm_santacoder_1b_q4_0.bin" >> "$GITHUB_ENV"
+          echo "CHATGLM_INT4_CKPT_PATH=${INT4_CKPT_DIR}/chatglm2-6b-q4_0.bin" >> "$GITHUB_ENV"
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        shell: bash
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install --upgrade setuptools==58.0.4
+          python -m pip install --upgrade wheel
+
+      - name: Download llm binary
+        uses: ./.github/actions/llm/download-llm-binary
+
+      - name: Run LLM install (all) test
+        uses: ./.github/actions/llm/setup-llm-env
+
+      - name: Download ckpt & original models
+        shell: bash
+        run: |
+          if [ ! -d $LLAMA_INT4_CKPT_PATH ]; then
+            echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..."
+            echo "wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR"
+            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin -P $INT4_CKPT_DIR
+          fi
+          if [ ! -d $GPTNEOX_INT4_CKPT_PATH ]; then
+            echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..."
+            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_redpajama_7b_q4_0.bin -P $INT4_CKPT_DIR
+          fi
+          if [ ! -d $BLOOM_INT4_CKPT_PATH ]; then
+            echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..."
+            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_bloom_7b_q4_0.bin -P $INT4_CKPT_DIR
+          fi
+          if [ ! -d $STARCODER_INT4_CKPT_PATH ]; then
+            echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..."
+            wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin -P $INT4_CKPT_DIR
+          fi
+          # if [ ! -d $CHATGLM_INT4_CKPT_PATH ]; then
+          #   echo "Directory $CHATGLM_INT4_CKPT_PATH not found. Downloading from FTP server..."
+          #   wget --no-verbose $LLM_FTP_URL/llm/ggml-actions/stable/chatglm2-6b-q4_0.bin -P $INT4_CKPT_DIR
+          # fi
+          if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then
+            echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..."
+            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/chatglm2-6b -P $ORIGIN_DIR"
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/chatglm2-6b -P $ORIGIN_DIR
+          fi
+          if [ ! -d $ORIGINAL_REPLIT_CODE_PATH ]; then
+            echo "Directory $ORIGINAL_REPLIT_CODE_PATH not found. Downloading from FTP server..."
+            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/replit-code-v1-3b -P $ORIGIN_DIR"
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/replit-code-v1-3b -P $ORIGIN_DIR
+          fi
+          if [ ! -d $ORIGINAL_WHISPER_TINY_PATH ]; then
+            echo "Directory $ORIGINAL_WHISPER_TINY_PATH not found. Downloading from FTP server..."
+            echo "wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR"
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/whisper-tiny -P $ORIGIN_DIR
+          fi
+          if [ ! -d $SPEECH_DATASET_PATH ]; then
+            echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..."
+            echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR"
+            wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR
+          fi
+
+      - name: Run LLM cli test (Linux)
+        if: runner.os == 'Linux'
+        uses: ./.github/actions/llm/cli-test-linux
+      - name: Run LLM cli test (Windows)
+        if: runner.os == 'Windows'
+        uses: ./.github/actions/llm/cli-test-windows
+
+      - name: Run LLM inference test
+        shell: bash
+        run: |
+          python -m pip install einops datasets librosa
+          bash python/llm/test/run-llm-inference-tests.sh
+
+      - name: Run LLM langchain test
+        shell: bash
+        run: |
+          pip install -U langchain==0.0.184
+          pip install -U chromadb==0.3.25
+          pip install -U typing_extensions==4.5.0
+          bash python/llm/test/run-llm-langchain-tests.sh
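The download steps in the new workflow lean on wget's mirroring flags: `-r` recurses into the remote directory, `-nH` omits the host name from the saved path, `--cut-dirs=N` strips the first N remote path components, and `-P` sets the local destination root. A sketch with a placeholder host showing how one model directory lands under `$ORIGIN_DIR`:

# Mirror ftp://example.com/llm/whisper-tiny into $ORIGIN_DIR/whisper-tiny:
# -r recurse, -nH drop the host directory, --cut-dirs=1 drop the leading
# "llm/" path component, -P choose the local prefix.
wget -r -nH --no-verbose --cut-dirs=1 "ftp://example.com/llm/whisper-tiny" -P "$ORIGIN_DIR"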
							
								
								
									
.github/workflows/llm_unit_tests_linux.yml (deleted, 131 deletions)
				
			
@@ -1,131 +0,0 @@
-name: LLM Unit Tests on Linux
-
-# Cancel previous runs in the PR when you push new commits
-concurrency:
-  group: ${{ github.workflow }}-llm-linux-unittest-${{ github.event.pull_request.number || github.run_id }}
-  cancel-in-progress: true
-
-# Controls when the action will run.
-on:
-  # Triggers the workflow on push or pull request events but only for the main branch
-  push:
-    branches: [main]
-    paths:
-      - "python/llm/**"
-      - ".github/workflows/llm_unit_tests_linux.yml"
-      - ".github/workflows/llm-binary-build.yml"
-      - ".github/actions/llm/setup-llm-env/action.yml"
-      - ".github/actions/llm/remove-llm-env/action.yml"
-      - ".github/actions/llm/cli-test/action.yml"
-      - ".github/actions/llm/inference-test/action.yml"
-      - ".github/actions/llm/langchain-test/action.yml"
-      - ".github/actions/llm/download-llm-binary/action.yml"
-  pull_request:
-    branches: [main]
-    paths:
-      - "python/llm/**"
-      - ".github/workflows/llm_unit_tests_linux.yml"
-      - ".github/workflows/llm-binary-build.yml"
-      - ".github/actions/llm/setup-llm-env/action.yml"
-      - ".github/actions/llm/remove-llm-env/action.yml"
-      - ".github/actions/llm/cli-test/action.yml"
-      - ".github/actions/llm/inference-test/action.yml"
-      - ".github/actions/llm/langchain-test/action.yml"
-      - ".github/actions/llm/download-llm-binary/action.yml"
-  workflow_dispatch:
-  workflow_call:
-
-env:
-  INT4_CKPT_DIR: ./llm/ggml-actions/stable
-  LLAMA_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin
-  GPTNEOX_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_redpajama_7b_q4_0.bin
-  BLOOM_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_bloom_7b_q4_0.bin
-  STARCODER_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin
-  CHATGLM_INT4_CKPT_PATH: ./llm/ggml-actions/stable/chatglm2-6b-q4_0.bin
-
-  LLM_DIR: ./llm
-  ORIGINAL_CHATGLM2_6B_PATH: ./llm/chatglm2-6b/
-  ORIGINAL_REPLIT_CODE_PATH: ./llm/replit-code-v1-3b/
-  ORIGINAL_WHISPER_TINY_PATH: ./llm/whisper-tiny/
-  SPEECH_DATASET_PATH: ./llm/datasets/librispeech_asr_dummy
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  llm-cpp-build:
-    uses: ./.github/workflows/llm-binary-build.yml
-  llm-unit-test-linux:
-    needs: llm-cpp-build
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.9"]
-        instruction: ["AVX512"]
-    runs-on: [self-hosted, llm, "${{matrix.instruction}}", ubuntu-20.04-lts]
-    env:
-      THREAD_NUM: 24
-      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          python -m pip install --upgrade setuptools==58.0.4
-          python -m pip install --upgrade wheel
-
-      - name: Download llm binary
-        uses: ./.github/actions/llm/download-llm-binary
-
-      - name: Run LLM install (all) test
-        uses: ./.github/actions/llm/setup-llm-env
-
-      - name: Download ckpt & original models
-        run: |
-          if [ ! -d $LLAMA_INT4_CKPT_PATH ]; then
-            echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..."
-            wget --no-verbose $LLM_FTP_URL/${LLAMA_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
-          fi
-          if [ ! -d $GPTNEOX_INT4_CKPT_PATH ]; then
-            echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..."
-            wget --no-verbose $LLM_FTP_URL/${GPTNEOX_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
-          fi
-          if [ ! -d $BLOOM_INT4_CKPT_PATH ]; then
-            echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..."
-            wget --no-verbose $LLM_FTP_URL/${BLOOM_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
-          fi
-          if [ ! -d $STARCODER_INT4_CKPT_PATH ]; then
-            echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..."
-            wget --no-verbose $LLM_FTP_URL/${STARCODER_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
-          fi
-          # if [ ! -d $CHATGLM_INT4_CKPT_PATH ]; then
-          #   echo "Directory $CHATGLM_INT4_CKPT_PATH not found. Downloading from FTP server..."
-          #   wget --no-verbose $LLM_FTP_URL/${CHATGLM_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
-          # fi
-          if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then
-            echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..."
-            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_CHATGLM2_6B_PATH:2} -P $LLM_DIR
-          fi
-          if [ ! -d $ORIGINAL_REPLIT_CODE_PATH ]; then
-            echo "Directory $ORIGINAL_REPLIT_CODE_PATH not found. Downloading from FTP server..."
-            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_REPLIT_CODE_PATH:2} -P $LLM_DIR
-          fi
-          if [ ! -d $ORIGINAL_WHISPER_TINY_PATH ]; then
-            echo "Directory $ORIGINAL_WHISPER_TINY_PATH not found. Downloading from FTP server..."
-            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_WHISPER_TINY_PATH:2} -P $LLM_DIR
-          fi
-          if [ ! -d $SPEECH_DATASET_PATH ]; then
-            echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..."
-            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${SPEECH_DATASET_PATH:2} -P $LLM_DIR
-          fi
-
-      - name: Run LLM cli test
-        uses: ./.github/actions/llm/cli-test
-
-      - name: Run LLM inference test
-        uses: ./.github/actions/llm/inference-test
-
-      - name: Run LLM langchain test
-        uses: ./.github/actions/llm/langchain-test
				
			
@@ -14,11 +14,11 @@
 # limitations under the License.
 #
 
-
 import pytest
 import os
 import tempfile
 from unittest import TestCase
+import shutil
 
 from bigdl.llm import llm_convert
 from bigdl.llm.transformers import AutoModelForCausalLM
				
			
@@ -65,13 +65,11 @@ class TestConvertModel(TestCase):
         assert os.path.isfile(converted_model_path)
 
     def test_transformer_convert_llama(self):
-        model = AutoModelForCausalLM.from_pretrained(llama_model_path, load_in_4bit=True)
-        tempdir = tempfile.mkdtemp(dir=output_dir)
-        model.save_pretrained(tempdir)
-        model = AutoModelForCausalLM.load_low_bit(tempdir)
-        assert model is not None
-        import shutil
-        shutil.rmtree(tempdir)
+        with tempfile.TemporaryDirectory(dir=output_dir) as tempdir:
+            model = AutoModelForCausalLM.from_pretrained(llama_model_path, load_in_4bit=True)
+            model.save_pretrained(tempdir)
+            newModel = AutoModelForCausalLM.load_low_bit(tempdir)
+            assert newModel is not None
 
     def test_transformer_convert_llama_q5(self):
         model = AutoModelForCausalLM.from_pretrained(llama_model_path,
				
			
@@ -82,14 +80,12 @@ class TestConvertModel(TestCase):
                                                      load_in_low_bit="sym_int8")
 
     def test_transformer_convert_llama_save_load(self):
-        model = AutoModelForCausalLM.from_pretrained(llama_model_path,
-                                                     load_in_low_bit="asym_int4")
-        tempdir = tempfile.mkdtemp(dir=output_dir)
-        model.save_low_bit(tempdir)
-        newModel = AutoModelForCausalLM.load_low_bit(tempdir)
-        import shutil
-        shutil.rmtree(tempdir)
-
+        with tempfile.TemporaryDirectory(dir=output_dir) as tempdir:
+            model = AutoModelForCausalLM.from_pretrained(llama_model_path,
+                                                        load_in_low_bit="asym_int4")
+            model.save_low_bit(tempdir)
+            newModel = AutoModelForCausalLM.load_low_bit(tempdir)
+            assert newModel is not None
 
 if __name__ == '__main__':
     pytest.main([__file__])
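The two test hunks above implement the "update tempdir create way" item from the commit message: `tempfile.mkdtemp` plus a manual `shutil.rmtree` leaks the directory whenever an assertion fails in between, while the `tempfile.TemporaryDirectory` context manager removes it on every exit path. The same create-with-guaranteed-cleanup idiom in shell, shown for comparison only (a sketch, not part of the commit):

# Create a temp dir and guarantee removal on any exit, mirroring what
# Python's TemporaryDirectory context manager provides.
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT
# ... use "$tmpdir" ...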
				
			
@@ -15,7 +15,7 @@ if [ -z "$THREAD_NUM" ]; then
   THREAD_NUM=2
 fi
 export OMP_NUM_THREADS=$THREAD_NUM
-taskset -c 0-$((THREAD_NUM - 1)) python -m pytest -s ${LLM_INFERENCE_TEST_DIR} -k test_transformers
+python -m pytest -s ${LLM_INFERENCE_TEST_DIR} -k test_transformers
 
 now=$(date "+%s")
 time=$((now-start))
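`taskset` pins a process to a fixed set of CPU cores but is Linux-only, so the hunk above removes it now that the same script is shared with Windows runners ("remove taskset dependency" in the commit message); the thread count is still bounded via `OMP_NUM_THREADS`. If pinning were still wanted where available, one way (an illustration, not what the commit does) is to guard on the tool's presence:

# Pin pytest to the first $THREAD_NUM cores only when taskset exists.
if command -v taskset >/dev/null 2>&1; then
  taskset -c 0-$((THREAD_NUM - 1)) python -m pytest -s ${LLM_INFERENCE_TEST_DIR} -k test_transformers
else
  python -m pytest -s ${LLM_INFERENCE_TEST_DIR} -k test_transformers
fi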