[LLM] Change nightly perf to install from pypi (#10027)
* Change to install from pypi and have a check to make sure the installed bigdl-llm version is as expected
* Make sure result date is the same as tested bigdl-llm version
* Small fixes
* Small fix
* Small fixes
* Small fix
* Small fixes
* Small updates
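The same install-and-verify pattern is applied to every job in the diff below ([xpu] on the Arc/iGPU jobs, [all] on the CPU jobs). A minimal standalone sketch of it in bash, under one assumption: the bigdl-llm nightly wheel embeds its build date in the version string reported by pip show (e.g. "Version: 2.5.0b20240201"), so grepping for yesterday's date confirms the freshly published nightly was installed rather than a cached build:

    #!/usr/bin/env bash
    # Install the latest nightly bigdl-llm from PyPI, then fail fast if the
    # installed version does not carry yesterday's date (the nightly under test).
    set -e
    pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
    test_version_date=$(date -d 'yesterday' '+%Y%m%d')
    if ! pip show bigdl-llm | grep "$test_version_date"; then
        echo "Did not install bigdl-llm with expected version $test_version_date"
        exit 1
    fi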
parent 9978089796
commit 863c3f94d0

1 changed file with 113 additions and 42 deletions

.github/workflows/llm_performance_tests.yml (vendored) | 155
@@ -8,7 +8,8 @@ concurrency:
 # Controls when the action will run.
 on:
   schedule:
-    - cron: "00 13 * * *" # GMT time, 13:00 GMT == 21:00 China
+    - cron: "30 16 * * *" # GMT time, 16:30 GMT == 00:30 China
+  # please uncomment it for PR tests
   # pull_request:
   #   branches: [main]
   #   paths:
@@ -20,13 +21,12 @@ on:

 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
-  llm-cpp-build:
-    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-cpp-build' || github.event.inputs.artifact == 'all' }}
-    uses: ./.github/workflows/llm-binary-build.yml
+  # llm-cpp-build: # please uncomment it for PR tests
+  #   uses: ./.github/workflows/llm-binary-build.yml

   llm-performance-test-on-arc:
-    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-arc' || github.event.inputs.artifact == 'all' }}
-    needs: llm-cpp-build
+    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-arc' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests
+    # needs: llm-cpp-build # please uncomment it for PR tests
     strategy:
       fail-fast: false
       matrix:
@@ -59,13 +59,24 @@ jobs:
           python -m pip install --upgrade transformers_stream_generator
           python -m pip install --upgrade tiktoken

-      - name: Download llm binary
-        uses: ./.github/actions/llm/download-llm-binary
+      # please uncomment it and comment the "Install BigDL-LLM from Pypi" part for PR tests
+      # - name: Download llm binary
+      #   uses: ./.github/actions/llm/download-llm-binary

-      - name: Run LLM install (all) test
-        uses: ./.github/actions/llm/setup-llm-env
-        with:
-          extra-dependency: "xpu_2.1"
+      # - name: Run LLM install (all) test
+      #   uses: ./.github/actions/llm/setup-llm-env
+      #   with:
+      #     extra-dependency: "xpu_2.1"
+
+      - name: Install BigDL-LLM from Pypi
+        shell: bash
+        run: |
+          pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+          test_version_date=`date -d 'yesterday' '+%Y%m%d'`
+          if ! pip show bigdl-llm | grep $test_version_date; then
+            echo "Did not install bigdl-llm with expected version $test_version_date"
+            exit 1
+          fi

       - name: Test installed xpu version
         shell: bash
@@ -76,6 +87,9 @@ jobs:
       - name: Test on xpu
         shell: bash
         run: |
+          date_for_test_version=$(date -d yesterday +%Y-%m-%d)
+          sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py
+
           source /opt/intel/oneapi/setvars.sh
           export USE_XETLA=OFF
           export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
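The sed pinning added above keeps result dates in step with the wheel under test: run.py stamps its output with date.today(), but the job runs just after midnight China time against a nightly versioned with the previous day's date, so date.today() is rewritten to yesterday. A toy demonstration of the substitution (demo.py is a hypothetical one-line file, not part of the repo):

    # Show what the workflow's sed rewrite does to a date.today() call.
    echo 'result_date = date.today()' > demo.py
    date_for_test_version=$(date -d yesterday +%Y-%m-%d)
    sed -i "s/date.today()/\"$date_for_test_version\"/g" demo.py
    cat demo.py   # prints: result_date = "2024-02-01" (for a run on 2024-02-02)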
@@ -110,8 +124,8 @@ jobs:
           fi

   llm-performance-test-on-spr:
-    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-spr' || github.event.inputs.artifact == 'all' }}
-    needs: llm-cpp-build
+    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-spr' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests
+    # needs: llm-cpp-build # please uncomment it for PR tests
     strategy:
       fail-fast: false
       matrix:
@@ -138,15 +152,29 @@ jobs:
           python -m pip install --upgrade pandas
           python -m pip install --upgrade einops

-      - name: Download llm binary
-        uses: ./.github/actions/llm/download-llm-binary
+      # please uncomment it and comment the "Install BigDL-LLM from Pypi" part for PR tests
+      # - name: Download llm binary
+      #   uses: ./.github/actions/llm/download-llm-binary

-      - name: Run LLM install (all) test
-        uses: ./.github/actions/llm/setup-llm-env
+      # - name: Run LLM install (all) test
+      #   uses: ./.github/actions/llm/setup-llm-env
+
+      - name: Install BigDL-LLM from Pypi
+        shell: bash
+        run: |
+          pip install --pre --upgrade bigdl-llm[all] -f https://developer.intel.com/ipex-whl-stable-xpu
+          test_version_date=`date -d 'yesterday' '+%Y%m%d'`
+          if ! pip show bigdl-llm | grep $test_version_date; then
+            echo "Did not install bigdl-llm with expected version $test_version_date"
+            exit 1
+          fi

       - name: Test on cpu
         shell: bash
         run: |
+          date_for_test_version=$(date -d yesterday +%Y-%m-%d)
+          sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py
+
           mv python/llm/test/benchmark/cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
           cd python/llm/dev/benchmark/all-in-one
           export http_proxy=${HTTP_PROXY}
@@ -160,8 +188,8 @@ jobs:
           python csv_to_html.py -f /mnt/disk1/nightly_perf_cpu/

   llm-performance-test-on-core:
-    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-core' || github.event.inputs.artifact == 'all' }}
-    needs: llm-cpp-build
+    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-core' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests
+    # needs: llm-cpp-build # please uncomment it for PR tests
     strategy:
       fail-fast: false
       matrix:
@@ -192,15 +220,29 @@ jobs:
           python -m pip install --upgrade omegaconf pandas
           python -m pip install --upgrade tiktoken einops transformers_stream_generator

-      - name: Download llm binary
-        uses: ./.github/actions/llm/download-llm-binary
+      # please uncomment it and comment the "Install BigDL-LLM from Pypi" part for PR tests
+      # - name: Download llm binary
+      #   uses: ./.github/actions/llm/download-llm-binary

-      - name: Run LLM install (all) test
-        uses: ./.github/actions/llm/setup-llm-env
+      # - name: Run LLM install (all) test
+      #   uses: ./.github/actions/llm/setup-llm-env
+
+      - name: Install BigDL-LLM from Pypi
+        shell: bash
+        run: |
+          pip install --pre --upgrade bigdl-llm[all] -f https://developer.intel.com/ipex-whl-stable-xpu
+          test_version_date=`date -d 'yesterday' '+%Y%m%d'`
+          if ! pip show bigdl-llm | grep $test_version_date; then
+            echo "Did not install bigdl-llm with expected version $test_version_date"
+            exit 1
+          fi

       - name: Test on core ${{ matrix.platform }}
         shell: bash
         run: |
+          date_for_test_version=$(date -d yesterday +%Y-%m-%d)
+          sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py
+
           mv python/llm/test/benchmark/core-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
           cd python/llm/dev/benchmark/all-in-one
           export http_proxy=${HTTP_PROXY}
@@ -218,8 +260,8 @@ jobs:
           fi

   llm-performance-test-on-igpu:
-    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-igpu' || github.event.inputs.artifact == 'all' }}
-    needs: llm-cpp-build
+    if: ${{ github.event.schedule || github.event.inputs.artifact == 'llm-performance-test-on-igpu' || github.event.inputs.artifact == 'all' }} # please comment it for PR tests
+    # needs: llm-cpp-build # please uncomment it for PR tests
     strategy:
       fail-fast: false
       matrix:
@@ -233,16 +275,46 @@ jobs:
       - uses: actions/checkout@v3

       # TODO: Put the bigdl-llm related install process for win gpu into a action function
-      - name: Download llm binary
-        uses: ./.github/actions/llm/download-llm-binary
-
-      - name: Prepare for install bigdl-llm from source
+      # Please uncomment it and comment the install from pypi for PR tests
+      # - name: Download llm binary
+      #   uses: ./.github/actions/llm/download-llm-binary
+
+      # - name: Prepare for install bigdl-llm from source
+      #   shell: bash
+      #   run: |
+      #     sed -i 's/"bigdl-core-xe-21==" + VERSION + "/"bigdl-core-xe-21/g' python/llm/setup.py
+      #     sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
+
+      # - name: Install bigdl-llm and other related packages (install from source)
+      #   shell: cmd
+      #   run: |
+      #     call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y
+      #     call conda activate igpu-perf
+
+      #     pip install --upgrade pip
+      #     pip install --upgrade wheel
+      #     pip install --upgrade omegaconf pandas
+      #     pip install --upgrade tiktoken einops transformers_stream_generator
+
+      #     cd python\llm
+      #     python setup.py clean --all bdist_wheel --win
+      #     if not exist dist\bigdl_llm*.whl (exit /b 1)
+      #     for %%i in (dist\bigdl_llm*.whl) do set whl_name=%%i
+
+      #     pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+      #     if %ERRORLEVEL% neq 0 (exit /b 1)
+      #     pip list
+
+      #     call conda deactivate
+
+      - name: Determine desired bigdl-llm version
         shell: bash
         run: |
           sed -i 's/"bigdl-core-xe-21==" + VERSION + "/"bigdl-core-xe-21/g' python/llm/setup.py
           sed -i 's/"bigdl-core-xe-21==" + VERSION/"bigdl-core-xe-21"/g' python/llm/setup.py
+          test_version_date=`date -d 'yesterday' '+%Y%m%d'`
+          echo "TEST_VERSION_DATE=${test_version_date}" >> "$GITHUB_ENV"

-      - name: Install bigdl-llm and other related packages
+      - name: Install bigdl-llm and other related packages (install from pypi)
         shell: cmd
         run: |
           call conda create -n igpu-perf python=${{ matrix.python-version }} libuv -y
@@ -253,13 +325,12 @@ jobs:
           pip install --upgrade omegaconf pandas
           pip install --upgrade tiktoken einops transformers_stream_generator

-          cd python\llm
-          python setup.py clean --all bdist_wheel --win
-          if not exist dist\bigdl_llm*.whl (exit /b 1)
-          for %%i in (dist\bigdl_llm*.whl) do set whl_name=%%i
-
-          pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-          if %ERRORLEVEL% neq 0 (exit /b 1)
+          pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
+          pip show bigdl-llm | findstr %TEST_VERSION_DATE%
+          if %ERRORLEVEL% neq 0 (
+            echo "Did not install bigdl-llm with expected version %TEST_VERSION_DATE%"
+            exit /b 1
+          )
           pip list

           call conda deactivate
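On the Windows iGPU runner the desired version date is computed once in a bash step ("Determine desired bigdl-llm version") and consumed by the later cmd step via findstr %TEST_VERSION_DATE%; GitHub Actions carries it between steps through the $GITHUB_ENV file. A minimal sketch of that mechanism outside a workflow (the mktemp stand-in for GITHUB_ENV is only for local illustration):

    # In a real job the runner provides GITHUB_ENV; fake it here to show the mechanics.
    GITHUB_ENV=$(mktemp)
    test_version_date=$(date -d 'yesterday' '+%Y%m%d')
    echo "TEST_VERSION_DATE=${test_version_date}" >> "$GITHUB_ENV"
    cat "$GITHUB_ENV"   # later steps see TEST_VERSION_DATE as an environment variable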
@@ -283,10 +354,10 @@ jobs:
           else
             echo "CSV_SAVE_PATH=${CSV_PR_PATH}" >> "$GITHUB_ENV"
           fi
-          cur_date=$(date +%Y-%m-%d)
-          echo "LOG_FILE=${cur_date}_output.txt" >> "$GITHUB_ENV"
+          date_for_test_version=$(date -d yesterday +%Y-%m-%d)
+          echo "LOG_FILE=${date_for_test_version}_output.txt" >> "$GITHUB_ENV"

-          sed -i "s/date.today()/\"$cur_date\"/g" python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py

       - name: Prepare igpu perf test (32-32)
         shell: bash