[LLM] Add perf test for core on Windows (#9397)
* Temporarily stop other perf tests
* Add framework for core performance test with one test model
* Small fix and add platform control
* Comment out lp for now
* Add missing yaml file
* Small fix
* Fix sed contents
* Small fix
* Small path fixes
* Small fix
* Add upload to ftp
* Small upload fix
* Add chatglm3-6b
* LLM: add model names
* Keep repo id the same as on ftp and temporarily make baichuan2 first priority
* Change order
* Remove temporary "if false" and separate PR and nightly results
* Small fix

---------

Co-authored-by: jinbridge <2635480475@qq.com>
parent 5d4ec44488
commit 4faf5af8f1

2 changed files with 84 additions and 0 deletions
.github/workflows/llm_performance_tests.yml (vendored): 56 additions

@@ -188,3 +188,59 @@ jobs:
          cp ./*.csv /mnt/disk1/nightly_perf_cpu/
          cd ../../../test/benchmark
          python csv_to_html.py -f /mnt/disk1/nightly_perf_cpu/

  llm-performance-test-on-core:
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: windows
            platform: dp
            python-version: "3.9"
          # - os: windows
          #   platform: lp
          #   python-version: "3.9"
    runs-on: [self-hosted, "${{ matrix.os }}", llm, perf-core, "${{ matrix.platform }}"]
    env:
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
      CSV_SAVE_PATH: ${{ github.event.schedule && 'D:/action-runners/nightly_perf_core_' || 'D:/action-runners/pr_perf_core_' }}${{ matrix.platform }}/
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          python -m pip install --upgrade omegaconf pandas
          python -m pip install --upgrade tiktoken einops transformers_stream_generator

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env

      - name: Test on core ${{ matrix.platform }}
        shell: bash
        run: |
          mv python/llm/test/benchmark/core-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
          cd python/llm/dev/benchmark/all-in-one
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          # hide time info
          sed -i 's/str(end - st)/"xxxxxx"/g' run.py
          python run.py
          cp ./*.csv $CSV_SAVE_PATH
          cd ../../../test/benchmark
          python csv_to_html.py -f $CSV_SAVE_PATH
          cd ../../dev/benchmark/all-in-one/
          if [ ${{ github.event.schedule}} ]; then
            curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/core_${{ matrix.platform }}/
          fi
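The new job mirrors the existing CPU job above it: it patches run.py with sed so the wall-clock string is replaced by "xxxxxx" (the "hide time info" step keeps the CSV columns stable across runs), copies the result CSVs to CSV_SAVE_PATH (a nightly or PR folder depending on whether the run was scheduled), renders them to HTML with csv_to_html.py, and uploads nightly results to FTP. As a rough sketch of what a folder-level conversion like csv_to_html.py -f <folder> amounts to, using the pandas dependency the job installs (hypothetical code, not the repository's actual script):

import argparse
import glob
import os

import pandas as pd


def csvs_to_html(folder: str) -> None:
    # Render every benchmark CSV in the folder as an HTML table next to it.
    for csv_path in glob.glob(os.path.join(folder, "*.csv")):
        df = pd.read_csv(csv_path)
        html_path = os.path.splitext(csv_path)[0] + ".html"
        with open(html_path, "w", encoding="utf-8") as f:
            f.write(df.to_html(index=False))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--folder", required=True,
                        help="folder containing the benchmark CSV files")
    args = parser.parse_args()
    csvs_to_html(args.folder)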
			
python/llm/test/benchmark/core-perf-test.yaml (new file): 28 additions

@@ -0,0 +1,28 @@
repo_id:
  - 'THUDM/chatglm2-6b'
  - 'THUDM/chatglm3-6b'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'internlm/internlm-chat-7b-8k'
  - 'Qwen/Qwen-7B-Chat-10-12'
  - 'BAAI/AquilaChat2-7B'
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'WisdomShell/CodeShell-7B'
  - 'tiiuae/falcon-7b-instruct-with-patch'
local_model_hub: 'D:\llm-models'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
in_out_pairs:
  - '32-32'
  - '1024-128'
test_api:
  - "transformer_int4"
  # - "native_int4"
  # - "optimize_model"
  # - "pytorch_autocast_bf16"
  # - "ipex_fp16_gpu" # on Intel GPU
  # - "transformer_int4_gpu"  # on Intel GPU
  # - "optimize_model_gpu"  # on Intel GPU
  # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server
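The workflow renames this file to config.yaml inside python/llm/dev/benchmark/all-in-one before invoking run.py, so the Windows core job reuses the existing all-in-one harness with this model list. A minimal sketch of how such a config could be consumed with OmegaConf (which the job installs); this is an illustration only, not the repository's run.py:

from omegaconf import OmegaConf


def main() -> None:
    conf = OmegaConf.load("config.yaml")  # the renamed core-perf-test.yaml
    for repo_id in conf.repo_id:
        for pair in conf.in_out_pairs:
            in_len, out_len = (int(x) for x in pair.split("-"))
            for api in conf.test_api:
                # Placeholder for the real benchmark: load repo_id from
                # conf.local_model_hub with conf.low_bit precision, run
                # conf.warm_up warm-up rounds plus conf.num_trials timed
                # trials (num_beams=conf.num_beams), and append a CSV row.
                print(repo_id, api, in_len, out_len, conf.low_bit)


if __name__ == "__main__":
    main()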