[LLM] Add nightly igpu perf test for INT4+FP16 1024-128 (#10496)
parent 3d59c74a0b
commit 1579ee4421

4 changed files with 146 additions and 0 deletions

91  .github/workflows/llm_performance_tests.yml  (vendored)
@@ -772,6 +772,97 @@ jobs:
          call conda deactivate

      - name: Prepare igpu perf test (int4+fp16 1024-128)
        shell: bash
        run: |
          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml

      - name: Test on igpu (int4+fp16 1024-128)
        shell: cmd
        run: |
          call conda activate igpu-perf
          pip install transformers==4.31.0

          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
          set SYCL_CACHE_PERSISTENT=1
          set BIGDL_LLM_XMX_DISABLED=1
          REM for llava
          set TRANSFORMERS_OFFLINE=1

          cd python\llm\dev\benchmark\all-in-one
          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16.yaml config.yaml
          set PYTHONIOENCODING=utf-8
          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
          if %ERRORLEVEL% neq 0 (exit /b 1)

          call conda deactivate

      - name: Prepare igpu perf test for Mistral (int4+fp16 1024-128)
        shell: bash
        run: |
          sed -i 's/{today}_test1/{today}_test2/g' python/llm/dev/benchmark/all-in-one/run.py
          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_434.yaml

      - name: Test on igpu for Mistral (int4+fp16 1024-128)
        shell: cmd
        run: |
          call conda activate igpu-perf
          pip install transformers==4.34.0

          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
          set SYCL_CACHE_PERSISTENT=1
          set BIGDL_LLM_XMX_DISABLED=1

          cd python\llm\dev\benchmark\all-in-one
          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_434.yaml config.yaml
          set PYTHONIOENCODING=utf-8
          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
          if %ERRORLEVEL% neq 0 (exit /b 1)

          call conda deactivate

      - name: Prepare igpu perf test for Qwen 1.5 (int4+fp16 1024-128)
        shell: bash
        run: |
          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml

      - name: Test on igpu for Qwen 1.5 (int4+fp16 1024-128)
        shell: cmd
        run: |
          call conda activate igpu-perf
          pip install transformers==4.37.0

          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
          set SYCL_CACHE_PERSISTENT=1
          set BIGDL_LLM_XMX_DISABLED=1

          cd python\llm\dev\benchmark\all-in-one
          move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_437.yaml config.yaml
          set PYTHONIOENCODING=utf-8
          python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
          if %ERRORLEVEL% neq 0 (exit /b 1)

          call conda deactivate

      - name: Concat csv and generate html (int4+fp16 1024-128)
        shell: cmd
        run: |
          call conda activate html-gen

          cd python\llm\dev\benchmark\all-in-one
          python ..\..\..\test\benchmark\concat_csv.py
          if %ERRORLEVEL% neq 0 (exit /b 1)
          del /q *test*.csv
          move *.csv %CSV_SAVE_PATH%\1024-128_int4_fp16\
          cd ..\..\..\test\benchmark
          python csv_to_html.py -f %CSV_SAVE_PATH%\1024-128_int4_fp16\
          if %ERRORLEVEL% neq 0 (exit /b 1)
          move %CSV_SAVE_PATH%\1024-128_int4_fp16\*.html %CSV_SAVE_PATH%

          call conda deactivate

      - name: Upload results to ftp
        if: ${{ always() }}
        shell: cmd
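The three "Prepare" steps retag the result files that run.py writes ({today}_test3 to {today}_test1, then test1 to test2, then test2 to test3), so each transformers pin produces its own CSV under the shared 1024-128_int4_fp16 folder before concat_csv.py merges them and csv_to_html.py renders the report. Neither script's source is part of this diff; below is a minimal Python sketch of that merge-and-render idea, assuming pandas and the *_test*.csv naming. Paths and column handling are illustrative, not the scripts' actual implementation.

import glob
import pandas as pd

def merge_and_render(result_dir: str) -> None:
    # Each transformers-version group wrote its own <date>_testN CSV
    # (the sed retagging above keeps the three runs from colliding).
    parts = sorted(glob.glob(f"{result_dir}/*_test*.csv"))
    merged = pd.concat((pd.read_csv(p) for p in parts), ignore_index=True)
    # Roughly what the concat step, then the csv_to_html step, produce.
    merged.to_csv(f"{result_dir}/1024-128_int4_fp16_merged.csv", index=False)
    merged.to_html(f"{result_dir}/1024-128_int4_fp16_merged.html", index=False)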
29  python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml  (Normal file)
@@ -0,0 +1,29 @@
repo_id:
  - 'THUDM/chatglm2-6b'
  - 'THUDM/chatglm3-6b'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'baichuan-inc/Baichuan2-13B-Chat'
  - 'internlm/internlm-chat-7b-8k'
  - 'Qwen/Qwen-7B-Chat'
  - 'BAAI/AquilaChat2-7B'
  - '01-ai/Yi-6B'
  - 'meta-llama/Llama-2-7b-chat-hf'
  - 'meta-llama/Llama-2-13b-chat-hf'
  - 'WisdomShell/CodeShell-7B-Chat'
  - 'tiiuae/falcon-7b-instruct-with-patch'
  - 'mosaicml/mpt-7b-chat'
  - 'liuhaotian/llava-v1.5-7b'
  # - 'RWKV/rwkv-4-world-7b'
  # - 'RWKV/rwkv-5-world-7b'
  - 'IEITYuan/Yuan2-2B-hf'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs:
  - '1024-128'
test_api:
  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layers
cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
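run.py consumes this file after the workflow moves it into place as config.yaml. Its internals are not shown in this diff; here is a minimal sketch of how such a config could drive the benchmark loop, assuming PyYAML, with run_benchmark as a hypothetical stand-in for the real dispatch on test_api.

import yaml

def run_benchmark(repo_id, in_len, out_len, api, conf):
    # Hypothetical stub: the real run.py loads the model with the given
    # low_bit format, runs warm_up + num_trials generations, and appends
    # a CSV row of latencies.
    print(f"{repo_id}: {in_len}-{out_len} via {api}, low_bit={conf['low_bit']}")

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

for repo_id in conf["repo_id"]:
    for in_out in conf["in_out_pairs"]:
        # '1024-128' means 1024 input tokens, 128 generated tokens.
        in_len, out_len = (int(x) for x in in_out.split("-"))
        for api in conf["test_api"]:
            run_benchmark(repo_id, in_len, out_len, api, conf)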
13  python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_434.yaml  (Normal file)

@@ -0,0 +1,13 @@
repo_id:
  - 'mistralai/Mistral-7B-Instruct-v0.1'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs:
  - '1024-128'
test_api:
  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layers
cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
13  python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml  (Normal file)

@@ -0,0 +1,13 @@
repo_id:
  - 'Qwen/Qwen1.5-7B-Chat'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to 'sym_int4' (i.e. symmetric int4)
batch_size: 1 # default to 1
in_out_pairs:
  - '1024-128'
test_api:
  - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layers
cpu_embedding: True # whether to put embedding on CPU (currently only available for gpu win related test_api)
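The split into three config files mirrors the three transformers pins in the workflow: the base model set runs on 4.31.0, while the _434 and _437 suffixes mark the groups that need transformers 4.34.0 (Mistral) and 4.37.0 (Qwen 1.5). A small sketch of that pairing follows; the version mapping is read off the pip install lines above, and the helper itself is hypothetical.

from importlib.metadata import version

# Config file -> transformers release the workflow installs before running it
# (mapping taken from the pip install lines in the workflow diff above).
PINNED = {
    "1024-128_int4_fp16.yaml": "4.31.0",      # base model set
    "1024-128_int4_fp16_434.yaml": "4.34.0",  # Mistral
    "1024-128_int4_fp16_437.yaml": "4.37.0",  # Qwen 1.5
}

def transformers_matches(config_name: str) -> bool:
    # True if the installed transformers equals the pin for this config group.
    return version("transformers") == PINNED[config_name]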