Test MiniCPM performance on iGPU in a more stable way (#11573)
* Test MiniCPM performance on iGPU in a more stable way

* small fix

---------

Co-authored-by: ATMxsp01 <shou.xu@intel.com>
parent 0981b72275
commit 13a72dc51d

6 changed files with 17 additions and 12 deletions

.github/workflows/llm_performance_tests.yml (9 changes, vendored)
@@ -623,14 +623,19 @@ jobs:
           sed -i "s/date.today()/\"$date_for_test_version\"/g" python/llm/dev/benchmark/all-in-one/run.py
 
-      - name: Add extra warmup for chatglm3-6b int4+fp32 for more stable results
+      - name: Add extra warmup for chatglm3-6b int4+fp32 & MiniCPM int4+fp16 int4+fp32 for more stable results
         shell: bash
         run: |
           sed -i '/^\s*result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\
-                  if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:\
+                  if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat", "openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:\
                       run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
           ' python/llm/dev/benchmark/all-in-one/run.py
+
+          sed -i '/^\s*result = run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\
+                  if repo_id in ["openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:\
+                      run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
+          ' python/llm/dev/benchmark/all-in-one/run.py
 
       # 32-32 int4+fp16
       - name: Prepare igpu perf test (32-32 int4+fp16)
         shell: bash
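For context, each sed command above prepends an extra, unrecorded benchmark call immediately before the measured "result = ..." line in python/llm/dev/benchmark/all-in-one/run.py, so the listed models get one additional full pass before the run whose numbers are kept. Below is a minimal sketch of the two patched sites after this CI step runs; the surrounding control flow and variable definitions in run.py (repo_id, local_model_hub, warm_up, and so on) are assumptions taken from the sed match pattern, not shown in this diff.

# Sketch of the int4+fp32 site in run.py after the first sed edit above.
# Surrounding loop and variables are assumed; the inserted "if" block and the
# pre-existing "result = ..." line are taken from the sed text in this diff.
if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat",
               "openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:
    # extra pass; the return value is discarded, so it only serves as warmup
    run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up,
                                 num_trials, num_beams, low_bit, cpu_embedding,
                                 batch_size, streaming)
result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up,
                                      num_trials, num_beams, low_bit, cpu_embedding,
                                      batch_size, streaming)

# Sketch of the int4+fp16 site after the second (newly added) sed edit.
if repo_id in ["openbmb/MiniCPM-1B-sft-bf16", "openbmb/MiniCPM-2B-sft-bf16"]:
    run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up,
                                      num_trials, num_beams, low_bit, cpu_embedding,
                                      batch_size, streaming)
result = run_transformer_int4_fp16_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up,
                                           num_trials, num_beams, low_bit, cpu_embedding,
                                           batch_size, streaming)

Because only the second call is assigned to result, the prepended call acts as one more warmup iteration for these models, which matches the intent stated in the step name of making the iGPU results more stable.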

@@ -1,6 +1,4 @@
 repo_id:
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'THUDM/chatglm3-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
@@ -13,6 +11,8 @@ repo_id:
   - 'RWKV/v5-Eagle-7B-HF'
   - '01-ai/Yi-6B-Chat'
   - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3

@@ -1,6 +1,4 @@
 repo_id:
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'THUDM/chatglm3-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
@@ -12,6 +10,8 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
   - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3

@@ -1,6 +1,4 @@
 repo_id:
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'THUDM/chatglm3-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
@@ -12,6 +10,8 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
   - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3

@@ -1,6 +1,4 @@
 repo_id:
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'THUDM/chatglm3-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
@@ -12,6 +10,8 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
   - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3

@@ -1,6 +1,4 @@
 repo_id:
-  - 'openbmb/MiniCPM-1B-sft-bf16'
-  - 'openbmb/MiniCPM-2B-sft-bf16'
   - 'THUDM/chatglm3-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
@@ -12,6 +10,8 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
   - 'Qwen/Qwen-VL-Chat'
+  - 'openbmb/MiniCPM-1B-sft-bf16'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5