Add extra warmup for THUDM/glm-4-9b-chat in igpu-performance test (#11417)
parent ecb9efde65
commit 75f836f288

1 changed file with 1 addition and 1 deletion:
.github/workflows/llm_performance_tests.yml (vendored): 2 lines changed
@@ -562,7 +562,7 @@ jobs:
         shell: bash
         run: |
           sed -i '/^\s*result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\
-                  if repo_id in ["THUDM/chatglm3-6b"]:\
+                  if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:\
                       run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
           ' python/llm/dev/benchmark/all-in-one/run.py
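The sed command uses the `i\` (insert-before) command to add two lines in front of the matching `result = run_transformer_int4_gpu_win(...)` line in run.py at workflow time, so the listed models get one extra, untimed pass before the measured trials; this commit adds THUDM/glm-4-9b-chat to that list. Below is a minimal sketch of what the patched section of run.py is assumed to look like after injection: the wrapping function name run_benchmark is hypothetical, and run_transformer_int4_gpu_win is the benchmark routine referenced in the diff, defined elsewhere in run.py.

def run_benchmark(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials,
                  num_beams, low_bit, cpu_embedding, batch_size, streaming):
    # Injected by the workflow's sed command: give these models one extra,
    # discarded run so first-load overhead does not skew the measured trials.
    if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:
        run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs,
                                     warm_up, num_trials, num_beams, low_bit,
                                     cpu_embedding, batch_size, streaming)
    # Original line matched by the sed pattern; this run's numbers are the
    # ones reported by the benchmark.
    result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs,
                                          warm_up, num_trials, num_beams, low_bit,
                                          cpu_embedding, batch_size, streaming)
    return result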