Add new models to benchmark (#11505)
* Add new models to benchmark
* Remove Qwen/Qwen-VL-Chat to pass the validation

Co-authored-by: ATMxsp01 <shou.xu@intel.com>
parent 252426793b
commit 64cfed602d

7 changed files with 27 additions and 3 deletions
.github/workflows/llm_performance_tests.yml (vendored, 2 changes)
@@ -104,6 +104,7 @@ jobs:
         shell: bash
         # pip install transformers_stream_generator for model internlm-chat-7b-8k
         # pip install tiktoken for model Qwen-7B-Chat-10-12
+        # pip install matplotlib for model Qwen-VL-Chat
         run: |
           python -m pip install --upgrade pip
           python -m pip install --upgrade wheel
@@ -112,6 +113,7 @@ jobs:
           python -m pip install --upgrade einops
           python -m pip install --upgrade transformers_stream_generator
           python -m pip install --upgrade tiktoken
+          python -m pip install --upgrade matplotlib
 
       # specific for test on certain commits
       - name: Download llm binary
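Assembled for readability, the install step reads as below once both hunks are applied. The step name and the two lines between the wheel and einops installs are not visible in the diff, so they are marked as assumptions; the matplotlib line mirrors the comment above it, presumably because Qwen-VL-Chat's remote code imports matplotlib even though the model itself is commented out of the model lists below:

      - name: Install dependencies   # step name not shown in the diff; assumed
        shell: bash
        # pip install transformers_stream_generator for model internlm-chat-7b-8k
        # pip install tiktoken for model Qwen-7B-Chat-10-12
        # pip install matplotlib for model Qwen-VL-Chat
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          # ... two lines not shown in the diff ...
          python -m pip install --upgrade einops
          python -m pip install --upgrade transformers_stream_generator
          python -m pip install --upgrade tiktoken
          python -m pip install --upgrade matplotlib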
The remaining six changed files are model-list configs for the benchmark; their file paths are not preserved in this view. The first receives the following repo_id update:

@@ -4,8 +4,13 @@ repo_id:
   - 'THUDM/chatglm3-6b-4bit'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-#  - 'fnlp/moss-moon-003-sft-4bit' # moss-moon-003-sft cannot work on transformers 4.34+
   - 'mistralai/Mistral-7B-v0.1'
+  - 'deepseek-ai/deepseek-coder-6.7b-instruct'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'SmerkyG/rwkv-5-world-7b' # only fp32 is supported for this model for now; fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
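Applied, the model list in this config reads roughly as follows; entries on the file's first three lines are not visible in the diff and are elided:

repo_id:
  # ... any entries before line 4 are not shown in the diff ...
  - 'THUDM/chatglm3-6b-4bit'
  - 'baichuan-inc/Baichuan2-7B-Chat'
  - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
  - 'mistralai/Mistral-7B-v0.1'
  - 'deepseek-ai/deepseek-coder-6.7b-instruct'
  - 'THUDM/glm-4-9b-chat'
  - 'openbmb/MiniCPM-2B-sft-bf16'
  #- 'Qwen/Qwen-VL-Chat'
  #- 'SmerkyG/rwkv-5-world-7b' # only fp32 is supported for this model for now
  - '01-ai/Yi-6B-Chat'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3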
A second config receives the same update; note the pre-existing '#mwj: need to check' marker on its Mistral entry:

@@ -4,8 +4,13 @@ repo_id:
   - 'THUDM/chatglm3-6b-4bit'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-#  - 'fnlp/moss-moon-003-sft-4bit' # moss-moon-003-sft cannot work on transformers 4.34+
   - 'mistralai/Mistral-7B-v0.1' #mwj: need to check
+  - 'deepseek-ai/deepseek-coder-6.7b-instruct'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'SmerkyG/rwkv-5-world-7b' # only fp32 is supported for this model for now; fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
Farther down in the same file (the +5 line shift in the hunk header matches the repo_id additions above), the exclude list gains a commented-out Qwen-VL-Chat entry:

@@ -24,6 +29,7 @@ exclude:
   - 'baichuan-inc/Baichuan2-7B-Chat:2048'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit:1024'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
+#  - 'Qwen/Qwen-VL-Chat:2048'
 #  - 'fnlp/moss-moon-003-sft-4bit:1024'
 #  - 'fnlp/moss-moon-003-sft-4bit:2048'
 task: 'continuation' # task can be 'continuation', 'QA' or 'summarize'
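Each exclude entry appears to pair a repo id with an input length, so a model can be skipped at one prompt size while still running at others; a minimal sketch of that reading, using entries taken from the diff:

exclude:
  # 'repo_id:input_length': skip only that input length for the model
  - 'baichuan-inc/Baichuan2-7B-Chat:2048'        # skip only the 2048-token run
  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:1024'  # both lengths listed: model skipped entirely
  - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'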
A third config receives the identical repo_id update:

@@ -4,8 +4,13 @@ repo_id:
   - 'THUDM/chatglm3-6b-4bit'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit'
-#  - 'fnlp/moss-moon-003-sft-4bit' # moss-moon-003-sft cannot work on transformers 4.34+
   - 'mistralai/Mistral-7B-v0.1'
+  - 'deepseek-ai/deepseek-coder-6.7b-instruct'
+  - 'THUDM/glm-4-9b-chat'
+  - 'openbmb/MiniCPM-2B-sft-bf16'
+  #- 'Qwen/Qwen-VL-Chat'
+  #- 'SmerkyG/rwkv-5-world-7b' # only fp32 is supported for this model for now; fp16 and bf16 are not supported
+  - '01-ai/Yi-6B-Chat'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
The last three changed files share a shorter model list; each gains the same two entries:

@@ -3,6 +3,8 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
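After the change, each of these shorter lists reads roughly as follows; anything before the Qwen1.5 entry is not visible in the diff:

repo_id:
  # ... any entries before line 3 are not shown in the diff ...
  - 'Qwen/Qwen1.5-7B-Chat'
  - 'microsoft/Phi-3-mini-4k-instruct'
  - 'meta-llama/Meta-Llama-3-8B-Instruct'
  - 'microsoft/phi-3-vision-128k-instruct'
  - 'Qwen/Qwen2-7B-Instruct'
local_model_hub: '/mnt/disk1/models'
warm_up: 1
num_trials: 3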
The second of these carries a '# mwj: need to test' marker on its Llama 3 entry:

@@ -3,6 +3,8 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct' # mwj: need to test
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3
And the third:

@@ -3,6 +3,8 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
+  - 'Qwen/Qwen2-7B-Instruct'
 local_model_hub: '/mnt/disk1/models'
 warm_up: 1
 num_trials: 3