* update llm benchmark scripts.
* change transformer_bf16 to pytorch_autocast_bf16.
* add autocast in transformer int4.
* revert autocast.
* add "pytorch_autocast_bf16" to doc.
* fix comments.
Changed file: the benchmark config (YAML, 17 lines, 381 B).
repo_id:
  - 'THUDM/chatglm-6b'
  - 'THUDM/chatglm2-6b'
  - 'meta-llama/Llama-2-7b-chat-hf'
local_model_hub: 'path to your local model hub'
warm_up: 1
num_trials: 3
in_out_pairs:
  - '32-32'
  - '1024-128'
test_api:
  - "transformer_int4"
  - "native_int4"
  - "optimize_model"
  - "pytorch_autocast_bf16"
  # - "transformer_int4_gpu"  # on arc
  # - "optimize_model_gpu"  # on arc