LLM: Change benchmark bf16 load format. (#9035)
* LLM: Change benchmark bf16 load format.
* comment on bf16 chatglm.
* fix.
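Concretely, the non-chatglm branches of the benchmark switch from torch_dtype='auto' (which defers to whatever dtype the checkpoint config declares) to an explicit torch.bfloat16, and the chatglm branch now raises invalidInputError instead of quietly loading float32 weights. A minimal standalone sketch of the new load format, assuming transformers and torch are installed; the model_path value below is illustrative (the benchmark passes its own local path):

    import time
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_path = "meta-llama/Llama-2-7b-chat-hf"  # illustrative; the benchmark supplies its own model_path

    st = time.perf_counter()
    # Old format: defer to the checkpoint config ('auto' may not yield bfloat16 on CPU).
    # model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype='auto')
    # New format: request bfloat16 weights explicitly at load time.
    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,
                                                 torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    print(">> loading of model costs {}s".format(time.perf_counter() - st))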
parent 023555fb1f
commit 26213a5829

1 changed file with 3 additions and 5 deletions
@@ -176,18 +176,16 @@ def run_pytorch_autocast_bf16(repo_id,
     st = time.perf_counter()
     if repo_id in ['THUDM/chatglm-6b', 'THUDM/chatglm2-6b']:
         # TODO: need verify chatglm family run bf16.
-        model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype='auto').float()
-        #model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype='auto').bfloat()
-        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        invalidInputError(False, "Currently pytorch do not support bfloat16 on cpu for chatglm models.")
     elif repo_id in ['meta-llama/Llama-2-7b-chat-hf','meta-llama/Llama-2-13b-chat-hf',
                      'meta-llama/Llama-2-70b-chat-hf','decapoda-research/llama-7b-hf',
                      'decapoda-research/llama-65b-hf','lmsys/vicuna-7b-v1.5',
                      'lmsys/vicuna-13b-v1.3','project-baize/merged-baize-30b']:
-        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype='auto')
+        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
         # Need to use LlamaTokenizer, reason please refer to issue: https://github.com/intel-analytics/BigDL/issues/8944
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
-        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype='auto')
+        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     end = time.perf_counter()
     print(">> loading of model costs {}s".format(end - st))