add transpose_value_cache for NPU benchmark (#12092)

* add `transpose_value_cache`
* update
* update

parent f7fb3c896c
commit 9650bf616a

2 changed files with 12 additions and 7 deletions
config.yaml:

@@ -41,3 +41,4 @@ streaming: False # whether output in streaming way (only available now for gpu w
 optimize_model: False # whether apply further optimization on NPU (only available now for transformers_int4_npu_win test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
 task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
+transpose_value_cache: True # whether apply transposed v_cache optimization on NPU (only available now for transformers_int4_npu_win test_api)
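The new key defaults to True and is consumed only by the transformers_int4_npu_win test_api. A minimal sketch of how the benchmark resolves it, assuming a plain dict loaded with yaml.safe_load (the actual script's config loader may differ):

import yaml  # assumption: illustrative loader only

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

# Mirrors the __main__ handling added in this commit: default to True when
# the key is absent, so existing configs keep the previous behavior.
transpose_value_cache = True
if 'transpose_value_cache' in conf:
    transpose_value_cache = conf['transpose_value_cache']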
benchmark script:

@@ -136,7 +136,7 @@ def preprocess_prompt(tokenizer, in_len, task):
         input_ids = tokenizer.encode(input_str, return_tensors="pt")
     return input_ids
 
-def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation', optimize_model=False):
+def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation', optimize_model=False, transpose_value_cache=True):
     # TODO: make a parameter
     result= {}
     if test_api == 'transformer_int4':
@@ -188,7 +188,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
     elif test_api == 'pipeline_parallel_gpu':
         result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype)
     elif test_api == 'transformers_int4_npu_win':
-        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model)
+        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model, transpose_value_cache)
     else:
         invalidInputError(False, "Unknown test_api " + test_api + ", please check your config.yaml.")
 
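Since run_model now exposes transpose_value_cache as a keyword parameter (default True) and forwards it to the NPU path, the flag can also be toggled in a direct call; the repo_id, in_out_pairs and other values below are illustrative only:

run_model(
    "meta-llama/Llama-2-7b-chat-hf",      # illustrative repo_id
    "transformers_int4_npu_win",          # the only test_api that consumes the flag
    in_out_pairs=["1024-128"],            # illustrative in/out token pair
    local_model_hub=None,
    low_bit="sym_int4",
    optimize_model=True,
    transpose_value_cache=False,          # disable the transposed v_cache optimization
)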
@@ -604,7 +604,8 @@ def transformers_int4_npu_win(repo_id,
                                  num_beams,
                                  low_bit,
                                  batch_size,
-                                 optimize_model):
+                                 optimize_model,
+                                 transpose_value_cache):
     from ipex_llm.transformers.npu_model import AutoModel, AutoModelForCausalLM
     from transformers import AutoTokenizer, LlamaTokenizer
 
@@ -616,17 +617,17 @@ def transformers_int4_npu_win(repo_id,
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
         model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
-                                          optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
+                                          optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
                                           torch_dtype=torch.float16, attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     elif repo_id in LLAMA_IDS:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
-                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
-                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     end = time.perf_counter()
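For reference, a standalone sketch of the NPU load path this hunk parameterizes, using the same keyword arguments as the calls above; model_path, max_output_len and max_prompt_len are placeholder values:

import torch
from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "path/to/local/model"       # placeholder
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    load_in_low_bit="sym_int4",          # benchmark default low_bit
    trust_remote_code=True,
    torch_dtype=torch.float16,
    optimize_model=True,
    max_output_len=1024,                 # placeholder
    max_prompt_len=512,                  # placeholder
    transpose_value_cache=False,         # now configurable instead of hard-coded True
    use_cache=True,
    attn_implementation="eager",
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)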
@@ -2033,6 +2034,9 @@ if __name__ == '__main__':
     if 'optimize_model' in conf:
         optimize_model = conf['optimize_model']
     lookahead = False
+    transpose_value_cache = True
+    if 'transpose_value_cache' in conf:
+        transpose_value_cache = conf['transpose_value_cache']
 
     import pandas as pd
     for api in conf.test_api:
@@ -2058,7 +2062,7 @@ if __name__ == '__main__':
                 if task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
                     lookahead = True
                 run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
-                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model)
+                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model, transpose_value_cache)
         df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                                             'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
                                             'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])