Update benchmark script for NPU (#11932)
This commit is contained in:
parent 730d9ec811
commit 7c8c9a0670
3 changed files with 20 additions and 8 deletions
Changed file 1 of 3: benchmark config (YAML).

@@ -11,6 +11,7 @@ low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
 batch_size: 1 # default to 1
 in_out_pairs:
   - '32-32'
+  - '960-64'
   - '1024-128'
 test_api:
   - "transformer_int4_fp16_gpu"             # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
@@ -37,5 +38,6 @@ test_api:
   # - "transformers_int4_npu_win"           # on Intel NPU for Windows,  transformer-like API, (qtype=int4)
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only available now for gpu win related test_api)
+optimize_model: False # whether apply further optimization on NPU (only available now for transformers_int4_npu_win test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
 task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
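The new optimize_model key is optional: when it is absent, the benchmark falls back to False, as the __main__ handling added later in this commit shows. A minimal sketch of reading the config that way (how the script actually loads its YAML is not shown in this diff, so PyYAML and the config.yaml file name are assumptions taken from the script's own error message):

# Minimal sketch, not the benchmark's exact loader: read the YAML config and
# fall back to the documented defaults for optional keys.
import yaml  # PyYAML; assumed loader for illustration

with open("config.yaml", "r") as f:   # file name assumed for illustration
    conf = yaml.safe_load(f)

optimize_model = conf.get("optimize_model", False)  # new key introduced in this commit
in_out_pairs = conf["in_out_pairs"]                 # now includes '960-64' by default
print(optimize_model, in_out_pairs)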
Changed file 2 of 3: benchmark script (Python).

@@ -136,7 +136,7 @@ def preprocess_prompt(tokenizer, in_len, task):
         input_ids = tokenizer.encode(input_str, return_tensors="pt")
     return input_ids

-def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation'):
+def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation', optimize_model=False):
     # TODO: make a parameter
     result= {}
     if test_api == 'transformer_int4':
@@ -188,7 +188,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
     elif test_api == 'pipeline_parallel_gpu':
         result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype)
     elif test_api == 'transformers_int4_npu_win':
-        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
+        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model)
     else:
         invalidInputError(False, "Unknown test_api " + test_api + ", please check your config.yaml.")

@@ -603,24 +603,30 @@ def transformers_int4_npu_win(repo_id,
                                  num_trials,
                                  num_beams,
                                  low_bit,
-                                 batch_size):
+                                 batch_size,
+                                 optimize_model):
     from ipex_llm.transformers.npu_model import AutoModel, AutoModelForCausalLM
     from transformers import AutoTokenizer, LlamaTokenizer

     model_path = get_model_path(repo_id, local_model_hub)
+    in_out_len = in_out_pairs[0].split("-")
+    max_output_len = max(int(in_out_len[0]) + int(in_out_len[1]), 1024)
     # Load model in 4 bit,
     # which convert the relevant layers in the model into INT4 format
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
-        model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                          optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                           torch_dtype='auto', attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     elif repo_id in LLAMA_IDS:
-        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
-        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     end = time.perf_counter()
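The NPU loader now derives its length limits from the first configured in_out_pair instead of relying on library defaults: the prompt budget is the input length, and the output/KV-cache budget is input plus output tokens with a floor of 1024. A quick sanity check of that arithmetic for the pairs shipped in the config above (a standalone sketch, not part of the commit):

# Standalone sketch of the length derivation added above, checked against the
# in_out_pairs from the benchmark config.
def npu_lengths(in_out_pair):
    in_len, out_len = (int(x) for x in in_out_pair.split("-"))
    max_prompt_len = in_len                        # int(in_out_len[0]) in the diff
    max_output_len = max(in_len + out_len, 1024)   # floor of 1024, as in the diff
    return max_prompt_len, max_output_len

print(npu_lengths("960-64"))    # -> (960, 1024)
print(npu_lengths("1024-128"))  # -> (1024, 1152)
print(npu_lengths("32-32"))     # -> (32, 1024)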
@@ -643,6 +649,7 @@ def transformers_int4_npu_win(repo_id,
             true_str = tokenizer.batch_decode(input_ids)[0]
             input_list = [true_str] * batch_size
             input_ids = tokenizer(input_list, return_tensors="pt").input_ids
+            input_ids = input_ids[:, :in_len]
             actual_in_len = input_ids.shape[1]
             result[in_out] = []
             for i in range(num_trials + warm_up):
@@ -2016,12 +2023,15 @@ if __name__ == '__main__':
     streaming = False
     use_fp16_torch_dtype = False
     task = 'continuation'
+    optimize_model = False # only for transformers_int4_npu_win
     if 'streaming' in conf:
         streaming = conf['streaming']
     if 'use_fp16_torch_dtype' in conf:
         use_fp16_torch_dtype = conf['use_fp16_torch_dtype']
     if 'task' in conf:
         task = conf['task']
+    if 'optimize_model' in conf:
+        optimize_model = conf['optimize_model']
     lookahead = False

     import pandas as pd
@@ -2048,7 +2058,7 @@ if __name__ == '__main__':
                 if task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
                     lookahead = True
                 run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
-                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task)
+                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model)
         df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                                             'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
                                             'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
Changed file 3 of 3: NPU model wrapper (ipex_llm.transformers.npu_model).

@@ -117,7 +117,7 @@ class _BaseAutoModelClass:
         ignore_argument(kwargs, "pipeline_parallel_stages")
         optimize_model = kwargs.pop("optimize_model", False)
         max_output_len = kwargs.pop("max_output_len", 1024)
-        max_prompt_len = kwargs.pop("max_prompt_len", max_output_len)
+        max_prompt_len = kwargs.pop("max_prompt_len", 512)
         inter_pp = kwargs.pop("inter_pp", None)
         intra_pp = kwargs.pop("intra_pp", None)
         transpose_value_cache = kwargs.pop("transpose_value_cache", True)
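With this change the wrapper's default max_prompt_len no longer tracks max_output_len but falls back to 512, so callers benchmarking longer prompts should pass it explicitly, which is what the updated benchmark script does. A hedged sketch of such a call, using only keyword arguments that appear in this diff (the model path and length values are placeholders):

# Hypothetical caller: load an NPU model with the prompt and KV-cache budgets
# spelled out, mirroring the kwargs the benchmark script forwards above.
# "path/to/local/model" and the length values are placeholders.
from ipex_llm.transformers.npu_model import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "path/to/local/model",
    load_in_low_bit="sym_int4",   # qtype used by the benchmark default
    optimize_model=True,          # further NPU optimization; the wrapper defaults to False
    max_output_len=1152,          # e.g. 1024 input + 128 output tokens
    max_prompt_len=1024,          # default is now 512, so set it for long prompts
    transpose_value_cache=True,
    use_cache=True,
    trust_remote_code=True,
    attn_implementation="eager",
).eval()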