From 7c8c9a067021b265867bd321c96bb484bb2093b4 Mon Sep 17 00:00:00 2001
From: binbin Deng <108676127+plusbang@users.noreply.github.com>
Date: Tue, 27 Aug 2024 14:41:14 +0800
Subject: [PATCH] Update benchmark script for NPU (#11932)

---
 .../llm/dev/benchmark/all-in-one/config.yaml  |  2 ++
 python/llm/dev/benchmark/all-in-one/run.py    | 24 +++++++++++++------
 .../src/ipex_llm/transformers/npu_model.py    |  2 +-
 3 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/python/llm/dev/benchmark/all-in-one/config.yaml b/python/llm/dev/benchmark/all-in-one/config.yaml
index db302e73..e94d001e 100644
--- a/python/llm/dev/benchmark/all-in-one/config.yaml
+++ b/python/llm/dev/benchmark/all-in-one/config.yaml
@@ -11,6 +11,7 @@ low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
 batch_size: 1 # default to 1
 in_out_pairs:
   - '32-32'
+  - '960-64'
   - '1024-128'
 test_api:
   - "transformer_int4_fp16_gpu"   # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
@@ -37,5 +38,6 @@ test_api:
   # - "transformers_int4_npu_win"          # on Intel NPU for Windows, transformer-like API, (qtype=int4)
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only available now for gpu win related test_api)
+optimize_model: False # whether apply further optimization on NPU (only available now for transformers_int4_npu_win test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
 task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 5715908b..3dcb4011 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -136,7 +136,7 @@ def preprocess_prompt(tokenizer, in_len, task):
         input_ids = tokenizer.encode(input_str, return_tensors="pt")
     return input_ids
 
-def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation'):
+def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation', optimize_model=False):
     # TODO: make a parameter
     result= {}
     if test_api == 'transformer_int4':
@@ -188,7 +188,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
     elif test_api == 'pipeline_parallel_gpu':
         result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype)
     elif test_api == 'transformers_int4_npu_win':
-        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
+        result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model)
     else:
         invalidInputError(False, "Unknown test_api " + test_api + ", please check your config.yaml.")
 
@@ -603,24 +603,30 @@ def transformers_int4_npu_win(repo_id,
                               num_trials,
                               num_beams,
                               low_bit,
-                              batch_size):
+                              batch_size,
+                              optimize_model):
     from ipex_llm.transformers.npu_model import AutoModel, AutoModelForCausalLM
     from transformers import AutoTokenizer, LlamaTokenizer
 
     model_path = get_model_path(repo_id, local_model_hub)
+    in_out_len = in_out_pairs[0].split("-")
+    max_output_len = max(int(in_out_len[0]) + int(in_out_len[1]), 1024)
     # Load model in 4 bit,
     # which convert the relevant layers in the model into INT4 format
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
-        model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                          optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                           torch_dtype='auto', attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     elif repo_id in LLAMA_IDS:
-        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
-        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
+        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
+                                                     optimize_model=optimize_model, max_output_len=max_output_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=True,
                                                      use_cache=True, attn_implementation="eager").eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     end = time.perf_counter()
@@ -643,6 +649,7 @@ def transformers_int4_npu_win(repo_id,
             true_str = tokenizer.batch_decode(input_ids)[0]
             input_list = [true_str] * batch_size
             input_ids = tokenizer(input_list, return_tensors="pt").input_ids
+            input_ids = input_ids[:, :in_len]
             actual_in_len = input_ids.shape[1]
             result[in_out] = []
             for i in range(num_trials + warm_up):
@@ -2016,12 +2023,15 @@ if __name__ == '__main__':
     streaming = False
     use_fp16_torch_dtype = False
     task = 'continuation'
+    optimize_model = False # only for transformers_int4_npu_win
     if 'streaming' in conf:
         streaming = conf['streaming']
     if 'use_fp16_torch_dtype' in conf:
         use_fp16_torch_dtype = conf['use_fp16_torch_dtype']
     if 'task' in conf:
         task = conf['task']
+    if 'optimize_model' in conf:
+        optimize_model = conf['optimize_model']
 
     lookahead = False
     import pandas as pd
@@ -2048,7 +2058,7 @@ if __name__ == '__main__':
             if task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
                 lookahead = True
             run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
-                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task)
+                      conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model)
         df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                                             'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
                                             'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
diff --git a/python/llm/src/ipex_llm/transformers/npu_model.py b/python/llm/src/ipex_llm/transformers/npu_model.py
index 49bcfde1..ecfa0f69 100644
--- a/python/llm/src/ipex_llm/transformers/npu_model.py
+++ b/python/llm/src/ipex_llm/transformers/npu_model.py
@@ -117,7 +117,7 @@ class _BaseAutoModelClass:
         ignore_argument(kwargs, "pipeline_parallel_stages")
         optimize_model = kwargs.pop("optimize_model", False)
         max_output_len = kwargs.pop("max_output_len", 1024)
-        max_prompt_len = kwargs.pop("max_prompt_len", max_output_len)
+        max_prompt_len = kwargs.pop("max_prompt_len", 512)
         inter_pp = kwargs.pop("inter_pp", None)
         intra_pp = kwargs.pop("intra_pp", None)
         transpose_value_cache = kwargs.pop("transpose_value_cache", True)
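The in_out_pairs handling added to run.py is the core of the NPU-specific change: the first pair decides both the prompt budget and the total generation budget, and the tokenized prompt is then clipped to exactly the requested length. The following standalone sketch is not taken from the patch; the random token ids merely stand in for real tokenizer output, and it only reproduces the arithmetic.

# Sketch of the length handling added to transformers_int4_npu_win:
# the first in_out pair drives max_prompt_len / max_output_len, and the
# prompt is truncated to exactly in_len tokens before benchmarking.
import torch

in_out_pairs = ['960-64', '1024-128']            # as listed in config.yaml above
in_out_len = in_out_pairs[0].split("-")
in_len, out_len = int(in_out_len[0]), int(in_out_len[1])
max_output_len = max(in_len + out_len, 1024)     # prompt + generated tokens, floored at 1024

input_ids = torch.randint(0, 32000, (1, 1500))   # stand-in for tokenizer(...).input_ids
input_ids = input_ids[:, :in_len]                # keep exactly in_len prompt tokens
print(in_len, out_len, max_output_len, tuple(input_ids.shape))
# -> 960 64 1024 (1, 960)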
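For reference, a loading call equivalent to the non-ChatGLM branch after this patch might look as below. This is a hedged sketch rather than part of the patch: it assumes ipex-llm is installed with NPU support, an Intel NPU is available, and "path/to/local/model" is replaced with a real checkpoint; the keyword arguments mirror those forwarded by transformers_int4_npu_win, with values taken from the '960-64' pair.

# Sketch: loading a model on the NPU with the options the benchmark now passes.
# The model path is a placeholder.
import torch
from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "path/to/local/model"                     # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit='sym_int4',
                                             trust_remote_code=True, torch_dtype=torch.float16,
                                             optimize_model=True,        # config.yaml: optimize_model
                                             max_output_len=1024,        # max(960 + 64, 1024)
                                             max_prompt_len=960,         # first value of the in_out pair
                                             transpose_value_cache=True,
                                             use_cache=True, attn_implementation="eager").eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)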
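The npu_model.py change only touches a default: max_prompt_len used to fall back to max_output_len and now falls back to 512, so callers benchmarking long prompts (such as the new 960-64 pair) must pass max_prompt_len explicitly, as run.py now does. A toy sketch with a hypothetical helper, pop_npu_kwargs, mirroring the kwargs.pop defaults:

# Hypothetical helper mirroring the kwargs.pop defaults in
# _BaseAutoModelClass.from_pretrained after this patch.
def pop_npu_kwargs(**kwargs):
    optimize_model = kwargs.pop("optimize_model", False)
    max_output_len = kwargs.pop("max_output_len", 1024)
    max_prompt_len = kwargs.pop("max_prompt_len", 512)   # previously defaulted to max_output_len
    transpose_value_cache = kwargs.pop("transpose_value_cache", True)
    return optimize_model, max_output_len, max_prompt_len, transpose_value_cache

print(pop_npu_kwargs())                                         # (False, 1024, 512, True)
print(pop_npu_kwargs(max_prompt_len=960, optimize_model=True))  # (True, 1024, 960, True)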