fix three NPU benchmark issues (#12350)

* fix three issues

* limit mixed_precision to channel-wise (CW) quantization only
Ruonan Wang 2024-11-06 19:01:01 +08:00 committed by GitHub
parent f24352aef9
commit c267355b35
2 changed files with 15 additions and 10 deletions
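
The three fixes, as they appear in the hunks below: (1) for MiniCPM models the benchmark no longer decodes and re-encodes the sliced prompt, so the prompt keeps the requested token length; (2) mixed_precision is derived from npu_group_size (enabled only when the group size is 0, i.e. channel-wise quantization) and passed through to from_pretrained in the pipeline path; (3) the pipeline generate thread now receives new_tokens rather than new_tokens - 1, fixing an off-by-one in the number of generated tokens.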


@@ -51,6 +51,8 @@ PHI3VISION_IDS = ['microsoft/phi-3-vision-128k-instruct']
 QWENVL_IDS = ['Qwen/Qwen-VL-Chat']
 MINICPM_IDS = ['openbmb/MiniCPM-1B-sft-bf16', 'openbmb/MiniCPM-2B-sft-bf16']
 MINICPM_V_IDS = ['openbmb/MiniCPM-V-2_6', 'openbmb/MiniCPM-Llama3-V-2_5']
+DUMMY_IDS = ['dummy/dummy-1.5B', 'dummy/dummy-4B']
@@ -662,6 +664,7 @@ def transformers_int4_npu_win(repo_id,
         # slice the input_ids to ensure the prompt length is required length.
         input_ids = tokenizer.encode(input_str, return_tensors="pt")
         input_ids = input_ids[:, :in_len]
-        true_str = tokenizer.batch_decode(input_ids)[0]
-        input_list = [true_str] * batch_size
-        input_ids = tokenizer(input_list, return_tensors="pt").input_ids
+        if repo_id not in MINICPM_IDS:
+            true_str = tokenizer.batch_decode(input_ids)[0]
+            input_list = [true_str] * batch_size
+            input_ids = tokenizer(input_list, return_tensors="pt").input_ids
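
Why the new guard matters (a minimal sketch, not the benchmark code; the model id is only illustrative): decoding the sliced prompt back to text and re-encoding it can change the token count, for example when the tokenizer re-inserts special tokens, so the prompt would no longer be exactly in_len tokens for MiniCPM.

    from transformers import AutoTokenizer

    # Illustrative: any tokenizer that re-adds special tokens shows the effect.
    tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-1B-sft-bf16",
                                              trust_remote_code=True)
    input_ids = tokenizer.encode("a long benchmark prompt", return_tensors="pt")
    input_ids = input_ids[:, :8]                      # slice to the target length
    true_str = tokenizer.batch_decode(input_ids)[0]   # decode back to text
    round_trip = tokenizer(true_str, return_tensors="pt").input_ids
    # round_trip.shape[1] may differ from 8 (e.g. a BOS token is added again),
    # which is why the round trip is now skipped for MINICPM_IDS.
    print(input_ids.shape[1], round_trip.shape[1])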
@@ -701,6 +704,7 @@ def transformers_int4_npu_pipeline_win(repo_id,
     model_path = get_model_path(repo_id, local_model_hub)
     in_out_len = in_out_pairs[0].split("-")
     max_context_len = max(int(in_out_len[0]) + int(in_out_len[1]), 1024)
+    mixed_precision = True if npu_group_size == 0 else False
     # Load model in 4 bit,
     # which convert the relevant layers in the model into INT4 format
     st = time.perf_counter()
@@ -708,7 +712,7 @@ def transformers_int4_npu_pipeline_win(repo_id,
     model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, pipeline=True, torch_dtype=torch.float16,
                                                  optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]),
                                                  quantization_group_size=npu_group_size, transpose_value_cache=transpose_value_cache,
-                                                 use_cache=True, attn_implementation="eager").eval()
+                                                 use_cache=True, attn_implementation="eager", mixed_precision=mixed_precision).eval()
     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     end = time.perf_counter()
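
The gating rule restated as a sketch (the helper name is mine; the rule itself comes from the hunk above): in this script npu_group_size == 0 selects channel-wise (CW) quantization, and mixed_precision is enabled only in that case, matching the commit note about limiting mixed_precision to CW.

    def use_mixed_precision(npu_group_size: int) -> bool:
        # Group size 0 selects channel-wise (CW) quantization; any positive
        # value means group-wise quantization, where mixed_precision stays off.
        return npu_group_size == 0

    assert use_mixed_precision(0) is True      # CW: mixed precision on
    assert use_mixed_precision(128) is False   # group-wise: off

As a side note, the committed line mixed_precision = True if npu_group_size == 0 else False is equivalent to the shorter mixed_precision = (npu_group_size == 0).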
@@ -726,6 +730,7 @@ def transformers_int4_npu_pipeline_win(repo_id,
         # slice the input_ids to ensure the prompt length is required length.
         input_ids = tokenizer.encode(input_str, return_tensors="pt")
         input_ids = input_ids[:, :in_len]
-        true_str = tokenizer.batch_decode(input_ids)[0]
-        input_list = [true_str] * batch_size
-        input_ids = tokenizer(input_list, return_tensors="pt").input_ids
+        if repo_id not in MINICPM_IDS:
+            true_str = tokenizer.batch_decode(input_ids)[0]
+            input_list = [true_str] * batch_size
+            input_ids = tokenizer(input_list, return_tensors="pt").input_ids


@@ -118,7 +118,7 @@ def generate(
                   self.head_dim, self.num_layers,
                   self.vocab_size,
                   self.transpose_value_cache,
-                  new_tokens - 1))
+                  new_tokens))
     thread.start()
     in_pipe_path = "\\\\.\\pipe\\llminputpipe"
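
The second file's change fixes an off-by-one: the args tuple handed to the generate worker thread asked for new_tokens - 1 tokens, so every run produced one token fewer than requested. A minimal sketch of the effect (the decode_loop function is a hypothetical stand-in, not the real worker):

    # Hypothetical stand-in for the worker fed by the args tuple above.
    def decode_loop(new_tokens: int) -> int:
        emitted = 0
        for _ in range(new_tokens):   # runs once per requested token
            emitted += 1
        return emitted

    assert decode_loop(128 - 1) == 127   # old args: one token short
    assert decode_loop(128) == 128       # fixed args: exactly as requested

For context, the \\\\.\\pipe\\llminputpipe string in the surrounding code is a Windows named-pipe path, which this pipeline appears to use to pass input to the worker.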