Add lookahead in all-in-one (#11142)
* add lookahead in all-in-one
* delete save-to-csv in run_transformer_int4_gpu
* change lookup to lookahead
* fix the error when adding model.peak_memory
* set transformer_int4_gpu as the default option
* add comment for transformer_int4_fp16_lookahead_gpu
This commit is contained in:
parent 83bd9cb681
commit 62b2d8af6b
2 changed files with 36 additions and 12 deletions
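For context, the "lookahead" mode added here is a prompt-lookup style of speculative decoding: candidate tokens are proposed by matching the most recent generated tokens against n-grams that already occur in the prompt, then verified in a single forward pass (the commit message's "change lookup to lookahead" and the max_matching_ngram_size key point the same way). The snippet below is only a toy sketch of that n-gram matching idea; the function name, the backward greedy search, and the sample ids are illustrative assumptions, not the ipex-llm implementation.

def propose_draft_tokens(token_ids, max_matching_ngram_size=2, num_draft=3):
    # Toy prompt-lookup drafting (hypothetical helper, illustration only):
    # find the most recent earlier occurrence of the current suffix n-gram
    # and copy the tokens that followed it as the draft continuation.
    for n in range(max_matching_ngram_size, 0, -1):
        suffix = token_ids[-n:]
        for start in range(len(token_ids) - n - 1, -1, -1):
            if token_ids[start:start + n] == suffix:
                follow = token_ids[start + n:start + n + num_draft]
                if follow:
                    return follow
    return []  # no match: fall back to plain one-token-at-a-time decoding

# The bigram (5, 6) re-occurs earlier, so the tokens after it are proposed.
print(propose_draft_tokens([1, 2, 5, 6, 7, 8, 9, 5, 6]))  # -> [7, 8, 9]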
@@ -33,7 +33,10 @@ test_api:
   # - "bigdl_ipex_int8" # on Intel CPU, (qtype=int8)
   # - "speculative_cpu" # on Intel CPU, inference with self-speculative decoding
   # - "deepspeed_transformer_int4_cpu" # on Intel CPU, deepspeed autotp inference
+  # - "transformer_int4_fp16_lookahead_gpu" # on Intel GPU, transformer-like API, with lookahead, (qtype=int4), (dtype=fp16)
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only avaiable now for gpu win related test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only avaiable now for "pipeline_parallel_gpu" test_api)
 n_gpu: 2 # number of GPUs to use (only avaiable now for "pipeline_parallel_gpu" test_api)
+lookahead: 3
+max_matching_ngram_size: 2
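A minimal sketch of how the two new keys might be consumed, assuming the YAML is parsed with PyYAML into a simple namespace; the loader, the namespace wrapper, and the reading of "lookahead" as a draft-token count are assumptions, since the harness may load config.yaml differently.

import yaml
from types import SimpleNamespace

# Hypothetical loader sketch: reads the new lookahead keys from config.yaml.
with open("config.yaml") as f:
    conf = SimpleNamespace(**yaml.safe_load(f))

if any("lookahead" in api for api in conf.test_api):
    # lookahead: presumably the number of tokens drafted per step;
    # max_matching_ngram_size: longest prompt n-gram used for matching.
    print(conf.lookahead, conf.max_matching_ngram_size)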
@@ -45,9 +45,13 @@ LLAVA_IDS = ['liuhaotian/llava-v1.5-7b']
 results = []
 excludes = []

-def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, input_ids, out_len, actual_in_len, num_trials, load_time):
+def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, input_ids, out_len, actual_in_len, num_trials, load_time, lookahead):
     for i in range(num_trials + warm_up):
         st = time.perf_counter()
-        output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
-                                    min_new_tokens=out_len, num_beams=num_beams)
+        if lookahead:
+            output_ids = model.generate(input_ids, lookahead=conf.lookahead, do_sample=False, max_matching_ngram_size=conf.max_matching_ngram_size, max_new_tokens=out_len,
+                                        min_new_tokens=out_len, num_beams=num_beams)
+        else:
+            output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
+                                        min_new_tokens=out_len, num_beams=num_beams)
         torch.xpu.synchronize()
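Note that the lookahead branch pulls conf.lookahead and conf.max_matching_ngram_size from the module-level config object; the new lookahead argument threaded through run_model_in_thread is only the on/off switch. The warm-up/measure pattern around generate() is unchanged; the standalone sketch below mirrors it with a stub in place of model.generate so it runs anywhere (the stub and the warm_up/num_trials values are placeholders).

import time

def fake_generate():
    time.sleep(0.01)  # stand-in for model.generate(...)

warm_up, num_trials, timings = 1, 3, []
for i in range(num_trials + warm_up):
    st = time.perf_counter()
    fake_generate()
    end = time.perf_counter()
    if i >= warm_up:  # discard warm-up iterations, as the harness does
        timings.append(end - st)

print(f"mean latency over {num_trials} measured trials: {sum(timings) / len(timings):.4f}s")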
@@ -59,6 +63,10 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
         torch.xpu.empty_cache()
         actual_out_len = output_ids.shape[1] - actual_in_len
         if i >= warm_up:
-            result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
-                                   actual_in_len, actual_out_len, load_time, model.peak_memory])
+            if lookahead:
+                result[in_out].append([model.first_token_time, (end - st - model.first_token_time)/model.n_token_generated, 0,
+                                       actual_in_len, actual_out_len, load_time, 0])
+            else:
+                result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
+                                       actual_in_len, actual_out_len, load_time, model.peak_memory])

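Each appended row has the same seven columns on both paths; the sketch below only names them for readability. The field names and dummy numbers are descriptive assumptions, while the column order and the zero placeholders come from the hunk above.

from collections import namedtuple

# Descriptive names for the seven values recorded per measured trial.
TrialRow = namedtuple("TrialRow", [
    "first_token_s",   # model.first_token_time (lookahead) or model.first_cost
    "rest_token_s",    # average latency per remaining generated token
    "encoder_s",       # model.encoder_time; recorded as 0 on the lookahead path
    "actual_in_len",
    "actual_out_len",
    "load_time_s",
    "peak_memory",     # model.peak_memory; recorded as 0 on the lookahead path
])

print(TrialRow(0.35, 0.02, 0, 1024, 128, 12.3, 0))  # dummy numbers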
@@ -109,6 +117,8 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
         result = run_speculative_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, batch_size)
     elif test_api == 'pipeline_parallel_gpu':
         result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype, n_gpu=n_gpu)
+    elif test_api == "transformer_int4_fp16_lookahead_gpu":
+        result = run_transformer_int4_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=True, lookahead=True)
     else:
         invalidInputError(False, "Unknown test_api " + test_api + ", please check your config.yaml.")

@@ -117,7 +127,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
         results.append([repo_id,
                         round(np.mean(result[in_out_pair], axis=0)[0]*1000.0, 2),
                         round(np.mean(result[in_out_pair], axis=0)[1]*1000.0, 2),
-                        round(np.mean(result[in_out_pair], axis=0)[2]*1000.0, 2),
+                        round(np.mean(result[in_out_pair], axis=0)[2]*1000.0, 2) if 'lookahead' not in test_api else 'N/A',
                         in_out_pair,
                         batch_size,
                         f'{int(np.mean(result[in_out_pair], axis=0)[3])}' +
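A small self-contained sketch of the aggregation above, using dummy rows: a column-wise mean over trials, scaled to milliseconds, with the encoder-time column replaced by 'N/A' when the test_api name contains 'lookahead' (that column is recorded as 0 on the lookahead path).

import numpy as np

test_api = "transformer_int4_fp16_lookahead_gpu"
rows = np.array([  # dummy per-trial rows in the column order recorded above
    [0.35, 0.020, 0.0, 1024, 128, 12.3, 0],
    [0.33, 0.019, 0.0, 1024, 128, 12.3, 0],
])
mean = np.mean(rows, axis=0)
first_ms = round(mean[0] * 1000.0, 2)
rest_ms = round(mean[1] * 1000.0, 2)
encoder_ms = round(mean[2] * 1000.0, 2) if 'lookahead' not in test_api else 'N/A'
print(first_ms, rest_ms, encoder_ms)  # 340.0 19.5 N/A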
@@ -396,7 +406,8 @@ def run_transformer_int4_gpu(repo_id,
                              low_bit,
                              batch_size,
                              cpu_embedding,
-                             fp16=False):
+                             fp16=False,
+                             lookahead=False):
     from ipex_llm.transformers import AutoModel, AutoModelForCausalLM
     from transformers import AutoTokenizer, GPTJForCausalLM, LlamaTokenizer
     import intel_extension_for_pytorch as ipex
@@ -443,6 +454,7 @@ def run_transformer_int4_gpu(repo_id,
     load_time = end - st
     print(">> loading of model costs {}s and {}GB".format(load_time, torch.xpu.memory.memory_reserved()/(1024**3)))

-    model = BenchmarkWrapper(model)
+    if not lookahead:
+        model = BenchmarkWrapper(model)

     result = {}
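With lookahead enabled the model is left unwrapped: the metrics hunk earlier in this diff reads model.first_token_time and model.n_token_generated directly on that path, so the per-token instrumentation that BenchmarkWrapper provides (model.first_cost, model.rest_cost_mean, model.encoder_time, model.peak_memory) is only needed on the standard path.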
@@ -460,6 +472,15 @@ def run_transformer_int4_gpu(repo_id,
             # For the sequence length not in [32, 256, 1024, 2048, 8192], it will be truncated from 8192.txt.
             test_length = min(test_length, 8192)
             input_str = open(f"prompt/{test_length}.txt", 'r').read()
-            # As different tokenizer has different encodings,
-            # slice the input_ids to ensure the prompt length is required length.
-            input_ids = tokenizer.encode(input_str, return_tensors="pt")
+            if lookahead:
+                question = "Can you please summarize this article?"
+                question_tokens = tokenizer.encode(question, return_tensors="pt")
+                max_article_len = in_len - question_tokens.size(1)
+                article_ids = tokenizer.encode(input_str, return_tensors="pt")
+                if article_ids.size(1) > max_article_len:
+                    article_ids = article_ids[:, :max_article_len]
+                input_ids = torch.cat((article_ids, question_tokens), dim=1)
+            else:
+                # As different tokenizer has different encodings,
+                # slice the input_ids to ensure the prompt length is required length.
+                input_ids = tokenizer.encode(input_str, return_tensors="pt")
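The lookahead path builds its prompt as a truncated article followed by a fixed summarization question, presumably so the generated text can reuse n-grams from the prompt. The standalone sketch below reproduces only the truncate-and-concatenate arithmetic, using a trivial whitespace "tokenizer" stub so it runs without model weights; the stub and the in_len value are assumptions.

import torch

def encode(text):
    # Stand-in for tokenizer.encode(text, return_tensors="pt"): one dummy id per word.
    return torch.tensor([[hash(w) % 1000 for w in text.split()]])

in_len = 16  # placeholder for the configured input length
question_tokens = encode("Can you please summarize this article?")
article_ids = encode("lorem " * 100)          # placeholder for prompt/8192.txt
max_article_len = in_len - question_tokens.size(1)
if article_ids.size(1) > max_article_len:
    article_ids = article_ids[:, :max_article_len]
input_ids = torch.cat((article_ids, question_tokens), dim=1)
print(input_ids.shape)  # torch.Size([1, 16])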
@@ -469,7 +490,7 @@ def run_transformer_int4_gpu(repo_id,
             input_ids = tokenizer(input_list, return_tensors="pt").input_ids.to('xpu')
             actual_in_len = input_ids.shape[1]
             result[in_out] = []
-            thread = threading.Thread(target=run_model_in_thread, args=(model, in_out, tokenizer, result, warm_up, num_beams, input_ids, out_len, actual_in_len, num_trials, load_time))
+            thread = threading.Thread(target=run_model_in_thread, args=(model, in_out, tokenizer, result, warm_up, num_beams, input_ids, out_len, actual_in_len, num_trials, load_time, lookahead))
             thread.start()
             thread.join()