add config and default value (#11344)
* add config and default value
* add config in yaml
* remove lookahead and max_matching_ngram_size in config
* remove streaming and use_fp16_torch_dtype in test yaml
* update task in readme
* update comment of task
Parent: 1f39bb84c7
Commit: 44f22cba70
8 changed files with 22 additions and 18 deletions
@@ -41,6 +41,7 @@ test_api:
   - "transformer_int4_gpu" # on Intel GPU, transformer-like API, (qtype=int4)
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only avaiable now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
 ```

 Some parameters in the yaml file that you can configure:
@@ -61,6 +62,7 @@ Some parameters in the yaml file that you can configure:
 - `streaming`: Whether to output in a streaming way (only available for GPU Windows-related test_api).
 - `use_fp16_torch_dtype`: Whether to use fp16 for the non-linear layer (only available for "pipeline_parallel_gpu" test_api).
 - `n_gpu`: Number of GPUs to use (only available for "pipeline_parallel_gpu" test_api).
+- `task`: There are three tasks: `continuation`, `QA` and `summarize`. `continuation` refers to writing additional content based on prompt. `QA` refers to answering questions based on prompt. `summarize` refers to summarizing the prompt.

 ```eval_rst
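The documentation hunks above introduce the `task` option with a default of `'continuation'`. As a quick orientation, here is a minimal sketch (not part of the commit: it assumes the OmegaConf-style loading that `run.py` already uses, and the explicit validation is an illustrative addition) of how the option is picked up with its default:

```python
# Sketch only: mirrors how run.py reads optional config keys; the validation is illustrative.
from omegaconf import OmegaConf

conf = OmegaConf.load('config.yaml')   # path is illustrative
task = conf['task'] if 'task' in conf else 'continuation'   # default added by this commit
if task not in ('continuation', 'QA', 'summarize'):
    raise ValueError(f"unsupported task: {task}")
```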
@@ -56,13 +56,10 @@ test_api:
   # - "bigdl_ipex_int8" # on Intel CPU, (qtype=int8)
   # - "speculative_cpu" # on Intel CPU, inference with self-speculative decoding
   # - "deepspeed_transformer_int4_cpu" # on Intel CPU, deepspeed autotp inference
-  # - "transformer_int4_fp16_lookahead_gpu" # on Intel GPU, transformer-like API, with lookahead, (qtype=int4), (dtype=fp16)
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only available now for gpu win related test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
-lookahead: 3
-max_matching_ngram_size: 2
-task: 'continuation' # when test_api is "transformer_int4_fp16_lookahead_gpu", task could be 'QA', 'continuation' or 'summarize'
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'

 ```
@@ -35,6 +35,4 @@ test_api:
 cpu_embedding: False # whether put embedding to CPU
 streaming: False # whether output in streaming way (only available now for gpu win related test_api)
 use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
-lookahead: 3
-max_matching_ngram_size: 2
-task: 'continuation' # when task is 'continuation', the result is without lookahead. When task is 'QA' or 'summarize', the result is with lookahead
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
@@ -49,7 +49,7 @@ def run_model_in_thread(model, in_out, tokenizer, result, warm_up, num_beams, in
     for i in range(num_trials + warm_up):
         st = time.perf_counter()
         if lookahead:
-            output_ids = model.generate(input_ids, lookahead=conf.lookahead, do_sample=False, max_matching_ngram_size=conf.max_matching_ngram_size, max_new_tokens=out_len,
+            output_ids = model.generate(input_ids, lookahead=3, do_sample=False, max_matching_ngram_size=2, max_new_tokens=out_len,
                                         min_new_tokens=out_len, num_beams=num_beams)
         else:
             output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
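The hunk above drops the config-driven `conf.lookahead` / `conf.max_matching_ngram_size` in favor of fixed values (3 and 2). Condensed as a standalone helper (the name `timed_generate` is assumed; the real logic lives inside `run_model_in_thread`):

```python
# Sketch of the timing pattern used by the benchmark; the helper name is an assumption.
import time

def timed_generate(model, input_ids, out_len, num_beams, lookahead=False):
    st = time.perf_counter()
    if lookahead:
        # lookahead decoding now uses the fixed defaults hardcoded by this commit
        output_ids = model.generate(input_ids, lookahead=3, do_sample=False,
                                    max_matching_ngram_size=2, max_new_tokens=out_len,
                                    min_new_tokens=out_len, num_beams=num_beams)
    else:
        output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
                                    min_new_tokens=out_len, num_beams=num_beams)
    return output_ids, time.perf_counter() - st
```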
@@ -106,7 +106,7 @@ def preprocess_prompt(tokenizer, in_len, task):
     input_ids = tokenizer.encode(input_str, return_tensors="pt")
     return input_ids

-def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False):
+def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False, batch_size=1, streaming=False, use_fp16_torch_dtype=False, lookahead=False, task='continuation'):
     # TODO: make a parameter
     result= {}
     if test_api == 'transformer_int4':
@@ -118,7 +118,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
     elif test_api == 'transformer_int4_gpu':
         result = run_transformer_int4_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding)
     elif test_api == 'transformer_int4_fp16_gpu':
-        result = run_transformer_int4_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=True, lookahead=lookahead)
+        result = run_transformer_int4_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=True, lookahead=lookahead, task=task)
     elif test_api == 'optimize_model_gpu':
         result = run_optimize_model_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
     elif test_api == 'pytorch_autocast_bf16':
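With `task` added to the `run_model` signature and threaded through to `run_transformer_int4_gpu`, a call might look like the following sketch (the repo id and in/out pair are placeholders, not values from the commit):

```python
# Hypothetical invocation; the repo id and in/out pair are placeholders.
run_model(repo_id='meta-llama/Llama-2-7b-chat-hf',   # placeholder model
          test_api='transformer_int4_fp16_gpu',
          in_out_pairs=['1024-128'],                  # "<in_len>-<out_len>" format parsed by run.py
          warm_up=1, num_trials=3, num_beams=1,
          low_bit='sym_int4', batch_size=1,
          lookahead=True,                             # __main__ enables this for 'QA'/'summarize'
          task='QA')
```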
@@ -441,7 +441,8 @@ def run_transformer_int4_gpu(repo_id,
                              batch_size,
                              cpu_embedding,
                              fp16=False,
-                             lookahead=False):
+                             lookahead=False,
+                             task='continuation'):
     from ipex_llm.transformers import AutoModel, AutoModelForCausalLM
     from transformers import AutoTokenizer, GPTJForCausalLM, LlamaTokenizer
     model_path = get_model_path(repo_id, local_model_hub)
@@ -506,7 +507,7 @@ def run_transformer_int4_gpu(repo_id,
             in_out_len = in_out.split("-")
             in_len = int(in_out_len[0])
             out_len = int(in_out_len[1])
-            if conf['task'] == 'continuation':
+            if task == 'continuation':
                 # As different tokenizer has different encodings,
                 # in_len.txt maybe shorter than we need,
                 # use much longer context to make sure input length
@@ -520,8 +521,8 @@ def run_transformer_int4_gpu(repo_id,
                 # slice the input_ids to ensure the prompt length is required length.
                 input_ids = tokenizer.encode(input_str, return_tensors="pt")
                 input_ids = input_ids[:, :in_len]
-            elif conf['task'] == 'summarize' or conf['task'] == 'QA':
-                input_ids = preprocess_prompt(tokenizer, in_len, conf['task'])
+            elif task in ['QA', 'summarize']:
+                input_ids = preprocess_prompt(tokenizer, in_len, task)
             true_str = tokenizer.batch_decode(input_ids)[0]
             input_list = [true_str] * batch_size
             input_ids = tokenizer(input_list, return_tensors="pt").input_ids.to('xpu')
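The last two hunks switch prompt construction from the global `conf['task']` to the `task` parameter. Paraphrased as a standalone helper (the name `build_input_ids` is assumed; `preprocess_prompt` is the existing function referenced above):

```python
# Simplified paraphrase of the task branch inside run_transformer_int4_gpu;
# build_input_ids is an assumed name, not a function in the repository.
def build_input_ids(tokenizer, input_str, in_len, task):
    if task == 'continuation':
        # encode an over-long context, then slice to the requested prompt length
        input_ids = tokenizer.encode(input_str, return_tensors="pt")
        return input_ids[:, :in_len]
    elif task in ['QA', 'summarize']:
        # delegate to the existing task-aware prompt builder
        return preprocess_prompt(tokenizer, in_len, task)
    raise ValueError(f"unknown task: {task}")
```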
@@ -1824,17 +1825,19 @@ def run_pipeline_parallel_gpu(repo_id,

 if __name__ == '__main__':
     from omegaconf import OmegaConf
-    global conf
     conf = OmegaConf.load(f'{current_dir}/config.yaml')
     today = date.today()
     if 'exclude' in conf:
         excludes = conf['exclude']
     streaming = False
     use_fp16_torch_dtype = False
+    task = 'continuation'
     if 'streaming' in conf:
         streaming = conf['streaming']
     if 'use_fp16_torch_dtype' in conf:
         use_fp16_torch_dtype = conf['use_fp16_torch_dtype']
+    if 'task' in conf:
+        task = conf['task']
     lookahead = False

     import pandas as pd
@@ -1854,10 +1857,10 @@ if __name__ == '__main__':
             model_id_input_batch_size = model_id_input + ':' + str(batch_size)
             if model_id_input in excludes or model_id_input_batch_size in excludes:
                 in_out_pairs.remove(in_out)
-        if conf['task'] in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
+        if task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1:
             lookahead = True
         run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
-                  conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead)
+                  conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task)
     df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                                         'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
                                         'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
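Taken together, the `__main__` changes resolve everything from the config with explicit defaults and derive `lookahead` from `task` instead of reading it from the yaml. A condensed view (simplified; the surrounding loops are omitted and `batch_size` comes from the enclosing loop):

```python
# Condensed view of the __main__ logic after this commit (structure simplified).
streaming = conf['streaming'] if 'streaming' in conf else False
use_fp16_torch_dtype = conf['use_fp16_torch_dtype'] if 'use_fp16_torch_dtype' in conf else False
task = conf['task'] if 'task' in conf else 'continuation'

# lookahead is no longer a config key: it is enabled only for single-beam,
# batch-size-1 runs of the 'QA' and 'summarize' tasks, as in the diff above.
lookahead = task in ['QA', 'summarize'] and conf['num_beams'] == 1 and batch_size == 1
```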
@@ -35,4 +35,5 @@ exclude:
   - 'bigcode/starcoder-15.5b-4bit:2048'
   # - 'databricks/dolly-v2-12b:2048'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
   - 'bigscience/bloomz-7b1:2048'
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
@@ -36,3 +36,4 @@ exclude:
   # - 'fnlp/moss-moon-003-sft-4bit:2048'
   - 'baichuan-inc/Baichuan2-13B-Chat-4bit:2048'
   - 'bigscience/bloomz-7b1:2048'
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
@@ -17,3 +17,4 @@ in_out_pairs:
 test_api:
   - "transformer_int4_fp16_gpu" # on Intel GPU
 cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
@@ -17,3 +17,4 @@ in_out_pairs:
 test_api:
   - "transformer_int4_fp16_gpu" # on Intel GPU
 cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'