[LLM] Add exclude option in all-in-one performance test (#9632)
* add exclude option in all-in-one perf test
* update arc-perf-test.yaml
* Exclude in_out_pairs in main function
* fix some bugs
* address Kai's comments
* define excludes at the beginning
* add bloomz:2048 to exclude
parent 9b9cd51de1
commit 16febc949c
2 changed files with 14 additions and 1 deletion
@@ -42,6 +42,7 @@ CHATGLM_IDS = ['THUDM/chatglm-6b', 'THUDM/chatglm2-6b', 'THUDM/chatglm3-6b']
 LLAVA_IDS = ['liuhaotian/llava-v1.5-7b']
 
 results = []
+excludes = []
 
 
 def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1, num_trials=3, num_beams=1, low_bit='sym_int4', cpu_embedding=False):
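The new module-level excludes list defaults to empty so that configs without an exclude key keep the previous behaviour. A minimal sketch of that default-then-override pattern, with an illustrative helper name (load_excludes is not part of the patch):

# Sketch of the pattern introduced above; load_excludes is an illustrative
# helper, not code from the benchmark script.
excludes = []  # module-level default: nothing excluded unless the config says so

def load_excludes(conf):
    # 'exclude' is an optional config key; a missing key means no exclusions.
    return list(conf['exclude']) if 'exclude' in conf else []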
@@ -748,11 +749,19 @@ if __name__ == '__main__':
     from omegaconf import OmegaConf
     conf = OmegaConf.load(f'{current_dir}/config.yaml')
     today = date.today()
+    if 'exclude' in conf:
+        excludes = conf['exclude']
 
     import pandas as pd
     for api in conf.test_api:
         for model in conf.repo_id:
-            run_model(model, api, conf['in_out_pairs'], conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
+            in_out_pairs = conf['in_out_pairs'].copy()
+            if excludes:
+                for in_out in conf['in_out_pairs']:
+                    model_id_input = model + ':' + in_out.split('-')[0]
+                    if model_id_input in excludes:
+                        in_out_pairs.remove(in_out)
+            run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
                       conf['low_bit'], conf['cpu_embedding'])
     df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                                         'input/output tokens', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
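The core of this hunk is the exclude match: each exclude entry is compared against the key '<repo_id>:<input length>', where the input length is the part of the in_out_pair before the '-'. A self-contained sketch of that filtering, with an illustrative helper name and sample values that are not taken from the patch:

# Standalone sketch of the exclude filtering shown in the hunk above;
# filter_in_out_pairs and the sample data are illustrative only.
def filter_in_out_pairs(model, in_out_pairs, excludes):
    kept = list(in_out_pairs)
    for in_out in in_out_pairs:
        # Build the '<repo_id>:<input length>' key matched against the exclude list.
        model_id_input = model + ':' + in_out.split('-')[0]
        if model_id_input in excludes:
            kept.remove(in_out)
    return kept

excludes = ['fnlp/moss-moon-003-sft:1024', 'bigscience/bloomz-7b1:2048']
print(filter_in_out_pairs('fnlp/moss-moon-003-sft', ['32-32', '1024-128', '2048-256'], excludes))
# prints ['32-32', '2048-256']: only the 1024-input pair is dropped for this model

Note that the match uses the input length only, so excluding 'fnlp/moss-moon-003-sft:1024' removes every in_out_pair whose input side is 1024, regardless of the output length.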
@@ -27,3 +27,7 @@ in_out_pairs:
 test_api:
   - "transformer_int4_gpu"               # on Intel GPU
 cpu_embedding: False # whether put embedding to CPU (only avaiable now for gpu win related test_api)
+exclude:
+  - 'fnlp/moss-moon-003-sft:1024'
+  - 'fnlp/moss-moon-003-sft:2048'
+  - 'bigscience/bloomz-7b1:2048'
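With these entries, a combination is skipped only when both the repo_id and the input length match; other in_out_pairs for the same model still run. A small illustrative check of which runs this particular exclude list drops (the models and in_out_pairs below are sample values, not the full arc-perf-test.yaml):

# Sketch: which (model, in_out_pair) combinations the new exclude entries skip.
# The models and in_out_pairs listed here are illustrative samples.
excludes = [
    'fnlp/moss-moon-003-sft:1024',
    'fnlp/moss-moon-003-sft:2048',
    'bigscience/bloomz-7b1:2048',
]
for model in ['fnlp/moss-moon-003-sft', 'bigscience/bloomz-7b1']:
    for in_out in ['1024-128', '2048-256']:
        key = model + ':' + in_out.split('-')[0]
        print(model, in_out, 'skipped' if key in excludes else 'run')
# fnlp/moss-moon-003-sft is skipped for both input lengths;
# bigscience/bloomz-7b1 is skipped only for the 2048 input.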