add conf batch_size to run_model (#10010)

Author: Xin Qiu (committed by GitHub)
Date:   2024-01-26 15:48:48 +08:00
parent 421e7cee80
commit 7952bbc919


@@ -941,7 +941,7 @@ if __name__ == '__main__':
                 if model_id_input in excludes:
                     in_out_pairs.remove(in_out)
         run_model(model, api, in_out_pairs, conf['local_model_hub'], conf['warm_up'], conf['num_trials'], conf['num_beams'],
-                  conf['low_bit'], conf['cpu_embedding'])
+                  conf['low_bit'], conf['cpu_embedding'], conf['batch_size'])
     df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
                       'input/output tokens', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
                       'peak mem (GB)'])
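
For the updated call to work end to end, run_model itself has to accept the extra argument, and the config consumed as conf has to define a batch_size key. A minimal sketch of the expected signature, with parameter names inferred from the call site (the run_model body is not part of this hunk, and the batch_size default shown is illustrative):

# Sketch only: run_model's body is outside this hunk; parameter names are
# inferred from the call site above, not taken from the source.
def run_model(repo_id, api, in_out_pairs, local_model_hub, warm_up,
              num_trials, num_beams, low_bit, cpu_embedding, batch_size=1):
    # batch_size is appended as the last positional argument at the call
    # site, so placing it last with a default keeps older callers working.
    ...

Note that the call site reads conf['batch_size'] unconditionally, so existing config files without a batch_size entry would now fail with a KeyError; conf.get('batch_size', 1) would be the backward-compatible alternative.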