LLM: add cpu_embedding and peak memory recording for deepspeed autotp script (#10621)

binbin Deng 2024-04-02 17:32:50 +08:00 committed by GitHub
parent ba8cc6bd68
commit 27be448920


@@ -101,7 +101,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
     elif test_api == 'bigdl_ipex_int8':
         result = run_bigdl_ipex_int8(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, batch_size)
     elif test_api == 'deepspeed_optimize_model_gpu':
-        result = run_deepspeed_optimize_model_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
+        result = run_deepspeed_optimize_model_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding)
     elif test_api == 'speculative_cpu':
         result = run_speculative_cpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, batch_size)
     elif test_api == 'speculative_gpu':
@@ -121,7 +121,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
                        low_bit,
                        cpu_embedding if 'win' in test_api else 'N/A',
                        round(result[in_out_pair][-1][5], 2),
-                       result[in_out_pair][-1][6] if any(keyword in test_api for keyword in ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu', 'fp16_gpu']) else 'N/A',
+                       result[in_out_pair][-1][6] if any(keyword in test_api for keyword in ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu', 'fp16_gpu', 'deepspeed_optimize_model_gpu']) else 'N/A',
                        streaming if 'win' in test_api else 'N/A'],
                        )
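
The summary row above reads load time from index 5 and peak memory from index 6 of the last result row; this patch adds 'deepspeed_optimize_model_gpu' to the allow-list so its peak memory is reported instead of 'N/A'. A minimal standalone sketch of that gating (the function name and sample row are illustrative, not taken from the harness):

    PEAK_MEMORY_APIS = ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu',
                        'fp16_gpu', 'deepspeed_optimize_model_gpu']

    def summarize(test_api, last_row):
        # last_row: [first_cost, rest_cost_mean, encoder_time,
        #            actual_in_len, actual_out_len, load_time, peak_memory]
        load_time = round(last_row[5], 2)
        peak = last_row[6] if any(k in test_api for k in PEAK_MEMORY_APIS) else 'N/A'
        return load_time, peak

    # Hypothetical row, for illustration only
    print(summarize('deepspeed_optimize_model_gpu',
                    [0.35, 0.032, 0.0, 1024, 128, 12.34, 5.2]))
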
@@ -1402,7 +1402,8 @@ def run_deepspeed_optimize_model_gpu(repo_id,
                                      num_trials,
                                      num_beams,
                                      low_bit,
-                                     batch_size):
+                                     batch_size,
+                                     cpu_embedding):
     def get_int_from_env(env_keys, default):
         for e in env_keys:
             val = int(os.environ.get(e, -1))
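
The hunk cuts get_int_from_env off after its first lines. A plausible self-contained completion, assuming the helper returns the first non-negative value and falls back to the default, with representative (not verified) launcher variable names:

    import os

    def get_int_from_env(env_keys, default):
        # Return the first env var that parses to a non-negative int,
        # otherwise fall back to `default` (assumed behaviour; the tail
        # of the helper lies outside this hunk)
        for e in env_keys:
            val = int(os.environ.get(e, -1))
            if val >= 0:
                return val
        return int(default)

    # Typical autotp usage: rank/world size from whichever launcher set them
    world_size = get_int_from_env(['WORLD_SIZE', 'PMI_SIZE'], 1)
    local_rank = get_int_from_env(['LOCAL_RANK', 'PMI_RANK'], 0)
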
@@ -1450,7 +1451,7 @@ def run_deepspeed_optimize_model_gpu(repo_id,
     # Use bigdl-llm `optimize_model` to convert the model into optimized low bit format
     # Convert the rest of the model into float16 to reduce allreduce traffic
-    model = optimize_model(model.module.to(f'cpu'), low_bit=low_bit).to(torch.float16)
+    model = optimize_model(model.module.to(f'cpu'), low_bit=low_bit, cpu_embedding=cpu_embedding).to(torch.float16)
     # Next, use XPU as accelerator to speed up inference
     current_accel = XPU_Accelerator()
     set_accelerator(current_accel)
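
For context, a hedged sketch of the changed call outside the deepspeed wrapper: optimize_model converts a model to a low-bit format, and the new cpu_embedding flag keeps the embedding table on the CPU, saving device memory. The repo id and low_bit value below are placeholders:

    import torch
    from bigdl.llm import optimize_model
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        'meta-llama/Llama-2-7b-chat-hf',   # placeholder model id
        torch_dtype=torch.float16)
    # Same pattern as the patched line: low-bit weights, embedding kept on CPU
    model = optimize_model(model, low_bit='sym_int4', cpu_embedding=True)
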
@@ -1466,7 +1467,7 @@ def run_deepspeed_optimize_model_gpu(repo_id,
     from deepspeed.comm.comm import init_distributed
     init_distributed()
-    model = BenchmarkWrapper(model)
+    model = BenchmarkWrapper(model, do_print=True)
     result = {}
     with torch.inference_mode():
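
BenchmarkWrapper's internals are not part of this diff; below is a rough sketch of how per-run peak memory can be captured on XPU, which is the kind of figure model.peak_memory surfaces in the last hunk. It assumes the IPEX torch.xpu memory APIs, which mirror their CUDA counterparts:

    import torch
    import intel_extension_for_pytorch as ipex  # registers the torch.xpu backend

    torch.xpu.reset_peak_memory_stats()
    # ... run one model.generate(...) call here ...
    peak_gb = torch.xpu.max_memory_allocated() / (1024 ** 3)
    print(f'peak memory: {peak_gb:.2f} GB')
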
@@ -1504,7 +1505,7 @@ def run_deepspeed_optimize_model_gpu(repo_id,
                 torch.xpu.empty_cache()
                 if i >= warm_up:
                     result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
-                                           actual_in_len, actual_out_len, load_time])
+                                           actual_in_len, actual_out_len, load_time, model.peak_memory])
     del model
     torch.xpu.empty_cache()
     return result
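
With this patch each recorded trial row carries seven fields instead of six. A small sketch naming the indices, so the run_model summary (which reads [5] and [6]) stays in sync; field names follow the attributes used above:

    ROW_FIELDS = ['first_cost', 'rest_cost_mean', 'encoder_time',
                  'actual_in_len', 'actual_out_len', 'load_time', 'peak_memory']

    def row_as_dict(row):
        # Convenience view of one appended result row
        return dict(zip(ROW_FIELDS, row))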