From 90f004b80be8dadc1998106c6ac27a0847babede Mon Sep 17 00:00:00 2001
From: Heyang Sun <60865256+Uxito-Ada@users.noreply.github.com>
Date: Sun, 4 Feb 2024 15:42:15 +0800
Subject: [PATCH] remove benchmarkwrapper form deepspeed example (#10079)

---
 python/llm/example/CPU/Deepspeed-AutoTP/deepspeed_autotp.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/python/llm/example/CPU/Deepspeed-AutoTP/deepspeed_autotp.py b/python/llm/example/CPU/Deepspeed-AutoTP/deepspeed_autotp.py
index bf05e1ed..91d11dfa 100644
--- a/python/llm/example/CPU/Deepspeed-AutoTP/deepspeed_autotp.py
+++ b/python/llm/example/CPU/Deepspeed-AutoTP/deepspeed_autotp.py
@@ -50,7 +50,6 @@ import torch
 import intel_extension_for_pytorch as ipex
 import time
 import argparse
-from benchmark_util import BenchmarkWrapper
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model')
@@ -94,7 +93,6 @@ if __name__ == '__main__':
     model = model.to(f'cpu:{local_rank}')
     print(model)
 
-    model = BenchmarkWrapper(model, do_print=True)
 
     # Load tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
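
With BenchmarkWrapper removed, the example no longer prints per-token latency automatically; generation can still be timed directly with the `time` module the script already imports. The sketch below is a minimal illustration of that approach, not code from the patch: the helper name `timed_generate`, its arguments, and the assumption that `model` and `tokenizer` are already loaded as in the example are all hypothetical.

# Hypothetical sketch: timing generate() without BenchmarkWrapper.
# Assumes `model` and `tokenizer` have been loaded as in the example script.
import time
import torch

def timed_generate(model, tokenizer, prompt, n_predict=32):
    # Encode the prompt and run a single timed generation pass.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    with torch.inference_mode():
        start = time.perf_counter()
        output = model.generate(input_ids, max_new_tokens=n_predict)
        elapsed = time.perf_counter() - start
    new_tokens = output.shape[1] - input_ids.shape[1]
    print(f"Generated {new_tokens} tokens in {elapsed:.2f} s "
          f"({elapsed / max(new_tokens, 1):.3f} s/token on average)")
    return tokenizer.decode(output[0], skip_special_tokens=True)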