Update benchmark util for example usage (#11027)

* mv benchmark_util.py to utils/

* remove

* update
Wang, Jian4 2024-05-15 14:16:35 +08:00 committed by GitHub
parent 86cec80b51
commit d9f71f1f53
7 changed files with 8 additions and 4803 deletions


@@ -1,6 +1,6 @@
# Benchmark tool for transformers int4 (separate 1st token and rest)
-`benchmark_util.py` provides a simple benchmark tool for transformer int4 models that measures the performance of the 1st token and the remaining tokens on CPU and GPU.
+[benchmark_util.py](https://github.com/intel-analytics/ipex-llm/tree/main/python/llm/src/ipex_llm/utils/benchmark_util.py) provides a simple benchmark tool for transformer int4 models that measures the performance of the 1st token and the remaining tokens on CPU and GPU.
## CPU Usage
Just put this file into your benchmark directory, and then wrap your transformer int4 model with `BenchmarkWrapper` (`model = BenchmarkWrapper(model)`).
@@ -9,7 +9,7 @@ Take `chatglm-6b` as an example:
import torch
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
-from benchmark_util import BenchmarkWrapper
+from ipex_llm.utils.benchmark_util import BenchmarkWrapper
model_path ='THUDM/chatglm-6b'
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
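For reference, here is a self-contained sketch of the full CPU flow this hunk is taken from, using the new import path; the prompt, the `max_new_tokens` value, and the decode step are illustrative and not part of the original hunk.

```python
import torch
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
from ipex_llm.utils.benchmark_util import BenchmarkWrapper

model_path = 'THUDM/chatglm-6b'
# Load the int4 model and wrap it so generate() reports 1st-token and rest-token latency.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
model = BenchmarkWrapper(model)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

prompt = "What is AI?"  # illustrative prompt, not from the original README
with torch.inference_mode():
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, do_sample=False, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```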
@@ -37,7 +37,7 @@ import torch
import intel_extension_for_pytorch as ipex
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
-from benchmark_util import BenchmarkWrapper
+from ipex_llm.utils.benchmark_util import BenchmarkWrapper
model_path ='THUDM/chatglm-6b'
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
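A hedged sketch of the corresponding GPU flow follows; the `to('xpu')` placement, the `torch.xpu.synchronize()` call, and the prompt are assumptions based on typical ipex-llm GPU examples rather than lines from this hunk.

```python
import torch
import intel_extension_for_pytorch as ipex  # imported for its side effects (registers the 'xpu' device)
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
from ipex_llm.utils.benchmark_util import BenchmarkWrapper

model_path = 'THUDM/chatglm-6b'
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
model = model.to('xpu')          # move the int4 model to the Intel GPU
model = BenchmarkWrapper(model)  # wrap after the device move (assumed ordering)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

prompt = "What is AI?"  # illustrative prompt
with torch.inference_mode():
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
    output = model.generate(input_ids, do_sample=False, max_new_tokens=32)
    torch.xpu.synchronize()      # make sure the measured generation has finished
    print(tokenizer.decode(output[0].cpu(), skip_special_tokens=True))
```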
@@ -66,7 +66,7 @@ For example, just need to apply following code patch on [Deepspeed Autotp exampl
import torch
import transformers
import deepspeed
-+from benchmark_util import BenchmarkWrapper
++from ipex_llm.utils.benchmark_util import BenchmarkWrapper
def get_int_from_env(env_keys, default):
"""Returns the first positive env value found in the `env_keys` list or the default."""


@@ -27,10 +27,8 @@ from datetime import date
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
-benchmark_util_path = os.path.join(current_dir, '..')
-import sys
-sys.path.append(benchmark_util_path)
-from benchmark_util import BenchmarkWrapper
+from ipex_llm.utils.benchmark_util import BenchmarkWrapper
from ipex_llm.utils.common.log4Error import invalidInputError
LLAMA_IDS = ['meta-llama/Llama-2-7b-chat-hf','meta-llama/Llama-2-13b-chat-hf',


@@ -28,10 +28,8 @@ from datetime import date
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
-benchmark_util_path = os.path.join(current_dir, '..')
-import sys
-sys.path.append(benchmark_util_path)
-from benchmark_util import BenchmarkWrapper
+from ipex_llm.utils.benchmark_util import BenchmarkWrapper
-from ipex_llm.utils.common.log4Error import invalidInputError
+from ipex_llm.utils.common import invalidInputError


@@ -21,7 +21,7 @@ SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"
PYTHON_ROOT_DIR="$SCRIPT_DIR/.."
echo $PYTHON_ROOT_DIR
PATHS_TO_CHECK="$SCRIPT_DIR/../../src"
PATTERNS_TO_EXCLUDE="__init__.py,log4Error.py,$SCRIPT_DIR/../../src/ipex_llm/langchain/*,$SCRIPT_DIR/../../src/ipex_llm/transformers/gguf/models/model_implement/yuan2/*"
PATTERNS_TO_EXCLUDE="__init__.py,log4Error.py,$SCRIPT_DIR/../../src/ipex_llm/langchain/*,$SCRIPT_DIR/../../src/ipex_llm/transformers/gguf/models/model_implement/yuan2/*,benchmark_util.py"
PEP8_REPORT_PATH="$PYTHON_ROOT_DIR/test/pep8-report.txt"
PYLINT_REPORT_PATH="$PYTHON_ROOT_DIR/test/pylint-report.txt"
PYLINT_INSTALL_INFO="$PYTHON_ROOT_DIR/test/pylint-info.txt"


@@ -31,7 +31,7 @@ from typing import Dict, List, Optional
from transformers.utils import logging
logger = logging.get_logger(__name__)
-from benchmark_util import BenchmarkWrapper
+from ipex_llm.utils.benchmark_util import BenchmarkWrapper
def get_int_from_env(env_keys, default):
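The docstring describes the helper's contract; a minimal sketch consistent with that docstring (not necessarily the example's exact implementation) could look like the following, with the environment-variable names in the usage line being illustrative only.

```python
import os

def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for key in env_keys:
        value = int(os.environ.get(key, -1))
        if value > 0:
            return value
    return int(default)

# e.g. pick up the world size from whichever launcher variable happens to be set
world_size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE"], 1)
```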