[LLM] Add transformer_autocast_bf16 into all-in-one (#9890)
* Add transformer_autocast_bf16 into all-in-one
parent 4af88a67b9
commit 4f4ce73f31
3 changed files with 77 additions and 1 deletion

@@ -1,21 +1,26 @@
# All in One Benchmark Test

The all-in-one benchmark test allows users to run all the benchmarks and record the results in a CSV file. Users can provide models and related information in `config.yaml`.

Before running, make sure [bigdl-llm](../../../README.md) is installed.

## Dependencies

```bash
pip install omegaconf
pip install pandas
```

Install gperftools to use `libtcmalloc.so` on MAX GPU for better performance:

```bash
conda install -c conda-forge -y gperftools=2.10
```

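To actually pick up `libtcmalloc.so`, a common approach is to preload it before launching the benchmark. A minimal sketch, assuming the library lands in the active conda environment's `lib` directory (the exact path is an assumption and may differ on your setup):

```bash
# Sketch: preload tcmalloc from the active conda env before running
# (adjust the path if your environment installs it elsewhere)
export LD_PRELOAD=${CONDA_PREFIX}/lib/libtcmalloc.so
python run.py
```
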
## Config

The config YAML file has the following format:

```yaml
repo_id:
  - 'THUDM/chatglm-6b'

@@ -35,6 +40,7 @@ test_api:
- "native_int4"
|
- "native_int4"
|
||||||
- "optimize_model"
|
- "optimize_model"
|
||||||
- "pytorch_autocast_bf16"
|
- "pytorch_autocast_bf16"
|
||||||
|
# - "transformer_autocast_bf16"
|
||||||
# - "ipex_fp16_gpu" # on Intel GPU
|
# - "ipex_fp16_gpu" # on Intel GPU
|
||||||
# - "transformer_int4_gpu" # on Intel GPU
|
# - "transformer_int4_gpu" # on Intel GPU
|
||||||
# - "optimize_model_gpu" # on Intel GPU
|
# - "optimize_model_gpu" # on Intel GPU
|
||||||

@@ -44,9 +50,11 @@ cpu_embedding: False # whether to put embedding on CPU (only available now for gpu w
```

## Run

Run `python run.py`; this will write the results to `results.csv`.

For SPR (Sapphire Rapids) performance, run `bash run-spr.sh`.

> **Note**
>
> The value of `OMP_NUM_THREADS` should match the number of CPU cores specified by `numactl -C`.
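
For example, a run pinned to 48 cores (a hypothetical core count; adjust to your machine) would keep the two settings in sync like this:

```bash
# Sketch: bind the benchmark to cores 0-47 and give OMP the same core count
OMP_NUM_THREADS=48 numactl -C 0-47 python run.py
```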
@@ -16,6 +16,7 @@ test_api:
- "native_int4"
|
- "native_int4"
|
||||||
- "optimize_model"
|
- "optimize_model"
|
||||||
- "pytorch_autocast_bf16"
|
- "pytorch_autocast_bf16"
|
||||||
|
# - "transformer_autocast_bf16"
|
||||||
# - "ipex_fp16_gpu" # on Intel GPU
|
# - "ipex_fp16_gpu" # on Intel GPU
|
||||||
# - "transformer_int4_gpu" # on Intel GPU
|
# - "transformer_int4_gpu" # on Intel GPU
|
||||||
# - "optimize_model_gpu" # on Intel GPU
|
# - "optimize_model_gpu" # on Intel GPU
|
||||||
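To run the new benchmark, uncomment its entry in `config.yaml`, either by hand or with a hypothetical one-liner such as:

```bash
# Sketch: uncomment the transformer_autocast_bf16 entry in config.yaml
sed -i 's/# - "transformer_autocast_bf16"/- "transformer_autocast_bf16"/' config.yaml
```
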
@@ -84,6 +84,8 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
        result = run_deepspeed_transformer_int4_cpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit)
    elif test_api == 'transformer_int4_gpu_win':
        result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding)
    elif test_api == 'transformer_autocast_bf16':
        result = run_transformer_autocast_bf16(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams)

    for in_out_pair in in_out_pairs:
        if result and result[in_out_pair]:

@@ -759,6 +761,71 @@ def run_transformer_int4_gpu_win(repo_id,
        gc.collect()
    return result


def run_transformer_autocast_bf16(repo_id,
                                  local_model_hub,
                                  in_out_pairs,
                                  warm_up,
                                  num_trials,
                                  num_beams):
    from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM
    from transformers import AutoTokenizer, LlamaTokenizer

    model_path = get_model_path(repo_id, local_model_hub)
    # Load the model in bf16, which converts the relevant layers
    # in the model into BF16 format
    st = time.perf_counter()
    if repo_id in CHATGLM_IDS:
        model = AutoModel.from_pretrained(model_path, load_in_low_bit='bf16', trust_remote_code=True, torch_dtype=torch.bfloat16,
                                          use_cache=True).eval()
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    elif repo_id in LLAMA_IDS:
        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit='bf16', trust_remote_code=True, torch_dtype=torch.bfloat16,
                                                     use_cache=True).eval()
        tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
    else:
        model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit='bf16', trust_remote_code=True, torch_dtype=torch.bfloat16,
                                                     use_cache=True).eval()
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    end = time.perf_counter()
    print(">> loading of model costs {}s".format(end - st))

    model = BenchmarkWrapper(model)

    result = {}
    with torch.inference_mode(), torch.autocast("cpu"):
        for in_out in in_out_pairs:
            in_out_len = in_out.split("-")
            in_len = int(in_out_len[0])
            out_len = int(in_out_len[1])
            # As different tokenizers have different encodings,
            # in_len.txt may be shorter than we need,
            # so use a much longer context to guarantee the input length
            test_length = min(in_len*2, 8192)
            while test_length not in [32, 256, 1024, 2048, 8192]:
                test_length = test_length * 2
            input_str = open(f"prompt/{test_length}.txt", 'r').read()
            # As different tokenizers have different encodings,
            # slice the input_ids to ensure the prompt has the required length.
            input_ids = tokenizer.encode(input_str, return_tensors="pt")
            input_ids = input_ids[:, :in_len]
            true_str = tokenizer.batch_decode(input_ids)[0]
            input_ids = tokenizer.encode(true_str, return_tensors="pt")
            actual_in_len = input_ids.shape[1]
            result[in_out] = []
            for i in range(num_trials + warm_up):
                st = time.perf_counter()
                output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
                                            num_beams=num_beams)
                end = time.perf_counter()
                print("model generate cost: " + str(end - st))
                output = tokenizer.batch_decode(output_ids)
                print(output[0])
                actual_out_len = output_ids.shape[1] - actual_in_len
                if i >= warm_up:
                    result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
                                           actual_in_len, actual_out_len])
    return result

if __name__ == '__main__':
    from omegaconf import OmegaConf
    conf = OmegaConf.load(f'{current_dir}/config.yaml')