LLM: Add mlp layer unit tests (#10200)

* add mlp layer unit tests
* add download baichuan-13b
* exclude llama for now
* install additional packages
* rename bash file
* switch to Baichuan2
* delete attention related code
* fix name errors in yml file
parent ca1166a0e5
commit 60e11b6739

4 changed files with 176 additions and 1 deletion
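The new test isolates a single MLP block: it registers PyTorch forward hooks on the block (and on the layer just before it), runs the same prompt through the unoptimized and the optimized model, and asserts that the captured outputs agree within a tolerance of 0. A minimal, self-contained sketch of that hook pattern (not from the commit; a toy nn.Linear stands in for a real module such as "transformer.h.31.mlp"):

import torch
import torch.nn as nn

# Toy stand-in for a transformer MLP block; the real test looks the module up by
# name, e.g. "transformer.h.31.mlp" (Qwen) or "model.layers.31.mlp" (Mistral).
mlp = nn.Linear(4, 4)

captured = []

def forward_hook(module, inputs, output):
    # Record the block's output so it can be compared later.
    captured.append(output)

handle = mlp.register_forward_hook(forward_hook)

with torch.inference_mode():
    x = torch.randn(1, 4)
    mlp(x)  # stands in for the forward pass of the unoptimized model
    mlp(x)  # stands in for the forward pass of the optimized model

handle.remove()

# Same shape of check as the test's assertion: the element-wise difference between
# the two captured outputs must not exceed the configured bound (0 in this commit).
max_diff = torch.max(captured[0] - captured[1]).item()
assert max_diff <= 0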
.github/workflows/llm_unit_tests.yml (vendored): 19 changes
@@ -235,6 +235,9 @@ jobs:
           echo "MPT_7B_ORIGIN_PATH=${ORIGIN_DIR}/mpt-7b-chat" >> "$GITHUB_ENV"
           echo "WHISPER_TINY_ORIGIN_PATH=${ORIGIN_DIR}/whisper-tiny" >> "$GITHUB_ENV"
+          echo "MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH=${ORIGIN_DIR}/Mistral-7B-Instruct-v0.1" >> "$GITHUB_ENV"
+          echo "BAICHUAN2_7B_ORIGIN_PATH=${ORIGIN_DIR}/Baichuan2-7B-Chat" >> "$GITHUB_ENV"
+          echo "QWEN_7B_ORIGIN_PATH=${ORIGIN_DIR}/Qwen-7B-Chat" >> "$GITHUB_ENV"

       - name: Checkout repo
         uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
@@ -303,6 +306,18 @@ jobs:
             echo "Directory $SPEECH_DATASET_PATH not found. Downloading from FTP server..."
             wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR
           fi
+          if [ ! -d $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH ]; then
+            echo "Directory $MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH not found. Downloading from FTP server..."
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Mistral-7B-Instruct-v0.1 -P $ORIGIN_DIR
+          fi
+          if [ ! -d $QWEN_7B_ORIGIN_PATH ]; then
+            echo "Directory $QWEN_7B_ORIGIN_PATH not found. Downloading from FTP server..."
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Qwen-7B-Chat -P $ORIGIN_DIR
+          fi
+          if [ ! -d $BAICHUAN2_7B_ORIGIN_PATH ]; then
+            echo "Directory $BAICHUAN2_7B_ORIGIN_PATH not found. Downloading from FTP server..."
+            wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/Baichuan2-7B-Chat -P $ORIGIN_DIR
+          fi

       - name: Run LLM inference test
         shell: bash
@@ -313,8 +328,10 @@ jobs:
           elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
             source /opt/intel/oneapi/setvars.sh
           fi
-          python -m pip install datasets librosa soundfile einops
+          python -m pip install datasets librosa soundfile einops tiktoken transformers_stream_generator
           bash python/llm/test/run-llm-inference-tests-gpu.sh
+          python -m pip install transformers==4.34.0
+          bash python/llm/test/run-llm-inference-tests-gpu-434.sh

       - name: Run LLM example tests
         shell: bash
python/llm/test/inference_gpu/test_transformers_api_mlp.py (new file, 129 lines)
@@ -0,0 +1,129 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import pytest

import torch
from bigdl.llm.transformers import AutoModelForCausalLM, AutoModel
from transformers import LlamaTokenizer, AutoTokenizer

device = os.environ['DEVICE']
print(f'Running on {device}')

PROMPT = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun"
TEST_MODEL_LIST = [
    ("Qwen-7B-Chat", AutoModelForCausalLM, AutoTokenizer, os.environ.get('QWEN_7B_ORIGIN_PATH')),
    ("Mistral-7B-Instruct-v0.1", AutoModelForCausalLM, AutoTokenizer, os.environ.get('MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH'))
]


class Test_Optimize_Gpu_Model:
    def setup_method(self):
        self.layer_outputs = []
        self.pre_layer_outputs = []

    def run_optimize_gpu_model(self, Name, Model, Tokenizer, model_path, MLP_layer, layer_before_MLP, lower_bound):
        with torch.inference_mode():
            def pre_forward_hook(module, input, output, layer_name):
                self.pre_layer_outputs.append(output)

            def forward_hook(module, input, output, layer_name):
                self.layer_outputs.append(output)

            tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True)
            input_ids = tokenizer.encode(PROMPT, return_tensors="pt").to(device)

            model = Model.from_pretrained(model_path,
                                          load_in_4bit=True,
                                          optimize_model=False,
                                          trust_remote_code=True)
            model = model.to(device)
            # Capture the input to and the output of the target MLP block on the unoptimized model.
            for layer_name, layer_module in model.named_modules():
                if layer_name == layer_before_MLP:
                    layer_module.register_forward_hook(
                        lambda module, input, output, layer_name=layer_name: pre_forward_hook(module, input,
                                                                                              output, layer_name))
                if layer_name == MLP_layer:
                    layer_module.register_forward_hook(
                        lambda module, input, output, layer_name=layer_name: forward_hook(module, input,
                                                                                          output, layer_name))
            logits_base_model = (model(input_ids)).logits
            # the list `layer_outputs` has only one element.
            layer_tensor = self.layer_outputs.pop()
            model.to('cpu')

            opt_model = Model.from_pretrained(model_path,
                                              load_in_4bit=True,
                                              optimize_model=True,
                                              trust_remote_code=True)
            opt_model = opt_model.to(device)

            def replace_forward_hook(module, input, output, layer_name):
                # Feed the optimized model's MLP the same input the unoptimized model's MLP saw.
                output = self.pre_layer_outputs[0]
                return output

            for layer_name, layer_module in opt_model.named_modules():
                if layer_name == layer_before_MLP:
                    layer_module.register_forward_hook(
                        lambda module, input, output, layer_name=layer_name: replace_forward_hook(module, input,
                                                                                                  output, layer_name))
                if layer_name == MLP_layer:
                    layer_module.register_forward_hook(
                        lambda module, input, output, layer_name=layer_name: forward_hook(module, input,
                                                                                          output, layer_name))
            logits_optimized_model = (opt_model(input_ids)).logits
            # the list `layer_outputs` has only one element.
            opt_layer_tensor = self.layer_outputs[0]
            opt_model.to('cpu')

            MLP_output_diff = []
            for i, (t1, t2) in enumerate(zip(layer_tensor, opt_layer_tensor)):
                if t1 is not None and t2 is not None:
                    if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
                        MLP_output_diff.append(t1 - t2)
                    else:
                        # 'past_key_value' is of type tuple by default.
                        for i, (t3, t4) in enumerate(zip(t1, t2)):
                            MLP_output_diff.append(t3 - t4)

            max_diff_tensor = [torch.max(item).item() for item in MLP_output_diff]
            print(max_diff_tensor)

            assert all(max_diff <= lower_bound for max_diff in max_diff_tensor)

    @pytest.mark.parametrize('Name, Model, Tokenizer, model_path', TEST_MODEL_LIST)
    def test_dynamic_functions(self, Name, Model, Tokenizer, model_path):
        if Name == "Qwen-7B-Chat":
            self.Qwen_7B_gpu_model(Name, Model, Tokenizer, model_path)
        elif Name == "Mistral-7B-Instruct-v0.1":
            self.Mistral_7B_Instruct_gpu_model(Name, Model, Tokenizer, model_path)

    def Qwen_7B_gpu_model(self, Name, Model, Tokenizer, model_path):
        # currently only compare the output of the last mlp layer.
        layer_before_MLP = "transformer.h.31.ln_2"
        MLP_layer = "transformer.h.31.mlp"
        lower_bound = 0
        self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, MLP_layer, layer_before_MLP, lower_bound)

    def Mistral_7B_Instruct_gpu_model(self, Name, Model, Tokenizer, model_path):
        # currently only compare the output of the last mlp layer.
        layer_before_MLP = "model.layers.31.post_attention_layernorm"
        MLP_layer = "model.layers.31.mlp"
        lower_bound = 0
        self.run_optimize_gpu_model(Name, Model, Tokenizer, model_path, MLP_layer, layer_before_MLP, lower_bound)
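For reference, a rough local-run sketch (not part of the commit): the environment variables below are placeholders for the model paths that the workflow exports, and the -k filter mirrors how the two runner scripts split the parametrized cases:

import os
import pytest

# Placeholder values; in CI these are exported by the workflow job above.
os.environ.setdefault("DEVICE", "xpu")
os.environ.setdefault("QWEN_7B_ORIGIN_PATH", "/path/to/Qwen-7B-Chat")
os.environ.setdefault("MISTRAL_7B_INSTRUCT_V0_1_ORIGIN_PATH", "/path/to/Mistral-7B-Instruct-v0.1")

# Mirrors run-llm-inference-tests-gpu.sh: run everything except the Mistral case,
# which is run under transformers 4.34.0 by run-llm-inference-tests-gpu-434.sh.
pytest.main([
    "python/llm/test/inference_gpu/test_transformers_api_mlp.py",
    "-v", "-s", "-k", "not Mistral",
])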
python/llm/test/run-llm-inference-tests-gpu-434.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash

export ANALYTICS_ZOO_ROOT=${ANALYTICS_ZOO_ROOT}
export LLM_HOME=${ANALYTICS_ZOO_ROOT}/python/llm/src
export LLM_INFERENCE_TEST_DIR=${ANALYTICS_ZOO_ROOT}/python/llm/test/inference_gpu

export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export DEVICE='xpu'

set -e

echo "# Start testing inference"
start=$(date "+%s")

if [ -z "$THREAD_NUM" ]; then
  THREAD_NUM=2
fi
export OMP_NUM_THREADS=$THREAD_NUM
export BIGDL_LLM_XMX_DISABLED=1
pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_mlp.py -v -s -k "Mistral"
unset BIGDL_LLM_XMX_DISABLED

now=$(date "+%s")
time=$((now-start))

echo "Bigdl-llm gpu inference tests for transformers 4.34.0 finished"
echo "Time used:$time seconds"
python/llm/test/run-llm-inference-tests-gpu.sh: 1 addition

@@ -21,6 +21,7 @@ pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api.py -v -s
 export BIGDL_LLM_XMX_DISABLED=1
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_final_logits.py -v -s
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_attention.py -v -s
+pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_mlp.py -v -s -k "not Mistral"
 unset BIGDL_LLM_XMX_DISABLED

 now=$(date "+%s")