diff --git a/python/llm/test/inference_gpu/test_transformers_api.py b/python/llm/test/inference_gpu/test_transformers_api.py index ff090ecc..64b77a2b 100644 --- a/python/llm/test/inference_gpu/test_transformers_api.py +++ b/python/llm/test/inference_gpu/test_transformers_api.py @@ -24,8 +24,6 @@ from transformers import LlamaTokenizer, AutoTokenizer device = os.environ['DEVICE'] print(f'Running on {device}') -if device == 'xpu': - import intel_extension_for_pytorch as ipex @pytest.mark.parametrize('prompt, answer', [ ('What is the capital of France?\n\n', 'Paris') @@ -75,32 +73,36 @@ def test_transformers_auto_model_for_speech_seq2seq_int4(): prompt = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun" -@pytest.mark.parametrize('Model, Tokenizer, model_path',[ - (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')), - (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH')) - ]) -def test_optimize_model(Model, Tokenizer, model_path): - with torch.inference_mode(): - tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True) - input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device) +# @pytest.mark.parametrize('Model, Tokenizer, model_path',[ +# (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')), +# (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH')) +# ]) +# def test_optimize_model(Model, Tokenizer, model_path): +# with torch.inference_mode(): +# tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True) +# input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device) - model = Model.from_pretrained(model_path, - load_in_4bit=True, - optimize_model=False, - trust_remote_code=True) - model = model.to(device) - logits_base_model = (model(input_ids)).logits - model.to('cpu') # deallocate gpu memory +# model = Model.from_pretrained(model_path, +# load_in_4bit=True, +# optimize_model=False, +# trust_remote_code=True) +# model = model.to(device) +# logits_base_model = (model(input_ids)).logits +# model.to('cpu') # deallocate gpu memory - model = Model.from_pretrained(model_path, - load_in_4bit=True, - optimize_model=True, - trust_remote_code=True) - model = model.to(device) - logits_optimized_model = (model(input_ids)).logits - model.to('cpu') +# model = Model.from_pretrained(model_path, +# load_in_4bit=True, +# optimize_model=True, +# trust_remote_code=True) +# model = model.to(device) +# logits_optimized_model = (model(input_ids)).logits +# model.to('cpu') - assert all(torch.isclose(logits_optimized_model, logits_base_model).tolist()) +# tol = 1e-02 +# num_false = torch.isclose(logits_optimized_model, logits_base_model, rtol=tol, atol=tol)\ +# .flatten().tolist().count(False) +# percent_false = num_false / logits_optimized_model.numel() +# assert percent_false < 1e-02 class Test_Optimize_Gpu_Model: def setup(self): diff --git a/python/llm/test/inference_gpu/test_transformers_api_disable_xmx.py b/python/llm/test/inference_gpu/test_transformers_api_disable_xmx.py new file mode 100644 index 00000000..7afce417 --- /dev/null +++ b/python/llm/test/inference_gpu/test_transformers_api_disable_xmx.py @@ -0,0 +1,58 @@ +# +# Copyright 2016 The BigDL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import pytest +import torch +from bigdl.llm.transformers import AutoModelForCausalLM +from transformers import AutoTokenizer + +device = os.environ['DEVICE'] +print(f'Running on {device}') + +prompt = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun" + +@pytest.mark.parametrize('Model, Tokenizer, model_path',[ + (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')), + (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH')) + ]) +def test_optimize_model(Model, Tokenizer, model_path): + with torch.inference_mode(): + tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True) + input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device) + + model = Model.from_pretrained(model_path, + load_in_4bit=True, + optimize_model=False, + trust_remote_code=True) + model = model.to(device) + logits_base_model = (model(input_ids)).logits + model.to('cpu') # deallocate gpu memory + + model = Model.from_pretrained(model_path, + load_in_4bit=True, + optimize_model=True, + trust_remote_code=True) + model = model.to(device) + logits_optimized_model = (model(input_ids)).logits + model.to('cpu') + + tol = 1e-03 + num_false = torch.isclose(logits_optimized_model, logits_base_model, rtol=tol, atol=tol)\ + .flatten().tolist().count(False) + percent_false = num_false / logits_optimized_model.numel() + assert percent_false < 1e-02 + \ No newline at end of file diff --git a/python/llm/test/run-llm-inference-tests-gpu.sh b/python/llm/test/run-llm-inference-tests-gpu.sh index 03f583b4..59ba2a0a 100644 --- a/python/llm/test/run-llm-inference-tests-gpu.sh +++ b/python/llm/test/run-llm-inference-tests-gpu.sh @@ -18,6 +18,9 @@ if [ -z "$THREAD_NUM" ]; then fi export OMP_NUM_THREADS=$THREAD_NUM pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api.py -v -s +export BIGDL_LLM_XMX_DISABLED=1 +pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_disable_xmx.py -v -s +unset BIGDL_LLM_XMX_DISABLED now=$(date "+%s") time=$((now-start))