LLM: separate Arc UT for disabled XMX (#9953)

* Separate the test_optimize_model API test into its own UT with XMX disabled
* Delete test_optimize_model from test_transformers_api.py
* Set the env variable in the .sh script / put test_optimize_model back
* Unset the env variable
* Remove the env setting from the .py file
* Address errors in the action
* Remove the ipex import
* Lower the tolerance
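For context, BIGDL_LLM_XMX_DISABLED is consumed as an environment flag. Exactly how bigdl-llm reads it is not shown in this diff, so the following is only a minimal sketch of the usual pattern for such a flag; the helper name xmx_disabled is hypothetical.

import os

# Minimal sketch, not taken from bigdl-llm: the common pattern for consuming
# a boolean environment flag such as BIGDL_LLM_XMX_DISABLED. The helper name
# is hypothetical; the real check inside the library is not part of this diff.
def xmx_disabled() -> bool:
    return os.environ.get("BIGDL_LLM_XMX_DISABLED", "0").lower() in ("1", "true", "y")

if xmx_disabled():
    print("XMX disabled: expecting the non-XMX GEMM path on Arc GPUs.")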
This commit is contained in:
parent 8d28aa8e2b
commit 50a851e3b3

3 changed files with 88 additions and 25 deletions
test_transformers_api.py:

@@ -24,8 +24,6 @@ from transformers import LlamaTokenizer, AutoTokenizer
 
 device = os.environ['DEVICE']
 print(f'Running on {device}')
-if device == 'xpu':
-    import intel_extension_for_pytorch as ipex
 
 @pytest.mark.parametrize('prompt, answer', [
     ('What is the capital of France?\n\n', 'Paris')
@@ -75,32 +73,36 @@ def test_transformers_auto_model_for_speech_seq2seq_int4():
 
 prompt = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun"
 
-@pytest.mark.parametrize('Model, Tokenizer, model_path',[
-    (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')),
-    (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH'))
-    ])
-def test_optimize_model(Model, Tokenizer, model_path):
-    with torch.inference_mode():
-        tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True)
-        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
+# @pytest.mark.parametrize('Model, Tokenizer, model_path',[
+#     (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')),
+#     (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH'))
+#     ])
+# def test_optimize_model(Model, Tokenizer, model_path):
+#     with torch.inference_mode():
+#         tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True)
+#         input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
 
-        model = Model.from_pretrained(model_path,
-                                    load_in_4bit=True,
-                                    optimize_model=False,
-                                    trust_remote_code=True)
-        model = model.to(device)
-        logits_base_model = (model(input_ids)).logits
-        model.to('cpu')  # deallocate gpu memory
+#         model = Model.from_pretrained(model_path,
+#                                     load_in_4bit=True,
+#                                     optimize_model=False,
+#                                     trust_remote_code=True)
+#         model = model.to(device)
+#         logits_base_model = (model(input_ids)).logits
+#         model.to('cpu')  # deallocate gpu memory
 
-        model = Model.from_pretrained(model_path,
-                                    load_in_4bit=True,
-                                    optimize_model=True,
-                                    trust_remote_code=True)
-        model = model.to(device)
-        logits_optimized_model = (model(input_ids)).logits
-        model.to('cpu')
+#         model = Model.from_pretrained(model_path,
+#                                     load_in_4bit=True,
+#                                     optimize_model=True,
+#                                     trust_remote_code=True)
+#         model = model.to(device)
+#         logits_optimized_model = (model(input_ids)).logits
+#         model.to('cpu')
 
-        assert all(torch.isclose(logits_optimized_model, logits_base_model).tolist())
+#         tol = 1e-02
+#         num_false = torch.isclose(logits_optimized_model, logits_base_model, rtol=tol, atol=tol)\
+#             .flatten().tolist().count(False)
+#         percent_false = num_false / logits_optimized_model.numel()
+#         assert percent_false < 1e-02
 
 class Test_Optimize_Gpu_Model:
     def setup(self):
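A note on why the exact-match assert was replaced before being commented out: for multi-dimensional logits, all(tensor.tolist()) iterates over rows, and non-empty lists are always truthy, so the original check could pass regardless of the values inside. A minimal demonstration, not part of the commit:

import torch

# For a tensor with more than one dimension, .tolist() yields nested lists,
# and all() over a list of non-empty lists is always True regardless of the
# boolean values inside them.
a = torch.zeros(2, 3)
b = torch.ones(2, 3)
close = torch.isclose(a, b)                    # all elements False
print(all(close.tolist()))                     # True -- each row is a non-empty list
print(close.flatten().tolist().count(False))   # 6 -- the real mismatch count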
test_transformers_api_disable_xmx.py (new file):

@@ -0,0 +1,58 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import pytest
+import torch
+from bigdl.llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+device = os.environ['DEVICE']
+print(f'Running on {device}')
+
+prompt = "Once upon a time, there existed a little girl who liked to have adventures. She wanted to go to places and meet new people, and have fun"
+
+@pytest.mark.parametrize('Model, Tokenizer, model_path',[
+    (AutoModelForCausalLM, AutoTokenizer, os.environ.get('MPT_7B_ORIGIN_PATH')),
+    (AutoModelForCausalLM, AutoTokenizer, os.environ.get('LLAMA2_7B_ORIGIN_PATH'))
+    ])
+def test_optimize_model(Model, Tokenizer, model_path):
+    with torch.inference_mode():
+        tokenizer = Tokenizer.from_pretrained(model_path, trust_remote_code=True)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
+
+        model = Model.from_pretrained(model_path,
+                                    load_in_4bit=True,
+                                    optimize_model=False,
+                                    trust_remote_code=True)
+        model = model.to(device)
+        logits_base_model = (model(input_ids)).logits
+        model.to('cpu')  # deallocate gpu memory
+
+        model = Model.from_pretrained(model_path,
+                                    load_in_4bit=True,
+                                    optimize_model=True,
+                                    trust_remote_code=True)
+        model = model.to(device)
+        logits_optimized_model = (model(input_ids)).logits
+        model.to('cpu')
+
+        tol = 1e-03
+        num_false = torch.isclose(logits_optimized_model, logits_base_model, rtol=tol, atol=tol)\
+            .flatten().tolist().count(False)
+        percent_false = num_false / logits_optimized_model.numel()
+        assert percent_false < 1e-02
+
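The tolerance check above counts mismatches by round-tripping through a Python list. An equivalent tensor-side formulation, shown only as a sketch (the function name percent_mismatch is hypothetical, and the arguments stand for the two logits tensors computed in the test):

import torch

# Sketch only, not part of this commit: the same "fraction of mismatched
# logits" check written with tensor ops instead of a Python list round-trip.
def percent_mismatch(a: torch.Tensor, b: torch.Tensor, tol: float = 1e-03) -> float:
    mismatched = ~torch.isclose(a, b, rtol=tol, atol=tol)  # True where outside tolerance
    return mismatched.float().mean().item()                # fraction in [0, 1]

# assert percent_mismatch(logits_optimized_model, logits_base_model) < 1e-02

Keeping the comparison on-tensor avoids materializing a large nested list for big vocabularies.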
UT run script (.sh; path not shown in this view):

@@ -18,6 +18,9 @@ if [ -z "$THREAD_NUM" ]; then
 fi
 export OMP_NUM_THREADS=$THREAD_NUM
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api.py -v -s
+export BIGDL_LLM_XMX_DISABLED=1
+pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_disable_xmx.py -v -s
+unset BIGDL_LLM_XMX_DISABLED
 
 now=$(date "+%s")
 time=$((now-start))
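As a usage note, the export/unset pair confines BIGDL_LLM_XMX_DISABLED to the second pytest invocation; the one-liner form BIGDL_LLM_XMX_DISABLED=1 pytest ... would scope it to a single command as well. The same isolation could be done from inside Python with pytest's monkeypatch fixture, assuming the flag is read at model-load time rather than at import time (a sketch, not part of the commit):

import pytest

# Sketch only: scope BIGDL_LLM_XMX_DISABLED to these tests from inside pytest.
# monkeypatch restores the original environment after each test automatically.
# This assumes bigdl-llm reads the flag at model-load time, not at import time.
@pytest.fixture(autouse=True)
def disable_xmx(monkeypatch):
    monkeypatch.setenv("BIGDL_LLM_XMX_DISABLED", "1")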