[LLM] Fix llm arc ut oom (#9300)
* Move model to CPU after testing so that GPU memory is deallocated
* Add code comment

---------

Co-authored-by: sgwhat <ge.song@intel.com>
parent ee5becdd61
commit cee9eaf542

2 changed files with 4 additions and 1 deletion
@@ -41,6 +41,7 @@ def test_optimize_model(Model, Tokenizer, model_path):
                                   trust_remote_code=True)
     model = model.to(device)
     logits_base_model = (model(input_ids)).logits
+    model.to('cpu')   # deallocate gpu memory
 
     model = Model.from_pretrained(model_path,
                                   load_in_4bit=True,
@@ -48,6 +49,7 @@ def test_optimize_model(Model, Tokenizer, model_path):
                                   trust_remote_code=True)
     model = model.to(device)
     logits_optimized_model = (model(input_ids)).logits
+    model.to('cpu')
 
     diff = abs(logits_base_model - logits_optimized_model).flatten()
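For context: test_optimize_model loads the checkpoint twice in a row, once as the base model and once with load_in_4bit=True, and compares their logits, so without the fix both copies occupy the Arc GPU at the same time. A minimal sketch of the pattern the commit applies, assuming an XPU-enabled PyTorch environment (e.g., intel_extension_for_pytorch is loaded); the helper name run_forward is illustrative, not part of the test:

    import torch

    device = 'xpu'  # Intel Arc GPU, as in these unit tests

    def run_forward(model, input_ids):
        # Do the GPU work, then move the weights back to host memory
        # so the next from_pretrained load fits on the device.
        model = model.to(device)
        logits = model(input_ids.to(device)).logits
        model.to('cpu')  # deallocate gpu memory
        return logits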
(second changed file)
@@ -41,10 +41,11 @@ def test_completion(Model, Tokenizer, model_path, prompt, answer):
                                   load_in_4bit=True,
                                   optimize_model=True,
                                   trust_remote_code=True)
-    model = model.to(device)
+    model = model.to(device)   # deallocate gpu memory
 
     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
     output = model.generate(input_ids, max_new_tokens=32)
+    model.to('cpu')
     output_str = tokenizer.decode(output[0], skip_special_tokens=True)
 
     assert answer in output_str
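Note that nn.Module.to moves parameters in place and returns the same module, so model.to('cpu') releases the device copy of the weights even though its return value is discarded. If that were not enough, a stricter teardown could be used; the sketch below is an assumption about further cleanup, not something this commit does, and torch.xpu.empty_cache() requires an XPU-enabled torch/IPEX build:

    import gc
    import torch

    def release_gpu(model):
        # Hypothetical stricter cleanup between GPU test cases.
        model.to('cpu')          # move the weights off the XPU in place
        del model                # drop this reference to the module
        gc.collect()             # let Python free the parameter storages
        torch.xpu.empty_cache()  # return cached device blocks to the driver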