diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 58d60b14..35591f75 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -375,8 +375,11 @@ def run_transformer_int4_gpu(repo_id,
         model = model.to('xpu')
     else:
         if 'starcoder' in repo_id:
+            # Load starcoder-15.5b model in bf16 format to avoid CPU OOM.
             model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                          trust_remote_code=True, use_cache=True, torch_dtype=torch.bfloat16).eval()
+            # Convert the low-bit model back to fp32 for performance considerations.
+            model = model.float()
         else:
             model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                          trust_remote_code=True, use_cache=True).eval()
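
For reference, the pattern this hunk implements is: pass torch_dtype=torch.bfloat16 to from_pretrained so the checkpoint is materialized on the CPU in bf16 (roughly half the host memory of fp32), then, once loading and low-bit conversion are done, call .float() so the layers left in floating point execute in fp32. Below is a minimal sketch of that idea using only the stock Hugging Face transformers API; the benchmark's own AutoModelForCausalLM wrapper and its optimize_model/load_in_low_bit arguments are not shown here, and the "bigcode/starcoder" model id is used purely for illustration.

    import torch
    from transformers import AutoModelForCausalLM

    # Illustrative model id; the real benchmark resolves model_path from its own config.
    model_path = "bigcode/starcoder"

    # Load the checkpoint in bf16 so peak host memory during loading is roughly half of fp32.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        use_cache=True,
    ).eval()

    # Cast the remaining floating-point parameters back to fp32 before benchmarking,
    # mirroring the `model = model.float()` line added in the diff.
    model = model.float()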