From 7f80db95eb5e3722af3b1fa4c327c55ffcc8a7d8 Mon Sep 17 00:00:00 2001
From: "Xu, Shuo" <100334393+ATMxsp01@users.noreply.github.com>
Date: Tue, 23 Jul 2024 09:51:36 +0800
Subject: [PATCH] Change run.py in benchmark to support phi-3-vision in
 arc-perf (#11638)

Co-authored-by: ATMxsp01
---
 python/llm/dev/benchmark/all-in-one/run.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index ff548594..224679b2 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -480,6 +480,13 @@ def run_transformer_int4_gpu(repo_id,
                                          use_cache=True, cpu_embedding=cpu_embedding,
                                          torch_dtype=torch_dtype).eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    elif origin_repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     _attn_implementation="eager",
+                                                     modules_to_not_convert=["vision_embed_tokens"],
+                                                     trust_remote_code=True, use_cache=True,
+                                                     cpu_embedding=cpu_embedding, torch_dtype=torch_dtype).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         if "4bit" in repo_id:
             model = AutoModelForCausalLM.load_low_bit(model_path, optimize_model=True,