From 028ad4f63c44b4ebcade085b764cc5d8a6f5b2cc Mon Sep 17 00:00:00 2001
From: "Xu, Shuo" <100334393+ATMxsp01@users.noreply.github.com>
Date: Wed, 10 Jul 2024 17:26:30 +0800
Subject: [PATCH] Add model phi-3-vision-128k-instruct to iGPU-perf benchmark
 (#11554)

* try to improve MiniCPM performance

* Add model phi-3-vision-128k-instruct to iGPU-perf benchmark

---------

Co-authored-by: ATMxsp01
---
 python/llm/dev/benchmark/all-in-one/run.py    | 31 +++++++++++++++++++
 python/llm/dev/benchmark/all-in-one/save.py   |  8 ++++-
 .../benchmark/igpu-perf/1024-128_437.yaml     |  1 +
 .../igpu-perf/1024-128_int4_fp16_437.yaml     |  1 +
 .../1024-128_int4_fp16_loadlowbit_437.yaml    |  1 +
 .../igpu-perf/2048-256_int4_fp16_437.yaml     |  1 +
 .../igpu-perf/32-32_int4_fp16_437.yaml        |  1 +
 7 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index cfaa1f97..9bc4f574 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -42,6 +42,8 @@ CHATGLM_IDS = ['THUDM/chatglm-6b', 'THUDM/chatglm2-6b', 'THUDM/chatglm3-6b']
 
 LLAVA_IDS = ['liuhaotian/llava-v1.5-7b']
 
+PHI3VISION_IDS = ['microsoft/phi-3-vision-128k-instruct']
+
 results = []
 excludes = []
@@ -914,6 +916,13 @@ def run_transformer_int4_gpu_win(repo_id,
                                           trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     _attn_implementation="eager",
+                                                     modules_to_not_convert=["vision_embed_tokens"],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1021,6 +1030,14 @@ def run_transformer_int4_fp16_gpu_win(repo_id,
                                           torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     _attn_implementation="eager",
+                                                     modules_to_not_convert=["vision_embed_tokens"],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
@@ -1125,6 +1142,13 @@ def run_transformer_int4_loadlowbit_gpu_win(repo_id,
                                                use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  _attn_implementation="eager",
+                                                  modules_to_not_convert=["vision_embed_tokens"],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1228,6 +1252,13 @@ def run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id,
                                                use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.half().to('xpu')
+    elif repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  _attn_implementation="eager",
+                                                  modules_to_not_convert=["vision_embed_tokens"],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
diff --git a/python/llm/dev/benchmark/all-in-one/save.py b/python/llm/dev/benchmark/all-in-one/save.py
index 48aa3d98..4f0ae1d3 100644
--- a/python/llm/dev/benchmark/all-in-one/save.py
+++ b/python/llm/dev/benchmark/all-in-one/save.py
@@ -23,7 +23,7 @@ import os
 import sys
 import gc
 
-from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, get_model_path
+from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, PHI3VISION_IDS, get_model_path
 
 current_dir = os.path.dirname(os.path.realpath(__file__))
 
@@ -51,6 +51,12 @@ def save_model_in_low_bit(repo_id,
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
                                                      trust_remote_code=True, use_cache=True).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    elif repo_id in PHI3VISION_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     _attn_implementation="eager",
+                                                     modules_to_not_convert=["vision_embed_tokens"],
+                                                     trust_remote_code=True, use_cache=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True).eval()
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
index 6019026c..f191801c 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -3,6 +3,7 @@ repo_id:
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
index 12ccaa5d..f9db9131 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
@@ -3,6 +3,7 @@ repo_id:
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
index 4401207c..abd17aaa 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml
@@ -3,6 +3,7 @@ repo_id:
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
index f9ae8540..fd4fbbfa 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml
@@ -3,6 +3,7 @@ repo_id:
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
index de32d305..93fdc926 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml
@@ -3,6 +3,7 @@ repo_id:
   - 'Qwen/Qwen2-7B-Instruct'
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
+  - 'microsoft/phi-3-vision-128k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
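
Note: the loading pattern this patch repeats in every PHI3VISION_IDS branch
can be tried on its own. The sketch below is a minimal standalone example and
not part of the patch: it assumes ipex-llm is installed with XPU support, and
MODEL_PATH plus the 'sym_int4' precision are illustrative stand-ins for the
model_path and low_bit values the benchmark harness reads from the YAML
configs.

from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM

MODEL_PATH = "path/to/phi-3-vision-128k-instruct"  # local copy of the model
LOW_BIT = "sym_int4"  # illustrative; the benchmark passes low_bit in

# Same keyword arguments as the PHI3VISION_IDS branches in run.py: eager
# attention, and the vision embedding module excluded from low-bit
# conversion via modules_to_not_convert.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    optimize_model=True,
    load_in_low_bit=LOW_BIT,
    _attn_implementation="eager",
    modules_to_not_convert=["vision_embed_tokens"],
    trust_remote_code=True,
    use_cache=True,
).eval()
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = model.to("xpu")

# The *_loadlowbit benchmarks instead reload a checkpoint produced by
# save.py (model.save_low_bit(MODEL_PATH + '-' + LOW_BIT)) through
# AutoModelForCausalLM.load_low_bit(...), passing the same
# _attn_implementation and modules_to_not_convert arguments.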