From 1355b2ce0634cb26f4178537157659134ca51f39 Mon Sep 17 00:00:00 2001
From: "Xu, Shuo" <100334393+ATMxsp01@users.noreply.github.com>
Date: Thu, 11 Jul 2024 15:39:02 +0800
Subject: [PATCH] Add model Qwen-VL-Chat to iGPU-perf (#11558)

* Add model Qwen-VL-Chat to iGPU-perf

* small fix

---------

Co-authored-by: ATMxsp01
---
 .github/workflows/llm_performance_tests.yml  |  4 +--
 python/llm/dev/benchmark/all-in-one/run.py   | 27 +++++++++++++++++++
 python/llm/dev/benchmark/all-in-one/save.py  |  7 ++++-
 .../test/benchmark/igpu-perf/1024-128.yaml   |  1 +
 .../igpu-perf/1024-128_int4_fp16.yaml        |  1 +
 .../1024-128_int4_fp16_loadlowbit.yaml       |  1 +
 .../igpu-perf/2048-256_int4_fp16.yaml        |  1 +
 .../benchmark/igpu-perf/32-32_int4_fp16.yaml |  1 +
 8 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 330c8f05..55090cc4 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -554,7 +554,7 @@ jobs:
           pip install --upgrade pip
           pip install --upgrade wheel
           pip install --upgrade omegaconf pandas
-          pip install --upgrade tiktoken einops transformers_stream_generator
+          pip install --upgrade tiktoken einops transformers_stream_generator matplotlib
           cd python\llm
           python setup.py clean --all bdist_wheel --win
@@ -584,7 +584,7 @@ jobs:
           pip install --upgrade pip
           pip install --upgrade wheel
           pip install --upgrade omegaconf pandas
-          pip install --upgrade tiktoken einops transformers_stream_generator
+          pip install --upgrade tiktoken einops transformers_stream_generator matplotlib
           pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
           pip show ipex-llm | findstr %TEST_VERSION_DATE%
diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 9bc4f574..6563f137 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -44,6 +44,8 @@
 LLAVA_IDS = ['liuhaotian/llava-v1.5-7b']
 
 PHI3VISION_IDS = ['microsoft/phi-3-vision-128k-instruct']
 
+QWENVL_IDS = ['Qwen/Qwen-VL-Chat']
+
 results = []
 excludes = []
@@ -923,6 +925,12 @@ def run_transformer_int4_gpu_win(repo_id,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1038,6 +1046,13 @@ def run_transformer_int4_fp16_gpu_win(repo_id,
                                                      torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                                     torch_dtype=torch.float16).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
                                                      torch_dtype=torch.float16).eval()
@@ -1149,6 +1164,12 @@ def run_transformer_int4_loadlowbit_gpu_win(repo_id,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  modules_to_not_convert=['c_fc', 'out_proj'],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
@@ -1259,6 +1280,12 @@ def run_transformer_int4_fp16_loadlowbit_gpu_win(repo_id,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
         model = model.half().to('xpu')
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
+                                                  modules_to_not_convert=['c_fc', 'out_proj'],
+                                                  use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path+'-'+low_bit, trust_remote_code=True)
+        model = model.half().to('xpu')
     else:
         model = AutoModelForCausalLM.load_low_bit(model_path+'-'+low_bit, optimize_model=True, trust_remote_code=True,
                                                   use_cache=True, cpu_embedding=cpu_embedding).eval()
diff --git a/python/llm/dev/benchmark/all-in-one/save.py b/python/llm/dev/benchmark/all-in-one/save.py
index 4f0ae1d3..b02f21ec 100644
--- a/python/llm/dev/benchmark/all-in-one/save.py
+++ b/python/llm/dev/benchmark/all-in-one/save.py
@@ -23,7 +23,7 @@
 import os
 import sys
 import gc
 
-from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, PHI3VISION_IDS, get_model_path
+from run import LLAMA_IDS, CHATGLM_IDS, LLAVA_IDS, PHI3VISION_IDS, QWENVL_IDS, get_model_path
 
 current_dir = os.path.dirname(os.path.realpath(__file__))
 
@@ -57,6 +57,11 @@ def save_model_in_low_bit(repo_id,
                                                      modules_to_not_convert=["vision_embed_tokens"],
                                                      trust_remote_code=True, use_cache=True).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    elif repo_id in QWENVL_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                     modules_to_not_convert=['c_fc', 'out_proj'],
+                                                     trust_remote_code=True, use_cache=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True).eval()
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index b51c9fac..eed025a3 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -12,6 +12,7 @@ repo_id:
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - 'RWKV/v5-Eagle-7B-HF'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
index f32b48c0..bf477f9d 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
index 18e4ca5c..2b828a7e 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
index 2fc0ddb1..535005c3 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
index 664b8cbb..ad7fc2d5 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml
@@ -11,6 +11,7 @@ repo_id:
   - 'mistralai/Mistral-7B-Instruct-v0.2'
   - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5'
   - '01-ai/Yi-6B-Chat'
+  - 'Qwen/Qwen-VL-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
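
Note (not part of the patch): the loading pattern this commit adds to run.py can be exercised standalone. The sketch below is a minimal, hedged example assuming an XPU-enabled ipex-llm install; the keyword arguments (optimize_model, load_in_low_bit, modules_to_not_convert=['c_fc', 'out_proj'], trust_remote_code, use_cache) mirror the patch, while the low-bit format ('sym_int4'), prompt, and generation settings are illustrative assumptions.

# Minimal sketch: load Qwen-VL-Chat with ipex-llm the way the patched run.py does,
# then run a text-only generation (which is how the all-in-one benchmark drives it).
import torch
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM

model_path = 'Qwen/Qwen-VL-Chat'  # assumption: Hub id or local path from your model hub

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    optimize_model=True,
    load_in_low_bit='sym_int4',                   # assumption: one common low-bit format
    modules_to_not_convert=['c_fc', 'out_proj'],  # keep these layers unconverted, as in the patch
    trust_remote_code=True,
    use_cache=True,
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')

prompt = "What can you do?"  # illustrative prompt
input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to('xpu')
with torch.inference_mode():
    output = model.generate(input_ids, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))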