Add MiniCPM-V-2_6 to iGPU Perf (#11810)
* Add MiniCPM-V-2_6 to iGPU Perf
* keep last model in yaml
* fix MINICPM_V_IDS
* Restore tested model list
* Small fix

---------

Co-authored-by: Yuwen Hu <yuwen.hu@intel.com>
parent 96796f95cb
commit 9f17234f3b
7 changed files with 21 additions and 0 deletions
@@ -992,6 +992,13 @@ def run_transformer_int4_gpu_win(repo_id,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in MINICPM_V_IDS:
+        model = AutoModel.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                          modules_to_not_convert=["vpm", "resampler"],
+                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
+        model = model.llm
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
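The new branch loads MiniCPM-V-2_6 through ipex_llm's AutoModel, keeps the vision tower ("vpm") and the resampler out of low-bit conversion, and then benchmarks only the inner language model (model.llm). Below is a minimal standalone sketch of that loading path, assuming an ipex-llm XPU environment; the model path and low_bit value are placeholders, not taken from the script:

# Sketch: load MiniCPM-V-2_6 the way the new branch does (paths/values are assumptions).
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModel

model_path = "openbmb/MiniCPM-V-2_6"     # or a local copy under your model hub
low_bit = "sym_int4"                      # illustrative low-bit format

# Quantize the model but keep the vision encoder ("vpm") and resampler unconverted.
model = AutoModel.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                  modules_to_not_convert=["vpm", "resampler"],
                                  trust_remote_code=True, use_cache=True).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

model = model.to('xpu')
# The perf test only times text generation, so it swaps in the underlying LLM.
model = model.llm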
@@ -1108,6 +1115,14 @@ def run_transformer_int4_fp16_gpu_win(repo_id,
                                                      torch_dtype=torch.float16).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in MINICPM_V_IDS:
+        model = AutoModel.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                          modules_to_not_convert=["vpm", "resampler"],
+                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
+                                          torch_dtype=torch.float16).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
+        model = model.llm
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
                                                      trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding,
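With model.llm in place, the timed section can treat it like any other decoder-only model. Continuing the loading sketch above, here is a hedged illustration of one measured generation pass; the prompt, token budget, and synchronize call are assumptions, not lifted from run.py:

# Illustrative timing of a single generation pass on the XPU, continuing the loading sketch.
import time
import torch

prompt = "What is AI?"
input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')

with torch.inference_mode():
    start = time.perf_counter()
    output_ids = model.generate(input_ids, max_new_tokens=128)
    torch.xpu.synchronize()               # wait for device work to finish before reading the clock
    elapsed = time.perf_counter() - start

print(f"{output_ids.shape[1] - input_ids.shape[1]} new tokens in {elapsed:.2f}s")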
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
@@ -5,6 +5,7 @@ repo_id:
   - 'microsoft/Phi-3-mini-4k-instruct'
   - 'microsoft/Phi-3-mini-128k-instruct'
   - 'microsoft/phi-3-vision-128k-instruct'
+  - 'openbmb/MiniCPM-V-2_6'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
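The six config changes all add the same entry to the tested-model list; one file also uses a longer schedule (warm_up: 3, num_trials: 5). For context, a minimal sketch of how a driver could consume a config laid out like these; the field names come from the YAML above, while run_once and the config path are hypothetical placeholders, not the actual harness:

# Hypothetical driver loop over a perf config like the ones above.
import time
import yaml

def run_once(repo_id: str, model_hub: str) -> float:
    """Placeholder for one benchmark pass; returns elapsed seconds."""
    start = time.perf_counter()
    # ... load model_hub/repo_id and generate, as in the sketches above ...
    return time.perf_counter() - start

with open("config.yaml") as f:             # illustrative path
    conf = yaml.safe_load(f)

for repo_id in conf['repo_id']:
    for _ in range(conf['warm_up']):        # un-timed warm-up passes
        run_once(repo_id, conf['local_model_hub'])
    trials = [run_once(repo_id, conf['local_model_hub']) for _ in range(conf['num_trials'])]
    print(f"{repo_id}: {sum(trials) / len(trials):.3f}s avg over {conf['num_trials']} trials")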