[LLM] win igpu performance for ipex 2.1 and oneapi 2024.0 (#9679)

* Change igpu win tests for ipex 2.1 and oneapi 2024.0

* Update Qwen model repo id; update model list for 512-64

* Add .eval for win igpu all-in-one benchmark for best performance
Yuwen Hu 2023-12-13 18:52:29 +08:00 committed by GitHub
parent 16febc949c
commit cbdd49f229
6 changed files with 14 additions and 21 deletions


@@ -250,7 +250,7 @@ jobs:
if not exist dist\bigdl_llm*.whl (exit /b 1)
for %%i in (dist\bigdl_llm*.whl) do set whl_name=%%i
pip install %whl_name%[xpu] -i %INTERNAL_PYPI_URL% --trusted-host %INTERNAL_PYPI_TRUSTED_HOST% -q
pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
if %ERRORLEVEL% neq 0 (exit /b 1)
pip list
@@ -291,7 +290,6 @@ jobs:
run: |
call conda activate igpu-perf
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
REM for llava
@@ -317,7 +316,6 @@ jobs:
pip install transformers==4.34.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
@@ -359,7 +357,6 @@ jobs:
pip install transformers==4.31.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
REM for llava
@@ -385,7 +382,6 @@ jobs:
pip install transformers==4.34.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
@@ -427,7 +423,6 @@ jobs:
pip install transformers==4.31.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
REM for llava
@@ -453,7 +448,6 @@ jobs:
pip install transformers==4.34.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
@@ -494,7 +488,6 @@ jobs:
pip install transformers==4.31.0
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
set SYCL_ENABLE_DEFAULT_CONTEXTS=1
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
REM for llava
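
For reference, the two runtime flags this workflow keeps can also be set from Python before the XPU runtime is initialized. A minimal sketch, assuming an XPU build of intel_extension_for_pytorch (IPEX 2.1) is installed; this is illustrative and not part of the workflow itself.

import os

# Flags kept by the updated workflow (SYCL_ENABLE_DEFAULT_CONTEXTS is no longer set).
os.environ["SYCL_CACHE_PERSISTENT"] = "1"   # keep compiled SYCL kernels cached on disk between runs
os.environ["BIGDL_LLM_XMX_DISABLED"] = "1"  # disable the XMX path, as in the workflow above

# Set the variables before importing torch/IPEX so the SYCL runtime sees them.
import torch
import intel_extension_for_pytorch as ipex  # noqa: F401  (registers the 'xpu' device)

print(torch.xpu.is_available())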


@@ -666,12 +666,12 @@ def run_transformer_int4_gpu_win(repo_id,
st = time.perf_counter()
if repo_id in CHATGLM_IDS:
model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')
elif repo_id in LLAMA_IDS:
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
use_cache=True, cpu_embedding=cpu_embedding)
use_cache=True, cpu_embedding=cpu_embedding).eval()
tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')
elif repo_id in LLAVA_IDS:
@@ -679,12 +679,12 @@
sys.path.append(rf"{llava_repo_dir}")
from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')
else:
model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')
if isinstance(model, GPTJForCausalLM):
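
Each branch above now ends with .eval() before the model is moved to 'xpu'. A minimal sketch of the resulting load pattern, assuming BigDL-LLM's transformers-style loader; the model path and low-bit format are placeholders, not values taken from this commit.

from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "path/to/local/model"  # placeholder: a locally downloaded checkpoint
low_bit = "sym_int4"                # placeholder: one of BigDL-LLM's low-bit formats

# .eval() switches the module to inference mode (disabling dropout and other
# training-only behaviour) before the benchmark timings are taken.
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_low_bit=low_bit,
                                             optimize_model=True,
                                             trust_remote_code=True,
                                             use_cache=True,
                                             cpu_embedding=True).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')

Calling .eval() is cheap, but for benchmarking it avoids training-mode layers such as dropout perturbing both outputs and latency numbers.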


@@ -3,7 +3,7 @@ repo_id:
- 'THUDM/chatglm3-6b'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'internlm/internlm-chat-7b-8k'
- 'Qwen/Qwen-7B-Chat-10-12'
- 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
- 'meta-llama/Llama-2-7b-chat-hf'


@@ -3,7 +3,7 @@ repo_id:
- 'THUDM/chatglm3-6b'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'internlm/internlm-chat-7b-8k'
- 'Qwen/Qwen-7B-Chat-10-12'
- 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
- 'meta-llama/Llama-2-7b-chat-hf'


@@ -3,7 +3,7 @@ repo_id:
- 'THUDM/chatglm3-6b'
- 'baichuan-inc/Baichuan2-7B-Chat'
- 'internlm/internlm-chat-7b-8k'
- 'Qwen/Qwen-7B-Chat-10-12'
- 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
- 'meta-llama/Llama-2-7b-chat-hf'


@@ -1,16 +1,16 @@
repo_id:
- 'THUDM/chatglm2-6b'
# - 'THUDM/chatglm3-6b'
- 'THUDM/chatglm3-6b'
# - 'baichuan-inc/Baichuan2-7B-Chat'
# - 'internlm/internlm-chat-7b-8k'
# - 'Qwen/Qwen-7B-Chat-10-12'
# - 'BAAI/AquilaChat2-7B'
- 'internlm/internlm-chat-7b-8k'
# - 'Qwen/Qwen-7B-Chat'
- 'BAAI/AquilaChat2-7B'
- '01-ai/Yi-6B'
# - 'meta-llama/Llama-2-7b-chat-hf'
- 'meta-llama/Llama-2-7b-chat-hf'
# - 'WisdomShell/CodeShell-7B-Chat'
- 'tiiuae/falcon-7b-instruct-with-patch'
- 'mosaicml/mpt-7b-chat'
# - 'liuhaotian/llava-v1.5-7b'
- 'liuhaotian/llava-v1.5-7b'
- 'RWKV/rwkv-4-world-7b'
local_model_hub: 'path to your local model hub'
warm_up: 1
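
For context, the all-in-one benchmark drives its runs from YAML files like the ones changed above. A minimal sketch of how such a config could be consumed, purely illustrative; the file name is hypothetical and the real runner has additional fields.

import yaml  # PyYAML

with open("512-64_config.yaml") as f:  # hypothetical file name
    conf = yaml.safe_load(f)

# 'repo_id', 'local_model_hub' and 'warm_up' are fields visible in the diff above.
for repo_id in conf["repo_id"]:
    print(f"benchmark {repo_id} from {conf['local_model_hub']} (warm_up={conf['warm_up']})")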