[LLM] Win iGPU performance tests for IPEX 2.1 and oneAPI 2024.0 (#9679)

* Change Win iGPU tests for IPEX 2.1 and oneAPI 2024.0
* Update the Qwen model repo id; update the model list for 512-64
* Add .eval() in the Win iGPU all-in-one benchmark for best performance
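In short: the Windows iGPU jobs now install the IPEX 2.1 XPU wheels from Intel's public index, drop an obsolete SYCL variable, and put every benchmarked model into eval mode before moving it to the iGPU. A minimal sketch of the resulting loading pattern, using BigDL-LLM's transformers-style API; the model path and the 'sym_int4' low-bit format are illustrative values, not from this commit:

```python
from transformers import AutoTokenizer
from bigdl.llm.transformers import AutoModelForCausalLM

model_path = "meta-llama/Llama-2-7b-chat-hf"  # any repo id from the configs below
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit="sym_int4",
                                             optimize_model=True, trust_remote_code=True,
                                             use_cache=True, cpu_embedding=True).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to("xpu")  # run on the Intel iGPU through IPEX
```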
This commit is contained in:
parent 16febc949c
commit cbdd49f229
6 changed files with 14 additions and 21 deletions
.github/workflows/llm_performance_tests.yml (vendored, 9 changes)
@@ -250,7 +250,7 @@ jobs:
 if not exist dist\bigdl_llm*.whl (exit /b 1)
 for %%i in (dist\bigdl_llm*.whl) do set whl_name=%%i

-pip install %whl_name%[xpu] -i %INTERNAL_PYPI_URL% --trusted-host %INTERNAL_PYPI_TRUSTED_HOST% -q
+pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
 if %ERRORLEVEL% neq 0 (exit /b 1)
 pip list

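The wheel's [xpu] extra is now resolved against Intel's public IPEX stable XPU index instead of an internal PyPI mirror, which is what pulls in the IPEX 2.1 build and its oneAPI 2024.0 runtime dependencies. A hypothetical post-install smoke test, not part of the workflow itself:

```python
import torch
import intel_extension_for_pytorch as ipex  # registers the 'xpu' device with torch

print(torch.__version__)         # expect a 2.1.x build after this change
print(ipex.__version__)          # the matching IPEX XPU release
print(torch.xpu.is_available())  # True once the GPU driver and oneAPI are in place
```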
@@ -291,7 +291,6 @@ jobs:
 run: |
 call conda activate igpu-perf
 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1
 REM for llava
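This hunk, repeated in every job below, drops SYCL_ENABLE_DEFAULT_CONTEXTS=1, presumably because oneAPI 2024.0 already enables default SYCL contexts out of the box; only the kernel-cache and XMX settings remain. The equivalent setup from Python, assuming it runs before any model is loaded:

```python
import os

# SYCL_ENABLE_DEFAULT_CONTEXTS is no longer set (assumption: the new oneAPI
# release defaults to shared SYCL contexts on Windows).
os.environ["SYCL_CACHE_PERSISTENT"] = "1"   # keep compiled SYCL kernels across runs
os.environ["BIGDL_LLM_XMX_DISABLED"] = "1"  # skip XMX code paths on the iGPU
```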
@@ -317,7 +316,6 @@ jobs:
 pip install transformers==4.34.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1

@@ -359,7 +357,6 @@ jobs:
 pip install transformers==4.31.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1
 REM for llava
@@ -385,7 +382,6 @@ jobs:
 pip install transformers==4.34.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1

@@ -427,7 +423,6 @@ jobs:
 pip install transformers==4.31.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1
 REM for llava
@@ -453,7 +448,6 @@ jobs:
 pip install transformers==4.34.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1

@@ -494,7 +488,6 @@ jobs:
 pip install transformers==4.31.0

 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-set SYCL_ENABLE_DEFAULT_CONTEXTS=1
 set SYCL_CACHE_PERSISTENT=1
 set BIGDL_LLM_XMX_DISABLED=1
 REM for llava
@@ -666,12 +666,12 @@ def run_transformer_int4_gpu_win(repo_id,
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
         model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     elif repo_id in LLAMA_IDS:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
-                                                     use_cache=True, cpu_embedding=cpu_embedding)
+                                                     use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     elif repo_id in LLAVA_IDS:
@@ -679,12 +679,12 @@ def run_transformer_int4_gpu_win(repo_id,
         sys.path.append(rf"{llava_repo_dir}")
         from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     if isinstance(model, GPTJForCausalLM):
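The only change in run_transformer_int4_gpu_win is appending .eval() to each from_pretrained call. eval() switches modules such as Dropout into inference behavior, so the measured latency is not distorted by training-mode code paths. A self-contained illustration in plain PyTorch:

```python
import torch

m = torch.nn.Dropout(p=0.5)
x = torch.ones(4)
m.train()
print(m(x))  # training mode: about half the elements zeroed, the rest scaled by 2
m.eval()
print(m(x))  # eval mode: identity; deterministic, benchmark-friendly output
```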
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
@@ -1,16 +1,16 @@
 repo_id:
   - 'THUDM/chatglm2-6b'
-  # - 'THUDM/chatglm3-6b'
+  - 'THUDM/chatglm3-6b'
   # - 'baichuan-inc/Baichuan2-7B-Chat'
-  # - 'internlm/internlm-chat-7b-8k'
+  - 'internlm/internlm-chat-7b-8k'
-  # - 'Qwen/Qwen-7B-Chat-10-12'
+  # - 'Qwen/Qwen-7B-Chat'
-  # - 'BAAI/AquilaChat2-7B'
+  - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
-  # - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-7b-chat-hf'
   # - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
-  # - 'liuhaotian/llava-v1.5-7b'
+  - 'liuhaotian/llava-v1.5-7b'
   - 'RWKV/rwkv-4-world-7b'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
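These YAML files feed the all-in-one benchmark: commented entries are skipped, the three identical hunks above rename the pinned 'Qwen/Qwen-7B-Chat-10-12' snapshot back to 'Qwen/Qwen-7B-Chat', and the final hunk re-enables most of the 512-64 model list. A rough sketch of how such a config is consumed; the file name is hypothetical and the real harness handles more fields:

```python
import yaml  # assumption: PyYAML is available in the benchmark environment

with open("512-64.yaml") as f:  # hypothetical name for the last config above
    conf = yaml.safe_load(f)

print("models:", conf["repo_id"])             # only uncommented entries survive parsing
print("hub:", conf["local_model_hub"])
print("warm-up iterations:", conf["warm_up"])
```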