diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index ea6b690f..ad328d36 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -250,7 +250,7 @@ jobs:
           if not exist dist\bigdl_llm*.whl (exit /b 1)
           for %%i in (dist\bigdl_llm*.whl) do set whl_name=%%i
 
-          pip install %whl_name%[xpu] -i %INTERNAL_PYPI_URL% --trusted-host %INTERNAL_PYPI_TRUSTED_HOST% -q
+          pip install --pre --upgrade %whl_name%[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
           if %ERRORLEVEL% neq 0 (exit /b 1)
           pip list
 
@@ -291,7 +291,6 @@ jobs:
         run: |
           call conda activate igpu-perf
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
           REM for llava
@@ -317,7 +316,6 @@ jobs:
           pip install transformers==4.34.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
 
@@ -359,7 +357,6 @@ jobs:
           pip install transformers==4.31.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
           REM for llava
@@ -385,7 +382,6 @@ jobs:
           pip install transformers==4.34.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
 
@@ -427,7 +423,6 @@ jobs:
           pip install transformers==4.31.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
           REM for llava
@@ -453,7 +448,6 @@ jobs:
           pip install transformers==4.34.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
 
@@ -494,7 +488,6 @@ jobs:
           pip install transformers==4.31.0
 
           call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
-          set SYCL_ENABLE_DEFAULT_CONTEXTS=1
           set SYCL_CACHE_PERSISTENT=1
           set BIGDL_LLM_XMX_DISABLED=1
           REM for llava
diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index eb24dac7..3ab3d3ab 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -666,12 +666,12 @@ def run_transformer_int4_gpu_win(repo_id,
     st = time.perf_counter()
     if repo_id in CHATGLM_IDS:
         model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                          trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     elif repo_id in LLAMA_IDS:
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
-                                                     use_cache=True, cpu_embedding=cpu_embedding)
+                                                     use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     elif repo_id in LLAVA_IDS:
@@ -679,12 +679,12 @@ def run_transformer_int4_gpu_win(repo_id,
         sys.path.append(rf"{llava_repo_dir}")
         from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
         model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, optimize_model=True,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding)
+                                                     trust_remote_code=True, use_cache=True, cpu_embedding=cpu_embedding).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     if isinstance(model, GPTJForCausalLM):
diff --git a/python/llm/test/benchmark/igpu-perf/32-256.yaml b/python/llm/test/benchmark/igpu-perf/32-256.yaml
index faa2a153..1947f24e 100644
--- a/python/llm/test/benchmark/igpu-perf/32-256.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-256.yaml
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
diff --git a/python/llm/test/benchmark/igpu-perf/32-32.yaml b/python/llm/test/benchmark/igpu-perf/32-32.yaml
index 2108899e..93a8a918 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32.yaml
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
diff --git a/python/llm/test/benchmark/igpu-perf/32-512.yaml b/python/llm/test/benchmark/igpu-perf/32-512.yaml
index 8d8c2212..897b76a2 100644
--- a/python/llm/test/benchmark/igpu-perf/32-512.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-512.yaml
@@ -3,7 +3,7 @@ repo_id:
   - 'THUDM/chatglm3-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'internlm/internlm-chat-7b-8k'
-  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'Qwen/Qwen-7B-Chat'
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
diff --git a/python/llm/test/benchmark/igpu-perf/512-64.yaml b/python/llm/test/benchmark/igpu-perf/512-64.yaml
index 5f9c6a41..78f1d837 100644
--- a/python/llm/test/benchmark/igpu-perf/512-64.yaml
+++ b/python/llm/test/benchmark/igpu-perf/512-64.yaml
@@ -1,16 +1,16 @@
 repo_id:
   - 'THUDM/chatglm2-6b'
-  # - 'THUDM/chatglm3-6b'
+  - 'THUDM/chatglm3-6b'
   # - 'baichuan-inc/Baichuan2-7B-Chat'
-  # - 'internlm/internlm-chat-7b-8k'
-  # - 'Qwen/Qwen-7B-Chat-10-12'
-  # - 'BAAI/AquilaChat2-7B'
+  - 'internlm/internlm-chat-7b-8k'
+  # - 'Qwen/Qwen-7B-Chat'
+  - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
-  # - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-7b-chat-hf'
   # - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
-  # - 'liuhaotian/llava-v1.5-7b'
+  - 'liuhaotian/llava-v1.5-7b'
   - 'RWKV/rwkv-4-world-7b'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
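
For reference, a minimal standalone sketch of the loading pattern the updated run.py follows on an Intel GPU. This is not part of the patch; the model path and the 'sym_int4' low-bit format are placeholder assumptions, and it assumes bigdl-llm[xpu] is installed.

import torch
from transformers import AutoTokenizer
from bigdl.llm.transformers import AutoModelForCausalLM

model_path = "path/to/Llama-2-7b-chat-hf"  # placeholder local path
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_low_bit="sym_int4",  # assumed low-bit format
                                             optimize_model=True,
                                             trust_remote_code=True,
                                             use_cache=True,
                                             cpu_embedding=True).eval()   # switch to inference mode before moving to XPU
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to('xpu')

with torch.inference_mode():
    input_ids = tokenizer("Once upon a time", return_tensors="pt").input_ids.to('xpu')
    output = model.generate(input_ids, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))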