diff --git a/docs/mddocs/Quickstart/llama_cpp_quickstart.md b/docs/mddocs/Quickstart/llama_cpp_quickstart.md index cb0bdedb..388efac6 100644 --- a/docs/mddocs/Quickstart/llama_cpp_quickstart.md +++ b/docs/mddocs/Quickstart/llama_cpp_quickstart.md @@ -18,6 +18,9 @@ See the demo of running LLaMA2-7B on Intel Arc GPU below. > > Our latest version is consistent with [a1631e5](https://github.com/ggerganov/llama.cpp/commit/a1631e53f6763e17da522ba219b030d8932900bd) of llama.cpp. +> [!NOTE] +> Starting from `ipex-llm[cpp]==2.2.0b20240912`, the oneAPI dependency of `ipex-llm[cpp]` on Windows will switch from `2024.0.0` to `2024.2.1`. + ## Table of Contents - [Prerequisites](./llama_cpp_quickstart.md#0-prerequisites) - [Install IPEX-LLM for llama.cpp](./llama_cpp_quickstart.md#1-install-ipex-llm-for-llamacpp) diff --git a/docs/mddocs/Quickstart/ollama_quickstart.md b/docs/mddocs/Quickstart/ollama_quickstart.md index e2ab75d8..7fa12216 100644 --- a/docs/mddocs/Quickstart/ollama_quickstart.md +++ b/docs/mddocs/Quickstart/ollama_quickstart.md @@ -18,6 +18,9 @@ See the demo of running LLaMA2-7B on Intel Arc GPU below. > > Our current version is consistent with [v0.3.6](https://github.com/ollama/ollama/releases/tag/v0.3.6) of ollama. +> [!NOTE] +> Starting from `ipex-llm[cpp]==2.2.0b20240912`, the oneAPI dependency of `ipex-llm[cpp]` on Windows will switch from `2024.0.0` to `2024.2.1`.
+ ## Table of Contents - [Install IPEX-LLM for Ollama](./ollama_quickstart.md#1-install-ipex-llm-for-ollama) - [Initialize Ollama](./ollama_quickstart.md#2-initialize-ollama) diff --git a/python/llm/setup.py b/python/llm/setup.py index 1eca7b27..c01f188c 100644 --- a/python/llm/setup.py +++ b/python/llm/setup.py @@ -277,6 +277,9 @@ def setup_package(): oneapi_2024_0_requires = ["dpcpp-cpp-rt==2024.0.2;platform_system=='Windows'", "mkl-dpcpp==2024.0.0;platform_system=='Windows'", "onednn==2024.0.0;platform_system=='Windows'"] + oneapi_2024_2_requires = ["dpcpp-cpp-rt==2024.2.1;platform_system=='Windows'", + "mkl-dpcpp==2024.2.1;platform_system=='Windows'", + "onednn==2024.2.1;platform_system=='Windows'"] # Linux install with --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ xpu_21_requires = copy.deepcopy(all_requires) for exclude_require in cpu_torch_version: @@ -294,8 +297,8 @@ def setup_package(): cpp_requires = ["bigdl-core-cpp==" + CORE_XE_VERSION, - "onednn-devel==2024.0.0;platform_system=='Windows'"] - cpp_requires += oneapi_2024_0_requires + "onednn-devel==2024.2.1;platform_system=='Windows'"] + cpp_requires += oneapi_2024_2_requires serving_requires = ['py-cpuinfo'] serving_requires += SERVING_DEP