From 23fc888abe6d71341786ddc6e4b7e491ece2efc9 Mon Sep 17 00:00:00 2001 From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com> Date: Tue, 9 Jan 2024 15:38:47 +0800 Subject: [PATCH] Update llm gpu xpu default related info to PyTorch 2.1 (#9866) --- python/llm/example/GPU/Deepspeed-AutoTP/README.md | 3 +-- .../Advanced-Quantizations/AWQ/README.md | 2 +- .../Advanced-Quantizations/GGUF/README.md | 3 +-- .../Advanced-Quantizations/GPTQ/README.md | 2 +- .../Model/aquila/README.md | 3 +-- .../Model/aquila2/README.md | 3 +-- .../Model/baichuan/README.md | 3 +-- .../Model/baichuan2/README.md | 3 +-- .../Model/bluelm/README.md | 3 +-- .../Model/chatglm2/README.md | 6 ++---- .../Model/chatglm3/README.md | 6 ++---- .../Model/chinese-llama2/README.md | 3 +-- .../Model/codellama/readme.md | 3 +-- .../Model/distil-whisper/README.md | 3 +-- .../Model/dolly-v1/README.md | 3 +-- .../Model/dolly-v2/README.md | 3 +-- .../Model/falcon/README.md | 5 ++--- .../Model/flan-t5/README.md | 3 +-- .../Model/gpt-j/readme.md | 3 +-- .../Model/internlm/README.md | 3 +-- .../Model/llama2/README.md | 3 +-- .../Model/mistral/README.md | 3 +-- .../Model/mixtral/README.md | 3 +-- .../Model/mpt/README.md | 3 +-- .../Model/phi-1_5/README.md | 3 +-- .../Model/qwen-vl/README.md | 3 +-- .../Model/qwen/README.md | 3 +-- .../Model/replit/README.md | 3 +-- .../Model/solar-10.7b/README.md | 3 +-- .../Model/starcoder/readme.md | 3 +-- .../Model/vicuna/README.md | 3 +-- .../Model/voiceassistant/README.md | 3 +-- .../Model/whisper/readme.md | 3 +-- .../HF-Transformers-AutoModels/Model/yi/README.md | 3 +-- .../More-Data-Types/README.md | 3 +-- .../GPU/PyTorch-Models/Model/aquila2/README.md | 3 +-- .../GPU/PyTorch-Models/Model/baichuan/README.md | 3 +-- .../GPU/PyTorch-Models/Model/baichuan2/README.md | 3 +-- .../GPU/PyTorch-Models/Model/bluelm/README.md | 3 +-- .../GPU/PyTorch-Models/Model/chatglm2/README.md | 6 ++---- .../GPU/PyTorch-Models/Model/chatglm3/README.md | 6 ++---- 
.../GPU/PyTorch-Models/Model/codellama/README.md | 3 +-- .../PyTorch-Models/Model/distil-whisper/README.md | 3 +-- .../GPU/PyTorch-Models/Model/dolly-v1/README.md | 3 +-- .../GPU/PyTorch-Models/Model/dolly-v2/README.md | 3 +-- .../GPU/PyTorch-Models/Model/flan-t5/README.md | 3 +-- .../GPU/PyTorch-Models/Model/llama2/README.md | 3 +-- .../GPU/PyTorch-Models/Model/llava/README.md | 3 +-- .../GPU/PyTorch-Models/Model/mistral/README.md | 3 +-- .../GPU/PyTorch-Models/Model/mixtral/README.md | 3 +-- .../GPU/PyTorch-Models/Model/qwen-vl/README.md | 3 +-- .../GPU/PyTorch-Models/Model/replit/README.md | 3 +-- .../PyTorch-Models/Model/solar-10.7b/README.md | 3 +-- .../GPU/PyTorch-Models/Model/starcoder/README.md | 3 +-- .../example/GPU/PyTorch-Models/Model/yi/README.md | 3 +-- .../GPU/PyTorch-Models/More-Data-Types/README.md | 3 +-- .../GPU/PyTorch-Models/Save-Load/README.md | 3 +-- python/llm/example/GPU/QLoRA-FineTuning/README.md | 3 +-- .../GPU/QLoRA-FineTuning/alpaca-qlora/README.md | 5 ++--- python/llm/example/GPU/README.md | 15 +-------------- python/llm/example/GPU/vLLM-Serving/README.md | 1 + 61 files changed, 67 insertions(+), 140 deletions(-) diff --git a/python/llm/example/GPU/Deepspeed-AutoTP/README.md b/python/llm/example/GPU/Deepspeed-AutoTP/README.md index 4da831f7..50f02c1b 100644 --- a/python/llm/example/GPU/Deepspeed-AutoTP/README.md +++ b/python/llm/example/GPU/Deepspeed-AutoTP/README.md @@ -13,8 +13,7 @@ To run this example with BigDL-LLM on Intel GPUs, we have some recommended requi conda create -n llm python=3.9 conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default -# you can install specific ipex/torch version for your need -pip install --pre --upgrade bigdl-llm[xpu_2.1] -f https://developer.intel.com/ipex-whl-stable-xpu +pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install oneccl_bind_pt==2.1.100 -f https://developer.intel.com/ipex-whl-stable-xpu # 
configures OneAPI environment variables source /opt/intel/oneapi/setvars.sh diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md index 64df39e3..ed329582 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/AWQ/README.md @@ -30,7 +30,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm - +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.35.0 pip install autoawq==0.1.8 --no-deps diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GGUF/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GGUF/README.md index 8740ac2c..adf42f5f 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GGUF/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GGUF/README.md @@ -26,8 +26,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.34.0 # upgrade transformers ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GPTQ/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GPTQ/README.md 
index 280fab96..765aa9a4 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GPTQ/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Advanced-Quantizations/GPTQ/README.md @@ -11,7 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm - +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.34.0 BUILD_CUDA_EXT=0 pip install git+https://github.com/PanQiWei/AutoGPTQ.git@1de9ab6 diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila/README.md index 56a8fc6e..64f1c91c 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila/README.md @@ -17,8 +17,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. 
Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila2/README.md index 4dc60c70..65a54eb8 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/aquila2/README.md @@ -17,8 +17,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan/README.md index e264fdb7..d513c6ed 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers_stream_generator # additional package required for Baichuan-13B-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md index f4a74302..888eeafd 
100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/baichuan2/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers_stream_generator # additional package required for Baichuan-7B-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/bluelm/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/bluelm/README.md index 9521086a..6fef35fa 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/bluelm/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/bluelm/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm2/README.md index 5fbc72db..379d1dbb 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm2/README.md @@ -12,8 +12,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda 
activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` @@ -73,8 +72,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm3/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm3/README.md index f2e34d19..a6d09fa6 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm3/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chatglm3/README.md @@ -12,8 +12,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` @@ -74,8 +73,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f 
https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chinese-llama2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chinese-llama2/README.md index d795ad98..dc6af0d1 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chinese-llama2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/chinese-llama2/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md index 84ff0040..b101b28e 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codellama/readme.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher version of transformers ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/distil-whisper/README.md 
b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/distil-whisper/README.md index 55ffd1b3..51962dfb 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/distil-whisper/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/distil-whisper/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install datasets soundfile librosa # required by audio processing ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v1/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v1/README.md index d1a1be39..1f91ce53 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v1/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v1/README.md @@ -13,8 +13,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. 
Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v2/README.md index 7db4b604..9213e3e9 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/dolly-v2/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/falcon/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/falcon/README.md index 695cd170..83f26317 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/falcon/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/falcon/README.md @@ -12,9 +12,8 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need -pip install bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default +pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install einops # additional package required for falcon-7b-instruct to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/flan-t5/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/flan-t5/README.md index a7d58fe5..ff40ca53 
100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/flan-t5/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/flan-t5/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/gpt-j/readme.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/gpt-j/readme.md index 7ee1fbb4..47268641 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/gpt-j/readme.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/gpt-j/readme.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. 
Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/internlm/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/internlm/README.md index 9e4cd903..de78ed60 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/internlm/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/internlm/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/llama2/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/llama2/README.md index 6d7bef2e..d2cf1698 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/llama2/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/llama2/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. 
Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mistral/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mistral/README.md index f01efdaa..3a7f5a13 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mistral/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mistral/README.md @@ -16,8 +16,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu # Refer to https://huggingface.co/mistralai/Mistral-7B-v0.1#troubleshooting, please make sure you are using a stable version of Transformers, 4.34.0 or newer. diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mixtral/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mixtral/README.md index 7cda38c2..9909a0d0 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mixtral/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mixtral/README.md @@ -16,8 +16,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu # Please make sure you are using a stable version of Transformers, 4.36.0 or newer. 
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mpt/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mpt/README.md index c7594390..12fbf26c 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mpt/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/mpt/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install einops # additional package required for mpt-7b-chat and mpt-30b-chat to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/phi-1_5/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/phi-1_5/README.md index 09d07df7..5f313258 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/phi-1_5/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/phi-1_5/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install einops # additional package required for phi-1_5 to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen-vl/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen-vl/README.md index 19ca669f..a116c161 100644 --- 
a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen-vl/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen-vl/README.md @@ -13,8 +13,7 @@ After installing conda, create a Python environment for BigDL-LLM: ```bash conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen/README.md index b43617cd..e304abea 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/qwen/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install tiktoken einops transformers_stream_generator # additional package required for Qwen-7B-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/replit/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/replit/README.md index a1f631a4..ba673e76 100644 --- 
a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/replit/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/replit/README.md @@ -13,8 +13,7 @@ After installing conda, create a Python environment for BigDL-LLM: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/solar-10.7b/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/solar-10.7b/README.md index d2fa9652..150702d6 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/solar-10.7b/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/solar-10.7b/README.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.35.2 # required by SOLAR-10.7B ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/starcoder/readme.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/starcoder/readme.md index ad8000cc..07402edf 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/starcoder/readme.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/starcoder/readme.md @@ -11,8 +11,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will 
install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/vicuna/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/vicuna/README.md index 0ea9149b..5f8a6968 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/vicuna/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/vicuna/README.md @@ -13,8 +13,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` ### 2. 
Configures OneAPI environment variables diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/voiceassistant/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/voiceassistant/README.md index fbdf4a66..074626f3 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/voiceassistant/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/voiceassistant/README.md @@ -13,8 +13,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install librosa soundfile datasets pip install accelerate diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/whisper/readme.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/whisper/readme.md index 901a5b9e..c14ba423 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/whisper/readme.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/whisper/readme.md @@ -12,8 +12,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install datasets soundfile librosa # required by audio processing ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/yi/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/yi/README.md index 51fed84e..3bd1d788 100644 --- 
a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/yi/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/yi/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install einops # additional package required for Yi-6B to conduct generation ``` diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/More-Data-Types/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/More-Data-Types/README.md index 240202f0..3fd08df5 100644 --- a/python/llm/example/GPU/HF-Transformers-AutoModels/More-Data-Types/README.md +++ b/python/llm/example/GPU/HF-Transformers-AutoModels/More-Data-Types/README.md @@ -8,8 +8,7 @@ We suggest using conda to manage environment: conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/aquila2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/aquila2/README.md index c25faa20..16844169 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/aquila2/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/aquila2/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command 
will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/baichuan/README.md b/python/llm/example/GPU/PyTorch-Models/Model/baichuan/README.md index a6528cfc..ecbeb0d1 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/baichuan/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/baichuan/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers_stream_generator # additional package required for Baichuan-13B-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md index 64e3bf85..f7a9f07e 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/baichuan2/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f 
https://developer.intel.com/ipex-whl-stable-xpu pip install transformers_stream_generator # additional package required for Baichuan2-7B-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/bluelm/README.md b/python/llm/example/GPU/PyTorch-Models/Model/bluelm/README.md index 6056ed84..3ce5839a 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/bluelm/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/bluelm/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/chatglm2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/chatglm2/README.md index be03f814..741803b7 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/chatglm2/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/chatglm2/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` @@ -73,8 +72,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as 
default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/chatglm3/README.md b/python/llm/example/GPU/PyTorch-Models/Model/chatglm3/README.md index 825ecd88..a3c05252 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/chatglm3/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/chatglm3/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` @@ -72,8 +71,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md b/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md index 1f83a680..244387d7 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/codellama/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda 
activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.34.1 # CodeLlamaTokenizer is supported in higher version of transformers ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/distil-whisper/README.md b/python/llm/example/GPU/PyTorch-Models/Model/distil-whisper/README.md index 1b3bb935..19bb7ee1 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/distil-whisper/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/distil-whisper/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install datasets soundfile librosa # required by audio processing ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/dolly-v1/README.md b/python/llm/example/GPU/PyTorch-Models/Model/dolly-v1/README.md index 15dd8b1e..e85a97c1 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/dolly-v1/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/dolly-v1/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as 
default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/dolly-v2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/dolly-v2/README.md index 72fa6c1b..83ce9da8 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/dolly-v2/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/dolly-v2/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/flan-t5/README.md b/python/llm/example/GPU/PyTorch-Models/Model/flan-t5/README.md index a7d58fe5..ff40ca53 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/flan-t5/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/flan-t5/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/llama2/README.md b/python/llm/example/GPU/PyTorch-Models/Model/llama2/README.md index 325f6488..c15cd720 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/llama2/README.md +++ 
b/python/llm/example/GPU/PyTorch-Models/Model/llama2/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/llava/README.md b/python/llm/example/GPU/PyTorch-Models/Model/llava/README.md index 485605e1..216d248c 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/llava/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/llava/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu git clone -b v1.1.1 --depth=1 https://github.com/haotian-liu/LLaVA.git # clone the llava libary diff --git a/python/llm/example/GPU/PyTorch-Models/Model/mistral/README.md b/python/llm/example/GPU/PyTorch-Models/Model/mistral/README.md index 0713eb8c..3cde5fc9 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/mistral/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/mistral/README.md @@ -16,8 +16,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can 
install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu # Refer to https://huggingface.co/mistralai/Mistral-7B-v0.1#troubleshooting, please make sure you are using a stable version of Transformers, 4.34.0 or newer. diff --git a/python/llm/example/GPU/PyTorch-Models/Model/mixtral/README.md b/python/llm/example/GPU/PyTorch-Models/Model/mixtral/README.md index 791cffaf..04764477 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/mixtral/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/mixtral/README.md @@ -16,8 +16,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu # Please make sure you are using a stable version of Transformers, 4.36.0 or newer. 
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/qwen-vl/README.md b/python/llm/example/GPU/PyTorch-Models/Model/qwen-vl/README.md index 9b7f2606..6f463f20 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/qwen-vl/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/qwen-vl/README.md @@ -13,8 +13,7 @@ After installing conda, create a Python environment for BigDL-LLM: ```bash conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install accelerate tiktoken einops transformers_stream_generator==0.0.4 scipy torchvision pillow tensorboard matplotlib # additional package required for Qwen-VL-Chat to conduct generation ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/replit/README.md b/python/llm/example/GPU/PyTorch-Models/Model/replit/README.md index 123da69d..4d683566 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/replit/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/replit/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/solar-10.7b/README.md b/python/llm/example/GPU/PyTorch-Models/Model/solar-10.7b/README.md index cf0ea048..a2d5d12c 100644 --- 
a/python/llm/example/GPU/PyTorch-Models/Model/solar-10.7b/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/solar-10.7b/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install transformers==4.35.2 # required by SOLAR-10.7B ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/starcoder/README.md b/python/llm/example/GPU/PyTorch-Models/Model/starcoder/README.md index 7f4d14ab..9cc4ea3f 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/starcoder/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/starcoder/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Model/yi/README.md b/python/llm/example/GPU/PyTorch-Models/Model/yi/README.md index 51fed84e..3bd1d788 100644 --- a/python/llm/example/GPU/PyTorch-Models/Model/yi/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Model/yi/README.md @@ -14,8 +14,7 @@ After installing conda, create a Python environment for BigDL-LLM: conda create -n llm python=3.9 # recommend to use Python 3.9 conda activate llm -# below command will install 
intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install einops # additional package required for Yi-6B to conduct generation ``` diff --git a/python/llm/example/GPU/PyTorch-Models/More-Data-Types/README.md b/python/llm/example/GPU/PyTorch-Models/More-Data-Types/README.md index d49eb84f..864e2291 100644 --- a/python/llm/example/GPU/PyTorch-Models/More-Data-Types/README.md +++ b/python/llm/example/GPU/PyTorch-Models/More-Data-Types/README.md @@ -12,8 +12,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/PyTorch-Models/Save-Load/README.md b/python/llm/example/GPU/PyTorch-Models/Save-Load/README.md index f9402bf2..504167e2 100644 --- a/python/llm/example/GPU/PyTorch-Models/Save-Load/README.md +++ b/python/llm/example/GPU/PyTorch-Models/Save-Load/README.md @@ -12,8 +12,7 @@ We suggest using conda to manage environment: ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu ``` diff --git a/python/llm/example/GPU/QLoRA-FineTuning/README.md b/python/llm/example/GPU/QLoRA-FineTuning/README.md index 
28777b52..86c75137 100644 --- a/python/llm/example/GPU/QLoRA-FineTuning/README.md +++ b/python/llm/example/GPU/QLoRA-FineTuning/README.md @@ -14,8 +14,7 @@ This example is ported from [bnb-4bit-training](https://colab.research.google.co ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install datasets transformers==4.34.0 pip install peft==0.5.0 diff --git a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md index 8cb6bd63..8d93167e 100644 --- a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md +++ b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md @@ -10,12 +10,11 @@ To run this example with BigDL-LLM on Intel GPUs, we have some recommended requi ```bash conda create -n llm python=3.9 conda activate llm -# below command will install intel_extension_for_pytorch==2.0.110+xpu as default -# you can install specific ipex/torch version for your need +# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu pip install datasets transformers==4.34.0 pip install fire peft==0.5.0 -pip install oneccl_bind_pt==2.0.100 -f https://developer.intel.com/ipex-whl-stable-xpu # necessary to run distributed finetuning +pip install oneccl_bind_pt==2.1.100 -f https://developer.intel.com/ipex-whl-stable-xpu # necessary to run distributed finetuning pip install accelerate==0.23.0 pip install bitsandbytes scipy ``` diff --git a/python/llm/example/GPU/README.md b/python/llm/example/GPU/README.md index 7ff7359b..65b27dee 100644 --- 
a/python/llm/example/GPU/README.md +++ b/python/llm/example/GPU/README.md @@ -19,17 +19,4 @@ This folder contains examples of running BigDL-LLM on Intel GPU: - Ubuntu 20.04 or later (Ubuntu 22.04 is preferred) ## Requirements -To apply Intel GPU acceleration, there’re several steps for tools installation and environment preparation. See the [GPU installation guide](https://bigdl.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html) for mode details. - -Step 1, please refer to our [driver installation](https://dgpu-docs.intel.com/driver/installation.html) for general purpose GPU capabilities. > **Note**: IPEX 2.0.110+xpu requires Intel GPU Driver version is [Stable 647.21](https://dgpu-docs.intel.com/releases/stable_647_21_20230714.html). - -Step 2, you also need to download and install [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html). OneMKL and DPC++ compiler are needed, others are optional. > **Note**: IPEX 2.0.110+xpu requires Intel® oneAPI Base Toolkit's version == 2023.2.0. - -## Best Known Configuration on Linux -For better performance, it is recommended to set environment variables on Linux: -```bash -export USE_XETLA=OFF -export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 -``` +To apply Intel GPU acceleration, there’re several steps for tools installation and environment preparation. See the [GPU installation guide](https://bigdl.readthedocs.io/en/latest/doc/LLM/Overview/install_gpu.html) for more details. \ No newline at end of file diff --git a/python/llm/example/GPU/vLLM-Serving/README.md b/python/llm/example/GPU/vLLM-Serving/README.md index 44af9e7e..dabe161c 100644 --- a/python/llm/example/GPU/vLLM-Serving/README.md +++ b/python/llm/example/GPU/vLLM-Serving/README.md @@ -38,6 +38,7 @@ pip3 install psutil pip3 install sentencepiece # Required for LLaMA tokenizer. pip3 install numpy pip3 install "transformers>=4.33.1" # Required for Code Llama.
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade "bigdl-llm[xpu]" -f https://developer.intel.com/ipex-whl-stable-xpu pip3 install fastapi pip3 install "uvicorn[standard]"