LLM:Add qwen moe example libs md (#10828)

This commit is contained in:
Wang, Jian4 2024-04-22 10:03:19 +08:00 committed by GitHub
parent 1edb19c1dd
commit 5f95054f97
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 16 additions and 0 deletions

View file

@ -15,6 +15,10 @@ conda activate llm
pip install --pre --upgrade ipex-llm[all] # install ipex-llm with 'all' option
pip install transformers==4.37.0 # install the transformers which support Qwen2
# only for Qwen1.5-MoE-A2.7B
pip install transformers==4.40.0
pip install trl==0.8.1
```
### 2. Run

View file

@ -16,6 +16,10 @@ conda activate llm
pip install --pre --upgrade ipex-llm[all] # install the latest ipex-llm nightly build with 'all' option
pip install transformers==4.37.0 # install transformers which supports Qwen2
# only for Qwen1.5-MoE-A2.7B
pip install transformers==4.40.0
pip install trl==0.8.1
```
### 2. Run

View file

@ -16,6 +16,10 @@ conda activate llm
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.37.0 # install transformers which supports Qwen2
# only for Qwen1.5-MoE-A2.7B
pip install transformers==4.40.0
pip install trl==0.8.1
``` ```
#### 1.2 Installation on Windows #### 1.2 Installation on Windows

View file

@ -30,6 +30,10 @@ pip install dpcpp-cpp-rt==2024.0.2 mkl-dpcpp==2024.0.0 onednn==2024.0.0
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.37.0 # install transformers which supports Qwen2
# only for Qwen1.5-MoE-A2.7B
pip install transformers==4.40.0
pip install trl==0.8.1
``` ```
### 2. Configures OneAPI environment variables for Linux ### 2. Configures OneAPI environment variables for Linux