Remove accelerate 0.23.0 install command in readme and docker (#11333)
* ipex-llm's accelerate dependency has been upgraded to 0.23.0, so the separate accelerate 0.23.0 install command is removed from the README and Docker files.
parent ef4b6519fb
commit de4bb97b4f

17 changed files with 2 additions and 17 deletions
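Since accelerate 0.23.0 is now pulled in by ipex-llm itself, the explicit install commands below are redundant. As a quick post-install sanity check (a minimal sketch, assuming ipex-llm is already installed in the active environment and still resolves accelerate to 0.23.0), the transitively installed version can be confirmed with:

```
# Confirm accelerate is installed transitively by ipex-llm
# (expected to report 0.23.0, per the dependency upgrade this commit relies on)
pip show accelerate
python -c "import accelerate; print(accelerate.__version__)"
```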
				
			
		
							
								
								
									
.github/workflows/llm_unit_tests.yml (vendored, 2 changes)
@@ -381,7 +381,7 @@ jobs:
         shell: bash
         run: |
           python -m pip uninstall datasets -y
-          python -m pip install transformers==4.36.0 datasets peft==0.10.0 accelerate==0.23.0
+          python -m pip install transformers==4.36.0 datasets peft==0.10.0
           python -m pip install bitsandbytes scipy
           # Specific oneapi position on arc ut test machines
           if [[ "$RUNNER_OS" == "Linux" ]]; then
@@ -50,7 +50,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
     # install huggingface dependencies
     pip install datasets transformers==4.36.0 && \
     pip install fire peft==0.10.0 && \
-    pip install accelerate==0.23.0 && \
     pip install bitsandbytes && \
     # get qlora example code
     cd /ipex_llm && \
@@ -63,7 +63,6 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
     # install huggingface dependencies
     pip install datasets transformers==4.36.0 && \
     pip install fire peft==0.10.0 && \
-    pip install accelerate==0.23.0 && \
     # install basic dependencies
     apt-get update && apt-get install -y curl wget gpg gpg-agent && \
     # Install Intel oneAPI keys.
@@ -41,7 +41,7 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
     rm -rf IPEX-LLM && \
     # install transformers & peft dependencies
     pip install transformers==4.36.0 && \
-    pip install peft==0.10.0 datasets accelerate==0.23.0 && \
+    pip install peft==0.10.0 datasets && \
     pip install bitsandbytes scipy fire && \
     # Prepare accelerate config
     mkdir -p /root/.cache/huggingface/accelerate && \
@@ -216,7 +216,6 @@ pip install -e .
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 # install transformers etc
-pip install accelerate==0.23.0
 # to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544
 pip install datasets==2.15.0
 pip install transformers==4.37.0
@@ -22,7 +22,6 @@ pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pyt
 pip install transformers==4.36.0
 pip install peft==0.10.0
 pip install datasets
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -10,7 +10,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[all]
 pip install datasets transformers==4.36.0
 pip install fire peft==0.10.0
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -19,7 +19,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.36.0 datasets
 pip install trl peft==0.10.0
-pip install accelerate==0.23.0
 pip install bitsandbytes
 ```
@@ -17,7 +17,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
 pip install transformers==4.36.0 datasets
 pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -13,7 +13,6 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install accelerate==0.23.0
 pip install bitsandbytes==0.43.0
 pip install datasets==2.18.0
 pip install --upgrade transformers==4.36.0
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
 pip install transformers==4.36.0 datasets
 pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
 pip install transformers==4.36.0 datasets
 pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -18,7 +18,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
 pip install transformers==4.36.0 datasets
 pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 # configures OneAPI environment variables
 source /opt/intel/oneapi/setvars.sh # necessary to run before installing deepspeed
@@ -19,7 +19,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.36.0 datasets
 pip install peft==0.10.0
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -19,7 +19,6 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.36.0 datasets
 pip install peft==0.10.0
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy trl
 ```
@@ -15,7 +15,6 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
 pip install transformers==4.36.0 datasets
 pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
-pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```
@@ -132,7 +132,6 @@ pip install -e .
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 # install transformers etc
-pip install accelerate==0.23.0
 # to avoid https://github.com/OpenAccess-AI-Collective/axolotl/issues/1544
 pip install datasets==2.15.0
 pip install transformers==4.37.0