Upgrade Peft to 0.10.0 in finetune examples and docker (#10930)
* Upgrade Peft to 0.10.0 in finetune examples.
* Upgrade Peft to 0.10.0 in docker.
commit c11170b96f (parent d7ca5d935b)
11 changed files with 22 additions and 22 deletions
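For reference, a quick environment check (illustrative, not part of this commit) against the versions most of the updated install commands pin (note that the XPU LoRA Dockerfile hunk below pins peft==0.6.0 instead). It assumes peft, transformers, and accelerate have already been installed following one of the updated examples:

```python
# Sanity check (illustrative, not part of this commit): verify that the
# installed packages match the versions pinned in the updated examples.
import accelerate
import peft
import transformers

expected = {"peft": "0.10.0", "transformers": "4.36.0", "accelerate": "0.23.0"}
installed = {
    "peft": peft.__version__,
    "transformers": transformers.__version__,
    "accelerate": accelerate.__version__,
}
for name, want in expected.items():
    got = installed[name]
    print(f"{name}: installed {got}, expected {want} -> {'OK' if got == want else 'MISMATCH'}")
```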
@@ -48,8 +48,8 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
 pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.1.0%2Bcpu-cp311-cp311-linux_x86_64.whl && \
 pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable && \
 # install huggingface dependencies
-pip install datasets transformers==4.35.0 && \
-pip install fire peft==0.5.0 && \
+pip install datasets transformers==4.36.0 && \
+pip install fire peft==0.10.0 && \
 pip install accelerate==0.23.0 && \
 pip install bitsandbytes && \
 # get qlora example code

@@ -61,8 +61,8 @@ RUN mkdir -p /ipex_llm/data && mkdir -p /ipex_llm/model && \
 pip install intel_extension_for_pytorch==2.0.100 && \
 pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable && \
 # install huggingface dependencies
-pip install datasets transformers==4.35.0 && \
-pip install fire peft==0.5.0 && \
+pip install datasets transformers==4.36.0 && \
+pip install fire peft==0.10.0 && \
 pip install accelerate==0.23.0 && \
 # install basic dependencies
 apt-get update && apt-get install -y curl wget gpg gpg-agent && \

@@ -3,7 +3,7 @@ ARG http_proxy
 ARG https_proxy
 ENV TZ=Asia/Shanghai
 ARG PIP_NO_CACHE_DIR=false
-ENV TRANSFORMERS_COMMIT_ID=95fe0f5
+ENV TRANSFORMERS_COMMIT_ID=1466677
 
 # retrive oneapi repo public key
 RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \

@@ -33,7 +33,7 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ && \
 # install huggingface dependencies
 pip install git+https://github.com/huggingface/transformers.git@${TRANSFORMERS_COMMIT_ID} && \
-pip install peft==0.5.0 datasets accelerate==0.23.0 && \
+pip install peft==0.6.0 datasets accelerate==0.23.0 && \
 pip install bitsandbytes scipy && \
 git clone https://github.com/intel-analytics/IPEX-LLM.git && \
 mv IPEX-LLM/python/llm/example/GPU/LLM-Finetuning/common /common && \

@@ -19,8 +19,8 @@ This example is ported from [bnb-4bit-training](https://colab.research.google.co
 conda create -n llm python=3.11
 conda activate llm
 pip install --pre --upgrade ipex-llm[all]
-pip install transformers==4.34.0
-pip install peft==0.5.0
+pip install transformers==4.36.0
+pip install peft==0.10.0
 pip install datasets
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy

@@ -8,8 +8,8 @@ This example ports [Alpaca-LoRA](https://github.com/tloen/alpaca-lora/tree/main)
 conda create -n llm python=3.11
 conda activate llm
 pip install --pre --upgrade ipex-llm[all]
-pip install datasets transformers==4.35.0
-pip install fire peft==0.5.0
+pip install datasets transformers==4.36.0
+pip install fire peft==0.10.0
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```

@@ -17,8 +17,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install trl peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install trl peft==0.10.0
 pip install accelerate==0.23.0
 pip install bitsandbytes
 ```

@@ -12,8 +12,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install fire peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy

@@ -12,8 +12,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install fire peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy

@@ -17,8 +17,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install peft==0.10.0
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy
 ```

@@ -17,8 +17,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install peft==0.10.0
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy trl
 ```

@@ -12,8 +12,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.34.0 datasets
-pip install fire peft==0.5.0
+pip install transformers==4.36.0 datasets
+pip install fire peft==0.10.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy

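As a follow-up to the upgrade, a minimal LoRA smoke test (a sketch only; the model id and target modules below are assumptions for illustration, not values from this commit) showing that the LoraConfig/get_peft_model API these examples rely on works unchanged with peft 0.10.0:

```python
# Minimal LoRA smoke test against peft 0.10.0 (illustrative; the model id and
# target modules are assumptions, not taken from this commit).
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # hypothetical model id
config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],  # illustrative attention projections
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(base, config)
model.print_trainable_parameters()  # prints trainable vs. total parameter counts
```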