From 30f668c206eeea29b16d1d7dbd2bd1aa1db53807 Mon Sep 17 00:00:00 2001
From: Jinhe
Date: Thu, 31 Oct 2024 15:59:40 +0800
Subject: [PATCH] updated transformers & accelerate requirements (#12301)

---
 .../example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
index 4f32e50e..621c478e 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
@@ -15,8 +15,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install transformers==4.36.0 datasets
-pip install fire peft==0.10.0
+pip install transformers==4.36.1 datasets
+pip install fire peft==0.10.0 accelerate==0.23.0
 pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
 pip install bitsandbytes scipy
 # configures OneAPI environment variables
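
Note (not part of the patch): since this change tightens the version pins to transformers==4.36.1, peft==0.10.0, and accelerate==0.23.0, a quick way to confirm an environment matches the updated README is the minimal Python sketch below. It only uses the standard-library importlib.metadata and the version numbers taken from the hunk above; the script itself is hypothetical and not included in the repository.

# Hypothetical sanity check: compare installed package versions against the
# pins introduced by this patch. Assumes the active Python environment is the
# one the README's pip commands were run in.
from importlib.metadata import version, PackageNotFoundError

# Version pins taken from the updated README lines in this patch.
expected = {
    "transformers": "4.36.1",
    "peft": "0.10.0",
    "accelerate": "0.23.0",
}

for package, pinned in expected.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package}: NOT INSTALLED (expected {pinned})")
        continue
    status = "OK" if installed == pinned else f"MISMATCH (expected {pinned})"
    print(f"{package}: {installed} {status}")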