From 023679459eb099107a7eb9e403b5fd554d31ec43 Mon Sep 17 00:00:00 2001
From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com>
Date: Tue, 9 Jan 2024 18:05:03 +0800
Subject: [PATCH] [LLM] Small fixes for finetune related examples and UTs
 (#9870)

---
 .github/workflows/llm_unit_tests.yml                           | 3 ++-
 python/llm/example/GPU/QLoRA-FineTuning/README.md              | 2 +-
 python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/llm_unit_tests.yml b/.github/workflows/llm_unit_tests.yml
index 65ff2856..9c1bf86d 100644
--- a/.github/workflows/llm_unit_tests.yml
+++ b/.github/workflows/llm_unit_tests.yml
@@ -324,7 +324,8 @@ jobs:
       - name: Run LLM example tests
         shell: bash
         run: |
-          python -m pip install transformers==4.34.0 peft==0.5.0 accelerate==0.23.0
+          python -m pip uninstall datasets -y
+          python -m pip install transformers==4.34.0 datasets peft==0.5.0 accelerate==0.23.0
           python -m pip install bitsandbytes scipy
           # Specific oneapi position on arc ut test machines
           if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
diff --git a/python/llm/example/GPU/QLoRA-FineTuning/README.md b/python/llm/example/GPU/QLoRA-FineTuning/README.md
index 86c75137..efa0a2cc 100644
--- a/python/llm/example/GPU/QLoRA-FineTuning/README.md
+++ b/python/llm/example/GPU/QLoRA-FineTuning/README.md
@@ -16,7 +16,7 @@ conda create -n llm python=3.9
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-pip install datasets transformers==4.34.0
+pip install transformers==4.34.0 datasets
 pip install peft==0.5.0
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy
diff --git a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md
index 8d93167e..2c62cc94 100644
--- a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md
+++ b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/README.md
@@ -12,7 +12,7 @@ conda create -n llm python=3.9
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-pip install datasets transformers==4.34.0
+pip install transformers==4.34.0 datasets
 pip install fire peft==0.5.0
 pip install oneccl_bind_pt==2.1.100 -f https://developer.intel.com/ipex-whl-stable-xpu # necessary to run distributed finetuning
 pip install accelerate==0.23.0