[LLM] Small fixes for finetune related examples and UTs (#9870)
parent b2aa267f50
commit 023679459e
3 changed files with 4 additions and 3 deletions
.github/workflows/llm_unit_tests.yml (vendored), 3 changes
@@ -324,7 +324,8 @@ jobs:
       - name: Run LLM example tests
         shell: bash
         run: |
-          python -m pip install transformers==4.34.0 peft==0.5.0 accelerate==0.23.0
+          python -m pip uninstall datasets -y
+          python -m pip install transformers==4.34.0 datasets peft==0.5.0 accelerate==0.23.0
           python -m pip install bitsandbytes scipy
           # Specific oneapi position on arc ut test machines
           if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
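For reference, after this change the test step pins transformers 4.34.0, peft 0.5.0 and accelerate 0.23.0 and reinstalls datasets from PyPI. A quick way to confirm what actually resolved on a runner is a pip query like the sketch below; adding it as an extra step is an illustrative assumption, not part of this commit.

# Hypothetical post-install check (not in the workflow): list the resolved package versions
python -m pip show transformers datasets peft accelerate bitsandbytes scipy | grep -E '^(Name|Version):'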
@@ -16,7 +16,7 @@ conda create -n llm python=3.9
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-pip install datasets transformers==4.34.0
+pip install transformers==4.34.0 datasets
 pip install peft==0.5.0
 pip install accelerate==0.23.0
 pip install bitsandbytes scipy
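The example's setup now installs datasets alongside the pinned transformers 4.34.0. As a minimal sanity check that the stack imports cleanly inside the conda env created above, something like the following can be run; it is an illustrative sketch, not a step from the example itself.

# Hypothetical check, assuming the "llm" conda env from the commands above is active
python -c "import transformers, peft, accelerate, datasets; print(transformers.__version__, peft.__version__, accelerate.__version__, datasets.__version__)"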
@@ -12,7 +12,7 @@ conda create -n llm python=3.9
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu
-pip install datasets transformers==4.34.0
+pip install transformers==4.34.0 datasets
 pip install fire peft==0.5.0
 pip install oneccl_bind_pt==2.1.100 -f https://developer.intel.com/ipex-whl-stable-xpu # necessary to run distributed finetuning
 pip install accelerate==0.23.0
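This example additionally depends on oneccl_bind_pt for distributed finetuning. A hedged way to verify that torch and the oneCCL bindings load after the installs above is sketched below; the import name oneccl_bindings_for_pytorch is what recent oneccl_bind_pt wheels expose, which is an assumption here rather than something stated by this commit.

# Hypothetical check (not part of the example): confirm torch and the oneCCL bindings import
python -c "import torch, oneccl_bindings_for_pytorch; print(torch.__version__)"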