diff --git a/python/llm/example/GPU/LLM-Finetuning/LISA/README.md b/python/llm/example/GPU/LLM-Finetuning/LISA/README.md
index c02fdd5f..c9c83445 100644
--- a/python/llm/example/GPU/LLM-Finetuning/LISA/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/LISA/README.md
@@ -13,10 +13,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install bitsandbytes==0.43.0
-pip install datasets==2.18.0
-pip install --upgrade transformers==4.36.0
-pip install scipy fire
+pip install transformers==4.45.0 "trl<0.12.0" datasets
+pip install bitsandbytes==0.45.1 scipy fire
 ```
 
 ### 2. LISA Finetune
@@ -51,23 +49,23 @@ Optional parameters for `lisa_finetuning.py`:
 ```log
 ......
-{'loss': 1.8391, 'learning_rate': 1.9967238104745695e-05, 'epoch': 0.03}
-{'loss': 1.8242, 'learning_rate': 1.9869167087338908e-05, 'epoch': 0.05}
+{'loss': 1.8391, 'learning_rate': 1.9967238104745695e-05, 'epoch': 0.03}
+{'loss': 1.8242, 'learning_rate': 1.9869167087338908e-05, 'epoch': 0.05}
 5%|██████▉ | 20/388 [xx:xx