Fix trl version and padding in trl qlora example (#12368)
* Change trl to 0.9.6
* Enable padding to avoid padding-related errors.
parent fad15c8ca0
commit 2dfcc36825
2 changed files with 4 additions and 1 deletion
@@ -19,7 +19,7 @@ conda activate llm
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 pip install transformers==4.36.0 datasets
 pip install peft==0.10.0
-pip install bitsandbytes scipy "trl<0.12.0"
+pip install bitsandbytes scipy trl==0.9.6
 ```
 
 ### 2. Configures OneAPI environment variables
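The hunk above pins exact package versions in the example's install instructions. As a quick sanity check after installation (a minimal sketch, not part of this commit), the pinned versions can be confirmed from Python:

```python
# Sanity check for the versions pinned above (illustrative only, not part of
# the commit): each of these packages exposes __version__.
import peft
import transformers
import trl

print(trl.__version__)           # expected: 0.9.6
print(transformers.__version__)  # expected: 4.36.0
print(peft.__version__)          # expected: 0.10.0
```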
@@ -105,6 +105,9 @@ if __name__ == "__main__":
             gradient_checkpointing=True, # can further reduce memory but slower
         ),
         dataset_text_field="instruction",
+        data_collator=transformers.DataCollatorForSeq2Seq(
+            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
+        ),
     )
     model.config.use_cache = False # silence the warnings. Please re-enable for inference!
     result = trainer.train()
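For context, here is a minimal sketch of how the added collator slots into the SFTTrainer call in a setup like the example script's. `model`, `tokenizer`, and `data` are placeholders for the loaded model, its tokenizer, and the training dataset, and the TrainingArguments values are illustrative rather than taken from this commit.

```python
import transformers
from trl import SFTTrainer  # assumes trl==0.9.6, as pinned in the README change above


def build_trainer(model, tokenizer, data):
    """Sketch of the trainer setup after this commit; argument values are illustrative."""
    return SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=4,
            max_steps=200,
            output_dir="outputs",
            gradient_checkpointing=True,  # can further reduce memory but slower
        ),
        dataset_text_field="instruction",
        # Added by this commit: dynamically pad each batch so that samples of
        # different lengths can be collated together, avoiding padding-related
        # errors during training.
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
```

With `padding=True` the collator pads each batch to its longest sample, and `pad_to_multiple_of=8` rounds that length up to a multiple of 8, which keeps the padded tensors friendly to mixed-precision kernels.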