fix lisa finetune example (#12775)
parent 2e5f2e5dda
commit c0d6b282b8
2 changed files with 14 additions and 15 deletions
@@ -13,10 +13,8 @@ conda create -n llm python=3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-pip install bitsandbytes==0.43.0
-pip install datasets==2.18.0
-pip install --upgrade transformers==4.36.0
-pip install scipy fire
+pip install transformers==4.45.0 "trl<0.12.0" datasets
+pip install bitsandbytes==0.45.1 scipy fire
 ```
 
 ### 2. LISA Finetune
@@ -90,9 +90,10 @@ def train(
     model = AutoModelForCausalLM.from_pretrained(
         base_model,
         load_in_low_bit="bf16",
-        optimize_model=True,
+        optimize_model=False,
         torch_dtype=torch.bfloat16,
-        trust_remote_code=True
+        trust_remote_code=True,
+        modules_to_not_convert=["lm_head"],  # avoid optimize lm_head
     )
 
     model = model.to("xpu")
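
For context, a minimal sketch of how the patched loading call sits in the example after this change. It assumes, as ipex-llm's GPU finetuning examples typically do, that `AutoModelForCausalLM` comes from `ipex_llm.transformers`; the helper name `load_base_model` and its `base_model` argument are illustrative, not part of the diff.

```python
# Minimal sketch (assumptions noted above), not the exact example script.
import torch
from ipex_llm.transformers import AutoModelForCausalLM


def load_base_model(base_model: str):
    # Load the base model in bf16 without ipex-llm's extra model optimizations
    # (optimize_model=False after this commit), keeping lm_head unconverted.
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        load_in_low_bit="bf16",
        optimize_model=False,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        modules_to_not_convert=["lm_head"],  # avoid optimize lm_head
    )
    # Move the model to the Intel GPU, mirroring model = model.to("xpu") in the diff.
    return model.to("xpu")
```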