LLM: fix deepspeed error of finetuning on xpu (#10484)
This commit is contained in:
parent 021d77fd22
commit 28c315a5b9
2 changed files with 3 additions and 3 deletions
@@ -32,7 +32,7 @@
import os
from typing import List

os.environ["ACCELERATE_USE_XPU"] = "true"
import fire
import torch
import transformers
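The Python half of the fix only concerns where the environment variable sits relative to the imports. Below is a minimal sketch of that ordering, not the example's full fine-tuning script; the rationale given in the comment is an assumption consistent with the hunk, namely that ACCELERATE_USE_XPU must already be in the environment before transformers (and, through it, accelerate) is imported and inspects it.

import os

# Set the flag before importing transformers/accelerate; presumably the
# XPU switch is only honoured if it is already in the environment when
# Accelerate sets up its device backend.
os.environ["ACCELERATE_USE_XPU"] = "true"

import torch          # noqa: E402  (imports intentionally follow the env var)
import transformers   # noqa: E402

print("ACCELERATE_USE_XPU =", os.environ["ACCELERATE_USE_XPU"])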
@@ -7,10 +7,10 @@
     "contiguous_gradients": true,
     "overlap_comm": true
   },
-  "bp16": {
+  "bf16": {
     "enabled": true
   },
   "train_micro_batch_size_per_gpu": "auto",
   "gradient_accumulation_steps": "auto"
 }
}
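The JSON half of the fix renames a misspelled section key: DeepSpeed's config schema uses "bf16" for bfloat16 training, so a "bp16" block would not be recognised and bfloat16 presumably never got enabled, which fits the fine-tuning error named in the commit title. The sketch below restates the corrected options from the hunk as a Python dict; the enclosing "zero_optimization" key and any fields outside the hunk are assumptions, since they are not visible in this diff.

import json

ds_config = {
    # "zero_optimization" is an assumption; the enclosing key is outside the hunk.
    "zero_optimization": {
        "contiguous_gradients": True,
        "overlap_comm": True,
    },
    "bf16": {"enabled": True},  # was misspelled "bp16" before this commit
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
}

print(json.dumps(ds_config, indent=2))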