LLM: fix deepspeed error of finetuning on xpu (#10484)
commit 28c315a5b9
parent 021d77fd22
2 changed files with 3 additions and 3 deletions
@@ -32,7 +32,7 @@
 import os
 from typing import List
+os.environ["ACCELERATE_USE_XPU"] = "true"
 import fire
 import torch
 import transformers
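For context, a minimal sketch (not part of the commit) of the import ordering the hunk above establishes. The assumption here, not stated in the diff itself, is that accelerate consults ACCELERATE_USE_XPU when it is first imported (transformers pulls it in transitively), so the flag must be exported before that import for the XPU backend to be picked up under DeepSpeed:

import os

# Assumption: accelerate reads ACCELERATE_USE_XPU at import time, so the
# variable has to be in the environment before transformers is imported.
os.environ["ACCELERATE_USE_XPU"] = "true"

import transformers  # noqa: E402  accelerate is imported transitively here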
@@ -7,10 +7,10 @@
     "contiguous_gradients": true,
     "overlap_comm": true
   },
-  "bp16": {
+  "bf16": {
     "enabled": true
   },
   "train_micro_batch_size_per_gpu": "auto",
   "gradient_accumulation_steps": "auto"
 }
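For reference, a small sketch of the corrected config expressed as a Python dict, assuming it is consumed through Hugging Face's Trainer (the diff does not show how the file is loaded, and the output path below is hypothetical). DeepSpeed's bfloat16 section is spelled "bf16", which is what the hunk above fixes:

from transformers import TrainingArguments

# Corrected DeepSpeed config as a dict; "bf16" is the section name DeepSpeed
# recognizes, matching the rename in the hunk above.
ds_config = {
    "zero_optimization": {
        "stage": 2,                    # assumption: ZeRO stage used by the example
        "contiguous_gradients": True,
        "overlap_comm": True,
    },
    "bf16": {"enabled": True},
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
}

# Hypothetical usage: Trainer accepts either a path to the json file or a dict.
training_args = TrainingArguments(
    output_dir="./finetune-out",  # hypothetical path, for illustration only
    bf16=True,
    deepspeed=ds_config,
)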