From 28c315a5b918c59a567dd36663831eb01c90e746 Mon Sep 17 00:00:00 2001
From: Ruonan Wang
Date: Thu, 21 Mar 2024 09:46:25 +0800
Subject: [PATCH] LLM: fix deepspeed error of finetuning on xpu (#10484)

---
 .../QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py          | 2 +-
 .../LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
index 6743971c..41f7dc88 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
@@ -32,7 +32,7 @@
 
 import os
 from typing import List
-
+os.environ["ACCELERATE_USE_XPU"] = "true"
 import fire
 import torch
 import transformers
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
index ed4ddcdf..10a62b69 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
@@ -7,10 +7,10 @@
     "contiguous_gradients": true,
     "overlap_comm": true
   },
-  "bp16": {
+  "bf16": {
     "enabled": true
   },
   "train_micro_batch_size_per_gpu": "auto",
   "gradient_accumulation_steps": "auto"
 
-  }
+}
\ No newline at end of file