diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
index 6743971c..41f7dc88 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
@@ -32,7 +32,7 @@ import os
 
 from typing import List
 
-
+os.environ["ACCELERATE_USE_XPU"] = "true"
 import fire
 import torch
 import transformers
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
index ed4ddcdf..10a62b69 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/deepspeed_zero2.json
@@ -7,10 +7,10 @@
     "contiguous_gradients": true,
     "overlap_comm": true
   },
-  "bp16": {
+  "bf16": {
     "enabled": true
   },
   "train_micro_batch_size_per_gpu": "auto",
   "gradient_accumulation_steps": "auto"
-  }
+}
\ No newline at end of file