LLM: update qlora alpaca example to change lora usage (#9835)

* update example

* fix style
This commit is contained in:
Ruonan Wang 2024-01-04 15:22:20 +08:00 committed by GitHub
parent 05b681fa85
commit 8504a2bbca

View file

@@ -196,10 +196,18 @@ def train(
else:
# According to the QLoRA paper, using "nf4" could yield better model quality than "int4"
# Default 4-bit format for qa-lora is sym_int4
if training_mode == "lora":
model = AutoModelForCausalLM.from_pretrained(
base_model,
load_in_low_bit="bf16",
optimize_model=False,
torch_dtype=torch.bfloat16,
modules_to_not_convert=["lm_head"],
)
else:
# use bnb_config for qlora/qalora/relora, which use 4bit for base model
if training_mode == "qalora":
low_bit_format = "int4"
elif training_mode == "lora":
low_bit_format = "bf16"
else:
low_bit_format = "nf4"
bnb_config = BitsAndBytesConfig(