LLM: optimize QLoRA by reducing convert time (#9370)

Ruonan Wang 2023-11-08 13:14:34 +08:00 committed by GitHub
parent 298b64217e
commit 7e8fb29b7c


@@ -448,6 +448,8 @@ class LowBitLinear(nn.Linear):
                                                  input_seq_size)
                 result = result.to(x.dtype)
             else:
+                if torch.xpu.is_autocast_xpu_enabled():
+                    x_2d = x_2d.to(torch.xpu.get_autocast_xpu_dtype())
                 result = linear_q4_0.forward_new(x_2d, self.weight.data, self.weight.qtype,
                                                  input_seq_size)
             new_shape = x_shape[:-1] + (self.out_len,)
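Below is a minimal sketch of the pattern the two added lines apply: when XPU autocast is active, cast the 2-D activation to the autocast dtype once, before invoking the low-bit kernel. The helper name low_bit_forward and the kernel parameter are hypothetical stand-ins (the real call is linear_q4_0.forward_new inside LowBitLinear); the getattr guards are only there so the sketch also loads on builds without the XPU autocast helpers. This is an illustration, not the repository's actual code.

    import torch


    def low_bit_forward(x_2d, weight, qtype, input_seq_size, kernel):
        # If XPU autocast is enabled, convert the input up front so the
        # low-bit kernel already produces output in the autocast dtype
        # and no extra result conversion is needed afterwards.
        # The two torch.xpu autocast helpers are the ones used in the
        # patch above; they exist only on XPU-enabled builds, hence the
        # defensive getattr checks in this sketch.
        xpu = getattr(torch, "xpu", None)
        if xpu is not None and getattr(xpu, "is_autocast_xpu_enabled", None):
            if xpu.is_autocast_xpu_enabled():
                x_2d = x_2d.to(xpu.get_autocast_xpu_dtype())
        return kernel(x_2d, weight, qtype, input_seq_size)

Under fp16/bf16 autocast during QLoRA fine-tuning, this presumably shifts the dtype conversion onto the input activation rather than the kernel's result, which appears to be where the convert-time reduction mentioned in the commit title comes from.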