[LLM] apply allreduce and bias to training in LowBitLinear (#9395)
parent 40cead6b5b
commit df8e4d7889

1 changed file with 8 additions and 8 deletions
@@ -463,7 +463,7 @@ class LowBitLinear(nn.Linear):
         if self.training and x.requires_grad:
             result = MatMulLowBitCPU.apply(x, self.weight)
         else:
-            # Step 1. convert if necessary, and compute a linear result
+            # convert if necessary, and compute a linear result
             if IS_SERVER and (not IS_SPR) and \
                     self.qtype == SYM_INT4 and x_2d.shape[0] >= TORCH_LINEAR_THRESHOLD:
                 x0_fp32 = ggml_int4_convert_fp32(x0, self.weight_shape, self.weight_length)
@@ -473,7 +473,7 @@ class LowBitLinear(nn.Linear):
                 result = ggml_matmul_src1_x_src0_t(x0, x_2d, self.weight_shape, self.qtype)
                 new_shape = x_shape[:-1] + (self.out_len,)
                 result = result.view(new_shape)
-            # Step 2. allreduce to combine partial results and add bias if necessary
-            if self.mp_group is not None:
-                # deepspeed distributed mode
-                from deepspeed import comm as dist
+        # allreduce to combine partial results and add bias if necessary
+        if self.mp_group is not None:
+            # deepspeed distributed mode
+            from deepspeed import comm as dist
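To see what the change buys, here is a minimal, self-contained sketch of the control flow this commit establishes: the low-bit matmul still differs between the training and inference paths, but the allreduce and the bias add now run after both branches instead of only inside the inference one. This is not BigDL's implementation; LowBitLinearSketch and its plain fp32 matmuls are hypothetical stand-ins for MatMulLowBitCPU and the ggml kernels, and torch.distributed.all_reduce stands in for deepspeed's dist.inference_all_reduce.

# Sketch only: fp32 matmuls stand in for the low-bit kernels, and
# torch.distributed.all_reduce stands in for deepspeed's inference_all_reduce.
import torch
import torch.distributed as dist
import torch.nn.functional as F


class LowBitLinearSketch(torch.nn.Module):
    def __init__(self, in_len, out_len, bias=True, mp_group=None):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(out_len, in_len))
        self.bias = torch.nn.Parameter(torch.zeros(out_len)) if bias else None
        self.mp_group = mp_group  # model-parallel process group, or None

    def forward(self, x):
        if self.training and x.requires_grad:
            # training path: stand-in for MatMulLowBitCPU.apply(x, self.weight)
            result = x @ self.weight.t()
        else:
            # inference path: stand-in for the dequantize / ggml-matmul branch
            result = F.linear(x, self.weight)
        # After this commit, both paths fall through to here: each rank holds
        # only a partial result, so sum across the group, then add the bias.
        if self.mp_group is not None:
            dist.all_reduce(result, group=self.mp_group)
        if self.bias is not None:
            result = result + self.bias
        return result

With mp_group=None the sketch runs in a single process with no distributed setup. The case the old code missed is the training branch: before this commit it returned the MatMulLowBitCPU output directly, skipping both the reduction and the bias.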