[LLM] refactor cpu low-bit forward logic (#9366)
* [LLM] refactor cpu low-bit forward logic
* fix style
* Update low_bit_linear.py
* Update low_bit_linear.py
* refine
This commit is contained in:
parent af94058203
commit fae6db3ddc
1 changed file with 10 additions and 18 deletions
@@ -465,27 +465,19 @@ class LowBitLinear(nn.Linear):
if self.training and x.requires_grad:
    result = MatMulLowBitCPU.apply(x, self.weight)
else:
    # Step 1. convert if necessary, and compute a linear result
    if IS_SERVER and (not IS_SPR) and \
            self.qtype == SYM_INT4 and x_2d.shape[0] >= TORCH_LINEAR_THRESHOLD:
        x0_fp32 = ggml_int4_convert_fp32(x0, self.weight_shape, self.weight_length)
        if self.mp_group is None:
            # non-distributed mode
            result = F.linear(x, x0_fp32, self.bias)
        else:
            result = F.linear(x, x0_fp32)
            from deepspeed import comm as dist
            # Parallel F.linear should be avoided,
            # so deepspeed allreduces after the operation
            dist.inference_all_reduce(result, group=self.mp_group)
            if self.bias is not None:
                result += self.bias
    else:
        # Weight does not need conversion
        result = ggml_matmul_src1_x_src0_t(x0, x_2d, self.weight_shape, self.qtype)
        new_shape = x_shape[:-1] + (self.out_len,)
        result = result.view(new_shape)
    # bias is consistent among multiple instances, so deepspeed
    # allreduces only the result without bias to reduce communication
    # Step 2. allreduce to combine partial results and add bias if necessary
    if self.mp_group is not None:
        # deepspeed distributed mode
        from deepspeed import comm as dist
        dist.inference_all_reduce(result, group=self.mp_group)
        if self.bias is not None:
            result += self.bias
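For context on Step 1's dispatch: on server CPUs (other than Sapphire Rapids), once the batch dimension crosses TORCH_LINEAR_THRESHOLD it pays off to dequantize the int4 weight once and call torch's F.linear instead of the low-bit kernel. The sketch below illustrates only that threshold dispatch; dequantize and low_bit_matmul are hypothetical fp32 stand-ins for the real ggml_int4_convert_fp32 / ggml_matmul_src1_x_src0_t calls, and the threshold value is illustrative, not the module's constant.

    import torch
    import torch.nn.functional as F

    TORCH_LINEAR_THRESHOLD = 512  # illustrative value, not the module's constant

    def forward_cpu(x_2d: torch.Tensor, weight: torch.Tensor, bias=None):
        # Hypothetical stand-ins: the real code keeps the weight packed in a
        # low-bit format and calls into ggml kernels instead of these
        # plain fp32 simulations.
        def dequantize(w):
            return w

        def low_bit_matmul(x, w):
            return x @ w.t()

        if x_2d.shape[0] >= TORCH_LINEAR_THRESHOLD:
            # Large batch: pay the one-off dequantization cost, then use
            # torch's heavily optimized F.linear.
            result = F.linear(x_2d, dequantize(weight))
        else:
            # Small batch: the low-bit kernel avoids dequantizing the weight.
            result = low_bit_matmul(x_2d, weight)
        if bias is not None:
            result += bias
        return result

    x = torch.randn(4, 8)
    w = torch.randn(16, 8)
    print(forward_cpu(x, w, torch.randn(16)).shape)  # torch.Size([4, 16])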
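The refactor converges on the Step 2 pattern: each rank computes a partial linear result over its weight shard, the partials are summed with an allreduce, and the bias is added afterwards. Since the bias is replicated on every rank, adding it before the allreduce would count it world_size times; adding it after keeps it counted once and keeps it out of the communication. A minimal, self-contained sketch follows, with torch.distributed.all_reduce standing in for deepspeed's dist.inference_all_reduce; the function name and the single-process gloo setup are for demonstration only.

    import os
    import torch
    import torch.distributed as dist
    import torch.nn.functional as F

    def partial_linear_allreduce(x, weight_shard, bias, group=None):
        # Each rank holds a shard of the weight, so F.linear produces a
        # partial sum that must be combined across ranks.
        result = F.linear(x, weight_shard)
        dist.all_reduce(result, op=dist.ReduceOp.SUM, group=group)
        # Bias is identical on every rank; adding it after the allreduce
        # avoids summing it once per rank and avoids communicating it.
        if bias is not None:
            result += bias
        return result

    if __name__ == "__main__":
        os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
        os.environ.setdefault("MASTER_PORT", "29500")
        dist.init_process_group("gloo", rank=0, world_size=1)
        x = torch.randn(4, 8)
        w = torch.randn(16, 8)   # this rank's shard of the full weight
        b = torch.randn(16)
        print(partial_linear_allreduce(x, w, b).shape)  # torch.Size([4, 16])
        dist.destroy_process_group()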