From fae6db3ddc76fc3d835d81569264988795938484 Mon Sep 17 00:00:00 2001
From: Heyang Sun <60865256+Uxito-Ada@users.noreply.github.com>
Date: Tue, 7 Nov 2023 15:09:16 +0800
Subject: [PATCH] [LLM] refactor cpu low-bit forward logic (#9366)

* [LLM] refactor cpu low-bit forward logic

* fix style

* Update low_bit_linear.py

* Update low_bit_linear.py

* refine
---
 .../bigdl/llm/transformers/low_bit_linear.py | 28 +++++++------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
index cee22675..ba527a15 100644
--- a/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
+++ b/python/llm/src/bigdl/llm/transformers/low_bit_linear.py
@@ -465,31 +465,23 @@ class LowBitLinear(nn.Linear):
         if self.training and x.requires_grad:
             result = MatMulLowBitCPU.apply(x, self.weight)
         else:
+            # Step 1. convert the weight if necessary, and compute the linear result
             if IS_SERVER and (not IS_SPR) and \
                     self.qtype == SYM_INT4 and x_2d.shape[0] >= TORCH_LINEAR_THRESHOLD:
                 x0_fp32 = ggml_int4_convert_fp32(x0, self.weight_shape, self.weight_length)
-                if self.mp_group is None:
-                    # none-distributed mode
-                    result = F.linear(x, x0_fp32, self.bias)
-                else:
-                    result = F.linear(x, x0_fp32)
-                    from deepspeed import comm as dist
-                    # Parallel F.linear should be avoided,
-                    # thus deepspeed allreduce after the operation
-                    dist.inference_all_reduce(result, group=self.mp_group)
-                    if self.bias is not None:
-                        result += self.bias
+                result = F.linear(x, x0_fp32)
             else:
+                # Weight does not need conversion
                 result = ggml_matmul_src1_x_src0_t(x0, x_2d, self.weight_shape, self.qtype)
                 new_shape = x_shape[:-1] + (self.out_len,)
                 result = result.view(new_shape)
-                # bias is consistent among multi instances,
-                # deepspeed only allreduce result without bias to reduce comunication
-                if self.mp_group is not None:
-                    from deepspeed import comm as dist
-                    dist.inference_all_reduce(result, group=self.mp_group)
-                if self.bias is not None:
-                    result += self.bias
+            # Step 2. allreduce to combine partial results and add bias if necessary
+            if self.mp_group is not None:
+                # deepspeed distributed mode
+                from deepspeed import comm as dist
+                dist.inference_all_reduce(result, group=self.mp_group)
+            if self.bias is not None:
+                result += self.bias
         return result
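
The net effect of the refactor: both compute paths now produce a bias-free
(and, under tensor parallelism, rank-partial) result, and one shared tail
allreduces it and adds the bias exactly once, instead of duplicating that
logic per branch. Below is a minimal standalone sketch of the two-step flow
in plain PyTorch, not the actual BigDL implementation: low_bit_forward and
all_reduce_fn are hypothetical names for illustration, with all_reduce_fn
standing in for deepspeed's dist.inference_all_reduce and F.linear standing
in for the quantized ggml kernels.

import torch
import torch.nn.functional as F

def low_bit_forward(x, weight, bias=None, all_reduce_fn=None):
    # Step 1. compute the (possibly rank-partial) linear result without bias
    result = F.linear(x, weight)
    # Step 2. combine partial results across ranks, then add bias once.
    # The bias is identical on every rank, so keeping it out of the
    # allreduced tensor reduces communication, matching the rationale in
    # the comment removed by the patch. all_reduce_fn is a hypothetical
    # stand-in for deepspeed's dist.inference_all_reduce.
    if all_reduce_fn is not None:
        all_reduce_fn(result)
    if bias is not None:
        result = result + bias
    return result

# Single-process check: with no allreduce the flow reduces to F.linear.
x, w, b = torch.randn(2, 8), torch.randn(4, 8), torch.randn(4)
assert torch.allclose(low_bit_forward(x, w, b), F.linear(x, w, b))

Adding the bias after the allreduce rather than inside each branch is what
lets the deepspeed call sit in a single place for both the fp32 fallback
path and the ggml low-bit path.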