From 598c0920b70a138ef808b0ca3637131ad7579588 Mon Sep 17 00:00:00 2001
From: YiSheng5
Date: Sat, 21 Sep 2024 06:42:59 +0800
Subject: [PATCH] grad_wei can't be NoneType when running with DeepSpeed, for
 zero3 will divided the gradient (#428)

---
 megatron/core/tensor_parallel/layers.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/megatron/core/tensor_parallel/layers.py b/megatron/core/tensor_parallel/layers.py
index 67a78853aa..3dd3299ae0 100644
--- a/megatron/core/tensor_parallel/layers.py
+++ b/megatron/core/tensor_parallel/layers.py
@@ -286,6 +286,7 @@ def forward(ctx, input, weight, bias, gradient_accumulation_fusion,
     @staticmethod
     @custom_bwd
     def backward(ctx, grad_output):
+        args = get_args()
         input, weight = ctx.saved_tensors
         use_bias = ctx.use_bias
 
@@ -367,9 +368,13 @@ def backward(ctx, grad_output):
         #     grad_weight = None
         # else:
         #     grad_weight = grad_output.t().matmul(total_input)
-        from megatron.core.tensor_parallel.weight_grad_store import WeightGradStore
-        WeightGradStore.put(total_input, grad_output, weight, gradientUpdateFunction)
-        grad_weight = None
+        if args.enable_zbh1_pipeline:
+            from megatron.core.tensor_parallel.weight_grad_store import WeightGradStore
+            WeightGradStore.put(total_input, grad_output, weight, gradientUpdateFunction)
+            grad_weight = None
+        else:
+            grad_weight = grad_output.t().matmul(total_input)
+
         grad_bias = grad_output.sum(dim=0) if use_bias else None
 
         if ctx.sequence_parallel:
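
Note: the sketch below is not the repository's code; it only illustrates the control flow the patch adds. toy_backward, weight_grad_queue, and the standalone enable_zbh1_pipeline argument are hypothetical stand-ins for LinearWithGradAccumulationAndAsyncCommunication.backward, WeightGradStore, and args.enable_zbh1_pipeline. Per the commit message, DeepSpeed ZeRO stage 3 divides (partitions) the weight gradient right after backward, so grad_weight must be a real tensor on that path; only the zero-bubble H1 schedule may defer it and return None.

# Minimal standalone sketch of the gated weight-gradient path (assumed names, see note above).
import torch

weight_grad_queue = []  # stand-in for the deferred weight-gradient store

def toy_backward(total_input, grad_output, weight, enable_zbh1_pipeline):
    """Return (grad_input, grad_weight) for y = x @ W.T."""
    grad_input = grad_output.matmul(weight)
    if enable_zbh1_pipeline:
        # Zero-bubble H1 schedule: defer the weight gradient; a later pass
        # drains the queue, so backward can return None here.
        weight_grad_queue.append((total_input, grad_output, weight))
        grad_weight = None
    else:
        # Default path (e.g. DeepSpeed ZeRO-3): the optimizer consumes
        # grad_weight immediately, so compute it eagerly as in the diff's
        # else branch.
        grad_weight = grad_output.t().matmul(total_input)
    return grad_input, grad_weight

if __name__ == "__main__":
    x = torch.randn(4, 8)    # total_input
    w = torch.randn(16, 8)   # weight
    g = torch.randn(4, 16)   # grad_output
    _, gw = toy_backward(x, g, w, enable_zbh1_pipeline=False)
    assert gw is not None and gw.shape == w.shape

The design choice mirrored here is that deferring the weight gradient is now opt-in: only when the ZBH1 pipeline flag is set does backward hand back None, so schedulers that expect a tensor (ZeRO-3 among them) keep working unchanged.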