Support BF16 training for sharding (#46846) (#47246)
* Fix a bug in the reduce_sum op: when input.numel() > INT32_MAX, its result
is wrong.

* Support pure bfloat16.

* Support bf16 linear.

* Update PR to pass CI.

* Tiny fix in where_grad_kernel.cu.

* Support bfloat16 type for reducer and sharding.

* Fix some bugs.

* Polish code.

* Polish code.

* Add bfloat16 datatype in fill_grad kernels.

Co-authored-by: sneaxiy <[email protected]>
GhostScreaming and sneaxiy authored Oct 24, 2022
1 parent 82f1e1b commit 5c85f1a
Showing 10 changed files with 23 additions and 0 deletions.
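
Before the per-file diffs, a rough usage sketch of what this PR enables. This is not taken from the diff: the `Linear` model and `AdamW` optimizer are placeholders, and `dtype='bfloat16'` in `paddle.amp.decorate` is assumed from the "support pure bfloat16" commits rather than confirmed here.

```python
import paddle
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel

# Launch with `python -m paddle.distributed.launch ...`; sharding needs
# an initialized collective environment.
fleet.init(is_collective=True)

model = paddle.nn.Linear(1024, 1024)
# Pure-bf16 cast; dtype='bfloat16' is an assumption about the
# amp.decorate signature in this Paddle version.
model = paddle.amp.decorate(models=model, level='O2', dtype='bfloat16')
optimizer = paddle.optimizer.AdamW(parameters=model.parameters())

# Shard optimizer states and gradients across ranks; bf16 gradients now
# pass through the bf16-aware reducer and GradStorage patched below.
model, optimizer, scaler = group_sharded_parallel(model, optimizer,
                                                  level='os_g')
```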
8 changes: 8 additions & 0 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -254,6 +254,10 @@ static void ConcatTensorsWithType(
ConcatTensorsForAllReduce<DeviceContext, double>()(
context, dense_tensors_, p_dense_contents);
break;
case phi::DataType::BFLOAT16:
ConcatTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
context, dense_tensors_, p_dense_contents);
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Data type (%s) is not supported when it concats tensors for "
@@ -281,6 +285,10 @@ static void SplitTensorsWithType(const DeviceContext &context,
SplitTensorsForAllReduce<DeviceContext, double>()(
context, p_dense_contents, p_dense_tensors);
break;
case phi::DataType::BFLOAT16:
SplitTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
context, p_dense_contents, p_dense_tensors);
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Data type (%s) is not supported when it splits tensors for "
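
For orientation, here is a minimal Python sketch (not Paddle's internal C++) of the concat/all-reduce/split pattern that `ConcatTensorsWithType` and `SplitTensorsWithType` dispatch per dtype; this commit simply adds `BFLOAT16` to that dispatch.

```python
# Sketch only: fuse same-dtype gradients into one buffer, run a single
# collective, then split the result back to the original shapes.
import paddle
import paddle.distributed as dist

def fused_allreduce(grads):
    """grads: list of same-dtype (e.g. bf16) gradient tensors.
    Requires an initialized process group."""
    shapes = [g.shape for g in grads]
    flat = paddle.concat([g.flatten() for g in grads])  # "concat" step
    dist.all_reduce(flat)                               # one collective
    out, offset = [], 0
    for shape in shapes:                                # "split" step
        n = 1
        for d in shape:
            n *= d
        out.append(flat[offset:offset + n].reshape(shape))
        offset += n
    return out
```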
1 change: 1 addition & 0 deletions paddle/phi/kernels/cpu/fill_grad_kernel.cc
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill_grad,
int64_t,
int,
paddle::platform::float16,
paddle::platform::bfloat16,
bool) {}
1 change: 1 addition & 0 deletions paddle/phi/kernels/cpu/fill_kernel.cc
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill,
int64_t,
int,
paddle::platform::float16,
paddle::platform::bfloat16,
bool) {}
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/fill_grad_kernel.cu
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill_grad,
int64_t,
int,
paddle::platform::float16,
paddle::platform::bfloat16,
bool) {}
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/fill_kernel.cu
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill,
int64_t,
int,
paddle::platform::float16,
paddle::platform::bfloat16,
bool) {}
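
A small, hedged smoke test for the four registrations above — it assumes `Tensor.fill_` routes to these `fill`/`fill_grad` kernels in dygraph mode.

```python
import paddle

x = paddle.ones([4, 4], dtype=paddle.bfloat16)
x.stop_gradient = False
y = x + 0.0            # non-leaf copy so the in-place op is legal
y.fill_(2.0)           # forward: fill kernel, bf16 instantiation
y.sum().backward()     # backward: fill_grad on the bf16 gradient
print(x.grad)          # all zeros: the fill overwrote every element
```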
@@ -43,6 +43,7 @@
alignment = {"gpu": 256, "cpu": 4096}
align = {
Type.fp16.value: 2,
Type.bf16.value: 2,
Type.fp32.value: 4,
}

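
A sketch of how such alignment tables are typically applied — an assumption about the surrounding buffer code, not something shown in this diff: pad each parameter's element count so its byte footprint lands on a device alignment boundary.

```python
# Assumed use of the tables above: round each parameter's byte size up
# to the device alignment boundary, then convert back to elements.
def aligned_numel(numel, dtype_bytes, device="gpu"):
    alignment = {"gpu": 256, "cpu": 4096}
    boundary = alignment[device]
    nbytes = numel * dtype_bytes
    padded = ((nbytes + boundary - 1) // boundary) * boundary
    return padded // dtype_bytes

# bf16 and fp16 both occupy 2 bytes, hence the identical align entries:
print(aligned_numel(1000, 2))  # 1024 elements -> 2048 bytes, 256-aligned
```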
@@ -532,6 +532,12 @@ def _rank_buffer_size(self, buffer_max_size, model_size):
"====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
.format(rank_buffer_size[Type.fp16.value] / 2**19,
model_size / 2**19))
if Type.bf16.value in rank_buffer_size.keys():
# BF16 GradStorage and model size
logger_.info(
"====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
.format(rank_buffer_size[Type.bf16.value] / 2**19,
model_size / 2**19))
if Type.fp32.value in rank_buffer_size.keys():
# FP32 GradStorage and model size
logger_.info(
@@ -53,6 +53,8 @@ def __init__(self, size, dtype, device, convert_cpu=False):
dtype=np.float16) if Type.fp16.value == dtype else np.zeros(
size, dtype=np.float32)
self.buffer = core.eager.Tensor(value=value, place=core.CPUPlace())
if dtype == Type.bf16.value:
self.buffer = paddle.cast(self.buffer, dtype=paddle.bfloat16)
else:
self.buffer = paddle.zeros(size, dtype=dtype)

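
Why the extra cast on the CPU path: NumPy has no native bfloat16 dtype (an assumption the diff appears to be working around), so the buffer is materialized as fp16/fp32 first and then cast on the Paddle side.

```python
# Public-API version of the same trick the diff uses internally.
import numpy as np
import paddle

value = np.zeros(8, dtype=np.float16)          # NumPy can't do bf16
buffer = paddle.to_tensor(value, place=paddle.CPUPlace())
buffer = paddle.cast(buffer, dtype=paddle.bfloat16)  # cast after the fact
```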
@@ -41,6 +41,7 @@ class Type(Enum):
Type of trainable parameters
"""
fp16 = paddle.float16
bf16 = paddle.bfloat16
fp32 = paddle.float32


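
A short sketch of how the enum is consumed: parameter dtypes compare directly against the member values, so bf16 parameters are now recognized alongside fp16/fp32.

```python
import paddle
from enum import Enum

class Type(Enum):
    """Type of trainable parameters (mirrors the diff)."""
    fp16 = paddle.float16
    bf16 = paddle.bfloat16
    fp32 = paddle.float32

param = paddle.zeros([2, 2], dtype=paddle.bfloat16)
assert param.dtype == Type.bf16.value  # bf16 buckets keyed by this value
```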
@@ -45,6 +45,7 @@ class Type(Enum):
Type of trainable parameters
"""
fp16 = paddle.float16
bf16 = paddle.bfloat16
fp32 = paddle.float32


