[AMP OP&Test] arange op support fp16/bf16 #51106
Changes from 2 commits
arange GPU kernel (CUDA):

@@ -20,12 +20,16 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/range_function.h"
+#include "paddle/phi/common/amp_type_traits.h"
+#include "paddle/phi/common/float16.h"
+#include "paddle/phi/common/bfloat16.h"

 namespace phi {

 template <typename T>
 __global__ void Range(T start, T step, int64_t size, T* out) {
-  CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
+  using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
+  CUDA_KERNEL_LOOP(index, size) { out[index] = static_cast<T>(static_cast<MPType>(start) + static_cast<MPType>(step) * index); }
 }

 template <typename T, typename Context>
@@ -39,7 +43,8 @@ void ArangeKernel(const Context& dev_ctx,
   T step_value = GetValue<T, Context>(dev_ctx, step);

   int64_t size = 0;
-  phi::funcs::GetSize(start_value, end_value, step_value, &size);
+  using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
+  phi::funcs::GetSize(static_cast<MPType>(start_value), static_cast<MPType>(end_value), static_cast<MPType>(step_value), &size);
   out->Resize(phi::make_ddim({size}));
   T* out_data = dev_ctx.template Alloc<T>(out);

Review comment (on the GetValue lines): Suggest converting start, end, and step to MPType right here, when they are fetched, and then using the MPType values for all subsequent computation.
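To make that suggestion concrete, here is a minimal host-side sketch, not the merged implementation: the fetched scalars are cast to MPType once and reused afterwards. It assumes the surrounding ArangeKernel body from this hunk, and that the start/end tensor arguments are named like the step argument shown above.

    // Sketch of the reviewer's suggestion: convert to the higher-precision
    // MPType once at fetch time, then use the MPType values everywhere else
    // (size computation, kernel launch).
    using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
    MPType start_value = static_cast<MPType>(GetValue<T, Context>(dev_ctx, start));
    MPType end_value = static_cast<MPType>(GetValue<T, Context>(dev_ctx, end));
    MPType step_value = static_cast<MPType>(GetValue<T, Context>(dev_ctx, step));

    int64_t size = 0;
    phi::funcs::GetSize(start_value, end_value, step_value, &size);
    // The Range kernel would then take the MPType scalars directly (or cast
    // them once into locals inside the kernel, as in the sketch at the end).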
@@ -55,7 +60,7 @@ void ArangeKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    arange, GPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int64_t, int) {
+    arange, GPU, ALL_LAYOUT, phi::ArangeKernel, float, double, int64_t, int, phi::dtype::float16, phi::dtype::bfloat16) {
   kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
   kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
arange unit test (Python):

@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

Review comment: This line does not actually need to be touched, but if it is changed, please update 2020 to 2023.
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 from paddle.fluid import core
@@ -57,6 +57,37 @@ def init_config(self):
         self.python_api = paddle.arange
         self.case = (0, 5, 1)

+
+class TestFloa16ArangeOp(TestArangeOp):
+    def init_config(self):
+        self.dtype = np.float16
+        self.python_api = paddle.arange
+        self.case = (0, 5, 1)
+
+    def test_check_output(self):
+        self.check_output(atol=1e-3)

Review comment: The atol=1e-3 can be removed now; the default tolerance has already been updated internally.

+
+
+class TestBFloat16ArangeOp(TestArangeOp):
+    def init_config(self):
+        self.dtype = np.uint16
+        self.python_api = arange_wrapper
+        self.case = (0, 1, 0.2)
+
+    @unittest.skipIf(
+        not core.is_compiled_with_cuda()
+        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+        "core is not compiled with CUDA and not support the bfloat16",
+    )
+    def test_check_output(self):

Review comment: Building the inputs and outputs should not be done inside test_check_output. Instead, inherit from OpTest directly and override setUp; for everything else, follow TestArangeOp (a possible shape is sketched after this diff).

+        self.inputs = {
+            'Start': convert_float_to_uint16(np.array([self.case[0]]).astype(np.float32)),
+            'End': convert_float_to_uint16(np.array([self.case[1]]).astype(np.float32)),
+            'Step': convert_float_to_uint16(np.array([self.case[2]]).astype(np.float32)),
+        }
+
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.arange(self.case[0], self.case[1], self.case[2]))
+        }
+        self.check_output(atol=1e-2)

Review comment: This will also be switched to the default tolerance later, so atol=1e-2 can be left out here as well.

+
+
 class TestInt32ArangeOp(TestArangeOp):
     def init_config(self):
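For illustration, a minimal sketch of the restructuring the reviewer asks for is below. It assumes the same module context as the diff (OpTest, convert_float_to_uint16, np, core, and an existing arange_wrapper); it is not the merged code, and the op_type value is only assumed to match what TestArangeOp uses.

    class TestBFloat16ArangeOp(OpTest):
        def setUp(self):
            # Build the bf16 inputs/outputs here, mirroring TestArangeOp,
            # rather than inside test_check_output.
            self.op_type = "range"  # assumed to match TestArangeOp
            self.init_config()
            self.inputs = {
                'Start': convert_float_to_uint16(
                    np.array([self.case[0]]).astype(np.float32)
                ),
                'End': convert_float_to_uint16(
                    np.array([self.case[1]]).astype(np.float32)
                ),
                'Step': convert_float_to_uint16(
                    np.array([self.case[2]]).astype(np.float32)
                ),
            }
            self.outputs = {
                # convert_float_to_uint16 expects float32 input
                'Out': convert_float_to_uint16(
                    np.arange(self.case[0], self.case[1], self.case[2]).astype(
                        np.float32
                    )
                )
            }

        def init_config(self):
            self.dtype = np.uint16
            self.python_api = arange_wrapper
            self.case = (0, 1, 0.2)

        @unittest.skipIf(
            not core.is_compiled_with_cuda()
            or not core.is_bfloat16_supported(core.CUDAPlace(0)),
            "core is not compiled with CUDA or does not support bfloat16",
        )
        def test_check_output(self):
            # Tolerances are left at their defaults, as suggested above.
            self.check_output_with_place(core.CUDAPlace(0))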
arange Python API (check_dtype in paddle's arange):

@@ -1234,7 +1234,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
     check_dtype(
         dtype,
         'dtype',
-        ['float32', 'float64', 'int32', 'int64'],
+        ['float32', 'float64', 'int32', 'int64', 'float16', 'bfloat16'],
         'range/arange',
     )
     helper = LayerHelper('range', **locals())

Review comment (on the added dtype entries): Not supported yet.
Review comment: start and step are fixed values throughout the computation, so they can be static_cast into temporary variables before the computation.
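As an illustration of that comment, here is a minimal sketch of the Range kernel with the loop-invariant casts hoisted out of the per-element expression. It is only a sketch of the review feedback, assuming the same CUDA_KERNEL_LOOP macro and MPTypeTrait used in the diff, not the code actually merged.

    template <typename T>
    __global__ void Range(T start, T step, int64_t size, T* out) {
      // start and step do not change across iterations, so cast them to the
      // higher-precision MPType once instead of re-casting in every iteration.
      using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
      const MPType start_mp = static_cast<MPType>(start);
      const MPType step_mp = static_cast<MPType>(step);
      CUDA_KERNEL_LOOP(index, size) {
        out[index] = static_cast<T>(start_mp + step_mp * index);
      }
    }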