[Eager] Support div(scalar) in eager mode #42148

Merged 5 commits on Apr 25, 2022.
The diff below shows changes from 2 of the 5 commits.
python/paddle/fluid/dygraph/math_op_patch.py (6 changes: 4 additions, 2 deletions)
@@ -33,6 +33,8 @@
     core.VarDesc.VarType.BOOL,
 ]
 
+_supported_div_op_ = ["elementwise_div", "final_state_divide"]
+
 # NOTE(chenweihang): We currently do not fully support the type promotion
 # between tensors. Partial support here is because the interoperation of
 # real and complex numbers in paddle quantum is very frequent, such as the
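As an aside, a minimal sketch of the real/complex interoperation the note refers to (hedged: this assumes the partial promotion support behaves as the note describes; it is not part of this diff):

```python
import numpy as np
import paddle

# Mixing a real tensor with a complex tensor promotes the result to a
# complex dtype - the partial type promotion the note above describes.
r = paddle.ones([2], dtype='float32')
c = paddle.to_tensor(np.full([2], 1j, dtype='complex64'))
print((r + c).dtype)  # expected: paddle.complex64
```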
@@ -222,7 +224,7 @@ def __impl__(self, other_var):
             # so the calculation result here and the calculation result of numpy are
             # different after 6 decimal places. If necessary, we can also use float64 here.
             # torch's behavior here is consistent with ours
-            if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+            if op_type in _supported_div_op_ and self.dtype in _supported_int_dtype_:
Review comment (Contributor), on the changed line above: this may cause a performance issue.

Reply (Author): Done, thx.
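The review does not spell out the concern, and the follow-up fix lands in a later commit not shown in this two-commit view. One plausible reading is the per-call cost of a list membership test on this hot dispatch path; a quick, hypothetical way to measure the difference:

```python
import timeit

# Same names as in the patch; this micro-benchmark is illustrative only.
_supported_div_op_ = ["elementwise_div", "final_state_divide"]
op_type = "final_state_divide"

# Compare list membership against chained string equality; on a
# per-operator-call dispatch path, small constant costs can add up.
t_in = timeit.timeit(lambda: op_type in _supported_div_op_, number=10**6)
t_eq = timeit.timeit(
    lambda: op_type == "elementwise_div" or op_type == "final_state_divide",
    number=10**6,
)
print(f"list membership: {t_in:.3f}s  equality chain: {t_eq:.3f}s")
```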

                 self = astype(self, 'float32')
             # here we use `scale` instead of the `elementwise` ops to get better
             # performance, but only +, -, *, / can use this method
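A minimal sketch of the equivalence that comment relies on. The kernel choice itself is internal; `paddle.scale` is the public API for y = scale * x + bias:

```python
import paddle

x = paddle.full([2, 2], 3.0, dtype='float32')

# A tensor-scalar divide can be expressed as a single scale kernel
# (y = scale * x + bias); the same holds for +, -, and *, which is
# why only those four operators qualify for the fast path.
y_div = x / 2
y_scale = paddle.scale(x, scale=0.5)
print(bool((y_div == y_scale).all()))  # expected: True
```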
@@ -277,7 +279,7 @@ def __impl__(self, other_var):
                 self = other_var
                 other_var = tmp
 
-            if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+            if op_type in _supported_div_op_ and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
                 other_var = astype(other_var, 'float32')
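Taken together, the two hunks make the existing int-to-float32 promotion fire for the eager-mode divide (`final_state_divide`) exactly as it does for the legacy `elementwise_div`. A minimal sketch of the user-visible behavior, mirroring the updated tests below:

```python
import paddle

# Integer tensor divided by a Python scalar: both operands are promoted
# to float32 before the divide, in legacy dygraph and in eager mode.
a = paddle.ones([2, 2, 2], dtype='int64')
c = a / 2
print(c.dtype)            # expected: paddle.float32
print(float(c[0, 0, 0]))  # expected: 0.5
```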
The second changed file is the tensor-scalar type promotion test:

@@ -18,6 +18,7 @@
 import numpy as np
 
 import paddle
+from paddle.fluid.framework import _test_eager_guard
 # Supported types are referenced from `paddle.tensor.math`
 # - Related paddle dtypes:
@@ -50,7 +51,7 @@ def check_operation(self, a, b, c, op):
         self.assertEqual(c_rlt.dtype, c.dtype)
         self.assertTrue(np.array_equal(c_rlt.numpy(), c.numpy()))
 
-    def test_tensor_add_scalar(self):
+    def func_tensor_add_scalar(self):
         # tensor(int64) + scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -81,7 +82,12 @@ def test_tensor_add_scalar(self):
         c = paddle.full([2, 2, 2], 2.5, dtype="float32")
         self.check_operation(a, b, c, '+')
 
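Every remaining hunk in this file applies the same mechanical change: the test body moves into a `func_*` method, and the `test_*` entry point runs it twice, once under `_test_eager_guard()` (eager mode) and once without it (legacy dygraph). A stripped-down sketch of the pattern, with a hypothetical test class:

```python
import unittest

import paddle
from paddle.fluid.framework import _test_eager_guard


class TestDivPattern(unittest.TestCase):
    def func_tensor_div_scalar(self):
        # int64 / int should come back as float32 in either mode.
        a = paddle.ones([2], dtype='int64')
        self.assertEqual((a / 2).dtype, paddle.float32)

    def test_tensor_div_scalar(self):
        with _test_eager_guard():          # first pass: eager mode
            self.func_tensor_div_scalar()
        self.func_tensor_div_scalar()      # second pass: legacy dygraph


if __name__ == '__main__':
    unittest.main()
```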
-    def test_tensor_sub_scalar(self):
+    def test_tensor_add_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_add_scalar()
+        self.func_tensor_add_scalar()
+
+    def func_tensor_sub_scalar(self):
         # tensor(int64) - scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -112,7 +118,12 @@ def test_tensor_sub_scalar(self):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '-')
 
-    def test_scalar_sub_tensor(self):
+    def test_tensor_sub_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_sub_scalar()
+        self.func_tensor_sub_scalar()
+
+    def func_scalar_sub_tensor(self):
         # scalar(int) - tensor(int64)
         a = 1
         b = paddle.ones([2, 2, 2], dtype='int64')
@@ -143,7 +154,12 @@ def test_scalar_sub_tensor(self):
         c = paddle.full([2, 2, 2], -0.5, dtype="float32")
         self.check_operation(a, b, c, '-')
 
-    def test_tensor_mul_tensor(self):
+    def test_scalar_sub_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_sub_tensor()
+        self.func_scalar_sub_tensor()
+
+    def func_tensor_mul_tensor(self):
         # tensor(int64) * scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -174,7 +190,12 @@ def test_tensor_mul_tensor(self):
         c = paddle.full([2, 2, 2], 1.5, dtype="float32")
         self.check_operation(a, b, c, '*')
 
-    def test_tensor_div_scalar(self):
+    def test_tensor_mul_tensor(self):
+        with _test_eager_guard():
+            self.func_tensor_mul_tensor()
+        self.func_tensor_mul_tensor()
+
+    def func_tensor_div_scalar(self):
         # tensor(int64) / scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2
@@ -205,7 +226,12 @@ def test_tensor_div_scalar(self):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-    def test_scalar_div_tensor(self):
+    def test_tensor_div_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_div_scalar()
+        self.func_tensor_div_scalar()
+
+    def func_scalar_div_tensor(self):
         # scalar(int) / tensor(int64)
         a = 1
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -230,7 +256,12 @@ def test_scalar_div_tensor(self):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-    def test_tensor_pow_scalar(self):
+    def test_scalar_div_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_div_tensor()
+        self.func_scalar_div_tensor()
+
+    def func_tensor_pow_scalar(self):
         # tensor(int64) ** scalar(int)
         a = paddle.full([2, 2, 2], 2, dtype='int64')
         b = 3
@@ -255,7 +286,12 @@ def test_tensor_pow_scalar(self):
         c = paddle.full([2, 2, 2], 8, dtype="float32")
         self.check_operation(a, b, c, '**')
 
-    def test_scalar_pow_tensor(self):
+    def test_tensor_pow_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_pow_scalar()
+        self.func_tensor_pow_scalar()
+
+    def func_scalar_pow_tensor(self):
         # scalar(int) ** tensor(int64)
         a = 3
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -280,15 +316,25 @@ def test_scalar_pow_tensor(self):
         c = paddle.full([2, 2, 2], 9, dtype="float32")
         self.check_operation(a, b, c, '**')
 
+    def test_scalar_pow_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_pow_tensor()
+        self.func_scalar_pow_tensor()
+
     ## TODO: floordiv op kernel doesn't support float
-    def test_tensor_floordiv_scalar(self):
+    def func_tensor_floordiv_scalar(self):
         # tensor(int64) // scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
         c = paddle.full([2, 2, 2], 1, dtype="int64")
         self.check_operation(a, b, c, '//')
 
-    def test_tensor_mod_scalar(self):
+    def test_tensor_floordiv_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_floordiv_scalar()
+        self.func_tensor_floordiv_scalar()
+
+    def func_tensor_mod_scalar(self):
         # tensor(int64) % scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
@@ -313,6 +359,11 @@ def test_tensor_mod_scalar(self):
         c = paddle.full([2, 2, 2], 1, dtype="float32")
         self.check_operation(a, b, c, '%')
 
+    def test_tensor_mod_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_mod_scalar()
+        self.func_tensor_mod_scalar()


 if __name__ == '__main__':
     unittest.main()