[Phi] Add yaml for assign_value #44596

Merged on Jul 29, 2022 (5 commits)
@@ -56,6 +56,7 @@
'Scalar(int64_t)' : 'paddle::experimental::Scalar',
'Scalar(float)' : 'paddle::experimental::Scalar',
'Scalar(double)' : 'paddle::experimental::Scalar',
'Scalar[]' : 'std::vector<phi::Scalar>',
'IntArray' : 'paddle::experimental::IntArray'
}

@@ -45,6 +45,7 @@ def SkipAPIGeneration(forward_api_name):
"std::vector<double>": "CastPyArg2Float64s",
"std::vector<std::string>": "CastPyArg2Strings",
"paddle::experimental::Scalar": "CastPyArg2Scalar",
"std::vector<phi::Scalar>": "CastPyArg2ScalarArray",
"paddle::experimental::IntArray": "CastPyArg2IntArray",
"paddle::Place": "CastPyArg2Place",
"paddle::experimental::DataType": "CastPyArg2DataType",
@@ -85,6 +86,7 @@ def SkipAPIGeneration(forward_api_name):
'rmsprop',
'sgd_',
'sgd',
'assign_value_',
'sparse_momentum_',
'sparse_momentum',
]
48 changes: 48 additions & 0 deletions paddle/fluid/pybind/eager_utils.cc
@@ -1253,6 +1253,54 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
return paddle::experimental::Scalar(1.0);
}

std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
                                               const std::string& op_type,
                                               ssize_t arg_pos) {
  if (obj == Py_None) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "a list of int, float, or bool, but got %s",
        op_type,
        arg_pos + 1,
        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
  }

  PyTypeObject* type = obj->ob_type;
  auto type_name = std::string(type->tp_name);
  VLOG(1) << "type_name: " << type_name;
  if (PyList_Check(obj)) {
    Py_ssize_t len = PyList_Size(obj);
    PyObject* item = nullptr;
    // Peek at the first element to decide the parse path for the whole list;
    // assumes a non-empty list (an empty list falls through to the
    // placeholder return below).
    item = PyList_GetItem(obj, 0);
    if (PyObject_CheckFloatOrToFloat(&item)) {
      std::vector<phi::Scalar> value;
      for (Py_ssize_t i = 0; i < len; i++) {
        item = PyList_GetItem(obj, i);
        value.emplace_back(phi::Scalar{PyFloat_AsDouble(item)});
      }
      return value;
    } else if (PyObject_CheckLongOrToLong(&item)) {
      std::vector<phi::Scalar> value;
      for (Py_ssize_t i = 0; i < len; i++) {
        item = PyList_GetItem(obj, i);
        value.emplace_back(
            phi::Scalar{static_cast<int64_t>(PyLong_AsLong(item))});
      }
      return value;
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "%s(): argument (position %d) must be "
        "a list of int, float, or bool, but got %s",
        op_type,
        arg_pos + 1,
        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
  }

  // Fallback for unsupported element types: return a placeholder ScalarArray.
  return std::vector<phi::Scalar>({phi::Scalar(1.0)});
}
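
For reference, a rough sketch of what this converter is meant to accept from the Python side (hypothetical values; whether an all-int list lands in the float or the long branch depends on how permissive PyObject_CheckFloatOrToFloat is with Python ints):

# Inputs CastPyArg2ScalarArray is written to handle; the first element
# selects the branch, so the whole list is parsed uniformly.
values_float = [1.0, 2.5, 4.0]   # -> std::vector<phi::Scalar> of doubles
values_int = [1, 2, 3]           # -> int64 Scalars via the long branch
# None, or any non-list argument, raises InvalidArgument; element types
# outside these two branches hit the placeholder return above.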

paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos) {
4 changes: 4 additions & 0 deletions paddle/fluid/pybind/eager_utils.h
@@ -191,6 +191,10 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);

std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);

paddle::experimental::IntArray CastPyArg2IntArray(PyObject* obj,
const std::string& op_type,
ssize_t arg_pos);
2 changes: 1 addition & 1 deletion paddle/phi/api/lib/kernel_dispatch.cc
@@ -53,7 +53,7 @@ bool HasAllocation(const phi::TensorBase& t) {
}

BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
- if (HasAllocation(t)) {
+ if (HasAllocation(t) && t.place().GetType() != AllocationType::UNDEFINED) {
BackendSet backend_set(phi::TransToPhiBackend(t.place()));
switch (t.layout()) {
case DataLayout::MKLDNN:
5 changes: 5 additions & 0 deletions paddle/phi/api/yaml/generator/api_base.py
@@ -125,6 +125,7 @@ def parse_input_and_attr(self, api_name, args_config, optional_vars=[]):
'Scalar(int64_t)': 'const Scalar&',
'Scalar(float)': 'const Scalar&',
'Scalar(double)': 'const Scalar&',
'Scalar[]': 'const std::vector<phi::Scalar>&',
'int': 'int',
'int32_t': 'int32_t',
'int64_t': 'int64_t',
@@ -642,6 +643,10 @@ def get_kernel_args(self, kernel_tensor_type=None, code_indent=''):
if 'IntArray' in self.attrs['attr_info'][param][0]:
    kernel_args_type_list.append('const phi::IntArray&')
    param = 'phi::IntArray(' + param + ')'
elif 'vector<phi::Scalar>' in self.attrs['attr_info'][param][0]:
    kernel_args_type_list.append(
        'const std::vector<phi::Scalar>&')
    param = param  # vector<Scalar> attributes pass through without a cast
elif 'Scalar' in self.attrs['attr_info'][param][0]:
    kernel_args_type_list.append('const phi::Scalar&')
    param = 'phi::Scalar(' + param + ')'
5 changes: 4 additions & 1 deletion paddle/phi/api/yaml/generator/type_mapping.py
@@ -31,6 +31,7 @@
'Scalar(int)': 'const Scalar&',
'Scalar(int64_t)': 'const Scalar&',
'Scalar(float)': 'const Scalar&',
'Scalar[]': 'const std::vector<Scalar>&',
'Place': 'Place',
'DataLayout': 'DataLayout',
'DataType': 'DataType',
@@ -58,6 +59,7 @@
'Scalar(int)': 'int',
'Scalar(int64_t)': 'int64_t',
'Scalar(float)': 'float',
'Scalar[]': 'std::vector<Scalar>',
'Place': 'int',
'DataLayout': 'int',
'DataType': 'int',
@@ -83,7 +85,8 @@
phi_attr_types_map = attr_types_map.copy()
phi_attr_types_map.update({
'IntArray': 'const phi::IntArray&',
- 'Scalar': 'const phi::Scalar&'
+ 'Scalar': 'const phi::Scalar&',
+ 'Scalar[]': 'const std::vector<phi::Scalar>&'
})

#--------------------------- phi dense tensor ---------------------------
14 changes: 14 additions & 0 deletions paddle/phi/api/yaml/legacy_api.yaml
@@ -235,6 +235,20 @@
inplace : (output -> out)
backward : assign_out__grad

# assign_value
- api : assign_value_
  args : (Tensor output, int[] shape, DataType dtype, Scalar[] values, Place place = {})
  output : Tensor(out)
  inplace : (output -> out)
  infer_meta :
    func : AssignValueInferMeta
    param : [shape, dtype]
  kernel :
    func : assign_value
    param : [shape, dtype, values]
    data_type : dtype
    backend : place > output
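
From this entry the eager code generator emits a `final_state_assign_value_` Python binding. A minimal sketch of calling it, modeled on the dygraph branch added to python/paddle/fluid/layers/tensor.py below (the import path for `_current_expected_place` is an assumption):

import paddle
from paddle import _C_ops
from paddle.fluid.framework import _current_expected_place

# In-place variant: fills a zeros-initialized tensor with the given values.
out = paddle.zeros([2, 2], 'float32')
_C_ops.final_state_assign_value_(out, [2, 2], paddle.float32,
                                 [1.0, 2.0, 3.0, 4.0],
                                 _current_expected_place())

The `backend : place > output` line presumably lets the explicit `place` argument take priority over the output tensor when picking a kernel backend, which pairs with the kernel_dispatch.cc guard above for outputs whose place is still undefined.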

# atan
- api : atan
args : (Tensor x)
2 changes: 2 additions & 0 deletions paddle/phi/core/infermeta_utils.h
@@ -209,6 +209,8 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
std::vector<double>);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
std::vector<std::string>);
PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
std::vector<Scalar>);

template <typename... Tail>
struct InferMetaFnCallHelper<MetaTensor*, Tail...> {
14 changes: 11 additions & 3 deletions python/paddle/fluid/layers/tensor.py
@@ -690,12 +690,20 @@ def assign(input, output=None):
if input.size > 1024 * 1024:
    raise ValueError("The size of input is too big. Please consider "
                     "saving it to file and 'load_op' to load it")
-if output is None:
-    output = helper.create_variable_for_type_inference(dtype=dtype)
-if _non_static_mode():
if in_dygraph_mode():
    if output is None:
        output = zeros(list(input.shape), dtype)
    _C_ops.final_state_assign_value_(output, list(input.shape), dtype,
                                     values, _current_expected_place())
elif _in_legacy_dygraph():
    if output is None:
        output = core.VarBase()
    _C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
                        dtype, value_name, values)
else:
    if output is None:
        output = helper.create_variable_for_type_inference(
            dtype=input.dtype)
    helper.append_op(type='assign_value',
                     outputs={'Out': [output]},
                     attrs={
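
Net effect of the restructured branches: in the new eager mode, `assign` on a numpy input routes through `final_state_assign_value_` with a zeros-initialized output instead of an empty `VarBase`. A small sketch of the user-visible behavior, as exercised by test_assign_NumpyArray below:

import numpy as np
import paddle

# Eager mode (the in_dygraph_mode() branch above).
array = np.random.rand(3, 3).astype(np.float32)
result = paddle.assign(array)
assert np.allclose(result.numpy(), array)
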
29 changes: 23 additions & 6 deletions python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -23,6 +23,7 @@
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework


class TestAssignOp(op_test.OpTest):
@@ -35,14 +36,20 @@ def setUp(self):
self.outputs = {'Out': x}

def test_forward(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.check_output(check_eager=True)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()
framework._disable_legacy_dygraph()

def test_backward(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.check_grad(['X'], 'Out', check_eager=True)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()
framework._disable_legacy_dygraph()


class TestAssignFP16Op(op_test.OpTest):
@@ -55,19 +62,26 @@ def setUp(self):
self.outputs = {'Out': x}

def test_forward(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.check_output(check_eager=True)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()
framework._disable_legacy_dygraph()

def test_backward(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.check_grad(['X'], 'Out', check_eager=True)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()
framework._disable_legacy_dygraph()


class TestAssignOpWithLoDTensorArray(unittest.TestCase):

def test_assign_LoDTensorArray(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
main_program = Program()
startup_program = Program()
@@ -97,11 +111,13 @@ def test_assign_LoDTensorArray(self):
fetch_list=[sums.name, x.grad_name])
self.assertTrue(np.allclose(res[0], feed_add))
self.assertTrue(np.allclose(res[1], ones / 1000.0))
paddle.disable_static()


class TestAssignOpError(unittest.TestCase):

def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
# The type of input must be Variable or numpy.ndarray.
x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
Expand All @@ -110,11 +126,13 @@ def test_errors(self):
# When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
x2 = np.array([[2.5, 2.5]], dtype='uint8')
self.assertRaises(TypeError, fluid.layers.assign, x2)
paddle.disable_static()


class TestAssignOApi(unittest.TestCase):

def test_assign_LoDTensorArray(self):
paddle.enable_static()
main_program = Program()
startup_program = Program()
with program_guard(main_program):
@@ -142,6 +160,7 @@ def test_assign_LoDTensorArray(self):
fetch_list=[sums.name, x.grad_name])
self.assertTrue(np.allclose(res[0], feed_add))
self.assertTrue(np.allclose(res[1], ones / 1000.0))
paddle.disable_static()

def test_assign_NumpyArray(self):
with fluid.dygraph.guard():
@@ -172,24 +191,19 @@ def test_assign_NumpyArray3(self):
self.assertTrue(np.allclose(result1.numpy(), array))

def test_assign_List(self):
- paddle.disable_static()
l = [1, 2, 3]
result = paddle.assign(l)
self.assertTrue(np.allclose(result.numpy(), np.array(l)))
- paddle.enable_static()

def test_assign_BasicTypes(self):
- paddle.disable_static()
result1 = paddle.assign(2)
result2 = paddle.assign(3.0)
result3 = paddle.assign(True)
self.assertTrue(np.allclose(result1.numpy(), np.array([2])))
self.assertTrue(np.allclose(result2.numpy(), np.array([3.0])))
self.assertTrue(np.allclose(result3.numpy(), np.array([1])))
- paddle.enable_static()

def test_clone(self):
- paddle.disable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
self.python_api = paddle.clone

@@ -216,11 +230,13 @@ def test_clone(self):
fetch_list=[clone_x])[0]

self.assertTrue(np.array_equal(y_np, x_np), True)
paddle.disable_static()


class TestAssignOpErrorApi(unittest.TestCase):

def test_errors(self):
paddle.enable_static()
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
with program_guard(Program(), Program()):
# The type of input must be Variable or numpy.ndarray.
@@ -231,15 +247,16 @@ def test_errors(self):
x2 = np.array([[2.5, 2.5]], dtype='uint8')
self.assertRaises(TypeError, paddle.assign, x2)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.disable_static()

def test_type_error(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = [paddle.randn([3, 3]), paddle.randn([3, 3])]
# not support to assign list(var)
self.assertRaises(TypeError, paddle.assign, x)
paddle.disable_static()


if __name__ == '__main__':
paddle.enable_static()
unittest.main()
3 changes: 3 additions & 0 deletions python/paddle/fluid/tests/unittests/test_assign_value_op.py
@@ -18,10 +18,13 @@
import numpy

import op_test
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers

paddle.enable_static()


class TestAssignValueOp(op_test.OpTest):

15 changes: 11 additions & 4 deletions python/paddle/tensor/creation.py
@@ -1569,13 +1569,20 @@ def assign(x, output=None):
if input.size > 1024 * 1024:
    raise ValueError("The size of input is too big. Please consider "
                     "saving it to file and 'load_op' to load it")
-if output is None:
-    output = helper.create_variable_for_type_inference(
-        dtype=input.dtype)
-if _non_static_mode():
if in_dygraph_mode():
    if output is None:
        output = zeros(list(input.shape), dtype)
    _C_ops.final_state_assign_value_(output, list(input.shape), dtype,
                                     values, _current_expected_place())
elif _in_legacy_dygraph():
    if output is None:
        output = core.VarBase()
    _C_ops.assign_value(output, 'shape', list(input.shape), 'dtype',
                        dtype, value_name, values)
else:
    if output is None:
        output = helper.create_variable_for_type_inference(
            dtype=input.dtype)
    helper.append_op(type='assign_value',
                     outputs={'Out': [output]},
                     attrs={
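
For completeness, a sketch of the static-graph path (the final `else` branch above), which still appends an `assign_value` op to the program; names are from the public 2.x static API:

import numpy as np
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    array = np.array([[1.0, 2.0]], dtype=np.float32)
    out = paddle.assign(array)  # appends an assign_value op
exe = paddle.static.Executor()
res = exe.run(main, fetch_list=[out])[0]
assert np.allclose(res, array)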