Skip to content

Commit

Permalink
Adding ADDV2 operation for complex numbers (openvinotoolkit#23178)
Browse files Browse the repository at this point in the history
### Details:
- *Extended the AddV2 loader to propagate ComplexTypeMark from inputs to
output, representing the complex output tensor as a floating-point
tensor with an auxiliary dimension that concatenates the real and
imaginary parts of the complex tensor.*
 - *Performed addition for complex numbers.*
- *Wrapped the complex result with ComplexTypeMark and returned the
result*


Fixes openvinotoolkit#22946

---------

Co-authored-by: Roman Kazantsev <[email protected]>
  • Loading branch information
MonalSD and rkazants authored Mar 12, 2024
1 parent 81e236f commit ad584f5
Show file tree
Hide file tree
Showing 4 changed files with 87 additions and 3 deletions.
4 changes: 2 additions & 2 deletions src/frontends/tensorflow/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -166,8 +166,8 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"Swish", CreatorFunction(translate_unary_op<v4::Swish>)},

// note: BinaryOp translator declaration for each op must be added in binary_op.cpp file
{"Add", CreatorFunction(translate_binary_op<v1::Add>)},
{"AddV2", CreatorFunction(translate_binary_op<v1::Add>)},
{"Add", CreatorFunction(translate_addv2_op)},
{"AddV2", CreatorFunction(translate_addv2_op)},
{"Atan2", CreatorFunction(translate_atan2_op)},
{"BitwiseAnd", CreatorFunction(translate_binary_op<v13::BitwiseAnd>)},
{"BitwiseOr", CreatorFunction(translate_binary_op<v13::BitwiseOr>)},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ OP_T_CONVERTER(translate_unary_op);
OP_CONVERTER(translate_selu_op);
OP_T_CONVERTER(translate_binary_op);
OP_T_CONVERTER(translate_direct_reduce_op);

OP_CONVERTER(translate_addv2_op);
OP_CONVERTER(translate_add_n_op);
OP_CONVERTER(translate_adjust_contrast_op);
OP_CONVERTER(translate_arg_max_op);
Expand Down
27 changes: 27 additions & 0 deletions src/frontends/tensorflow_common/src/op/binary_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,33 @@ OutputVector translate_mul_op(const NodeContext& node) {
set_node_name(node.get_name(), result);
return {result};
}

OutputVector translate_addv2_op(const NodeContext& node) {
    // Translates TensorFlow Add/AddV2.
    // Complex tensors arrive wrapped in ComplexTypeMark: the underlying tensor is
    // floating-point with an auxiliary innermost dimension holding [real, imag].
    // Element-wise Add on that representation computes (a+c) + (b+d)i directly,
    // so the same v1::Add works for both real and complex inputs.
    default_op_checks(node, 2, {"Add", "AddV2"}, true);
    auto lhs = node.get_input(0);
    auto rhs = node.get_input(1);

    auto complex_type_mark_lhs = as_type_ptr<ComplexTypeMark>(lhs.get_node_shared_ptr());
    auto complex_type_mark_rhs = as_type_ptr<ComplexTypeMark>(rhs.get_node_shared_ptr());

    // TensorFlow requires both operands of Add/AddV2 to have the same dtype,
    // so either both inputs are complex or neither is. Reject the mixed case
    // explicitly instead of dereferencing a null mark (the previous `||` check
    // crashed when exactly one input carried a ComplexTypeMark).
    TENSORFLOW_OP_VALIDATION(node,
                             (complex_type_mark_lhs != nullptr) == (complex_type_mark_rhs != nullptr),
                             "Add/AddV2 expects both inputs to be either complex or real tensors");
    auto complex_type_inputs = (complex_type_mark_lhs && complex_type_mark_rhs);

    if (complex_type_inputs) {
        // Unwrap the marks to operate on the underlying [..., 2] float tensors.
        lhs = complex_type_mark_lhs->input_value(0);
        rhs = complex_type_mark_rhs->input_value(0);
    }

    auto result = make_shared<v1::Add>(lhs, rhs);
    set_node_name(node.get_name(), result);

    if (complex_type_inputs) {
        // Re-wrap so downstream ops (Real/Imag/...) still see a complex tensor.
        auto complex_result = make_shared<ComplexTypeMark>(result, complex_type_mark_lhs->get_complex_part_type());
        return {complex_result};
    }
    return {result};
}

template OutputVector translate_binary_op<v1::Add>(const NodeContext& node);
template OutputVector translate_binary_op<v13::BitwiseAnd>(const NodeContext& node);
template OutputVector translate_binary_op<v13::BitwiseOr>(const NodeContext& node);
Expand Down
57 changes: 57 additions & 0 deletions tests/layer_tests/tensorflow_tests/test_tf_Add.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,3 +222,60 @@ def test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision,
use_legacy_frontend=use_legacy_frontend),
ie_device, precision,
ir_version=ir_version, temp_dir=temp_dir, use_legacy_frontend=use_legacy_frontend)


class TestComplexAdd(CommonTFLayerTest):
    # Verifies that AddV2 over complex tensors (built from real/imag placeholder
    # pairs via tf.raw_ops.Complex) is translated correctly by the TF frontend.

    _INPUT_NAMES = ['param_real_1:0', 'param_imag_1:0', 'param_real_2:0', 'param_imag_2:0']

    def _prepare_input(self, inputs_info):
        # Fill every real/imag placeholder with uniform random values in [-2, 2).
        generator = np.random.default_rng()
        inputs_data = {}
        for input_name in self._INPUT_NAMES:
            assert input_name in inputs_info
            shape = inputs_info[input_name]
            inputs_data[input_name] = 4 * generator.random(shape).astype(np.float32) - 2
        return inputs_data

    def create_complex_addv2_net(self, input_shape):
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        # Build the graph: two complex operands, one AddV2, then split the
        # result back into real and imaginary outputs.
        with tf.compat.v1.Session() as sess:
            real_1 = tf.compat.v1.placeholder(np.float32, input_shape, 'param_real_1')
            imag_1 = tf.compat.v1.placeholder(np.float32, input_shape, 'param_imag_1')
            real_2 = tf.compat.v1.placeholder(np.float32, input_shape, 'param_real_2')
            imag_2 = tf.compat.v1.placeholder(np.float32, input_shape, 'param_imag_2')
            operand_1 = tf.raw_ops.Complex(real=real_1, imag=imag_1)
            operand_2 = tf.raw_ops.Complex(real=real_2, imag=imag_2)
            total = tf.raw_ops.AddV2(x=operand_1, y=operand_2, name="complex_add")
            tf.raw_ops.Real(input=total)
            tf.raw_ops.Imag(input=total)
            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        return tf_net, None

    test_data_basic = [
        dict(input_shape=[]),
        dict(input_shape=[2]),
        dict(input_shape=[1, 3]),
        dict(input_shape=[2, 3, 4]),
        dict(input_shape=[3, 4, 5, 6]),
    ]

    @pytest.mark.parametrize("params", test_data_basic)
    @pytest.mark.precommit_tf_fe
    @pytest.mark.nightly
    def test_complex_add(self, params, ie_device, precision, ir_version, temp_dir,
                         use_legacy_frontend):
        self._test(
            *self.create_complex_addv2_net(**params),
            ie_device, precision, ir_version, temp_dir=temp_dir,
            use_legacy_frontend=use_legacy_frontend)

0 comments on commit ad584f5

Please sign in to comment.