Skip to content

Commit

Permalink
[TF FE]: Support complex tensors for PadV2 operations (openvinotoolki…
Browse files Browse the repository at this point in the history
  • Loading branch information
awayzjj committed Mar 12, 2024
1 parent f8d0710 commit ffde30f
Show file tree
Hide file tree
Showing 2 changed files with 44 additions and 6 deletions.
32 changes: 31 additions & 1 deletion src/frontends/tensorflow_common/src/op/pad.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "utils.hpp"

using namespace std;
Expand Down Expand Up @@ -82,10 +83,39 @@ OutputVector translate_pad_op(const NodeContext& node) {
}

OutputVector translate_padv2_op(const NodeContext& node) {
    // Translates TF PadV2: pads a tensor with a caller-supplied constant value.
    // Complex tensors reach this point wrapped in a ComplexTypeMark whose
    // underlying floating tensor stores the [real, imag] pair along the
    // innermost axis; real and imaginary planes are padded independently and
    // re-packed afterwards.
    default_op_checks(node, 3, {"PadV2"}, true);
    auto input = node.get_input(0);
    auto paddings = node.get_input(1);
    auto constant_value = node.get_input(2);

    if (auto complex_type_mark = as_type_ptr<ComplexTypeMark>(input.get_node_shared_ptr())) {
        input = complex_type_mark->input_value(0);
        element::Type complex_part_type = complex_type_mark->get_complex_part_type();

        // Slice the packed representation into its real (index 0) and
        // imaginary (index 1) planes along the last axis.
        auto gather_index_real = make_shared<v0::Constant>(element::i32, Shape{}, 0);
        auto gather_index_imag = make_shared<v0::Constant>(element::i32, Shape{}, 1);
        auto minus_one = make_shared<v0::Constant>(element::i32, Shape{1}, -1);
        auto x_real = make_shared<v8::Gather>(input, gather_index_real, minus_one)->output(0);
        auto x_imag = make_shared<v8::Gather>(input, gather_index_imag, minus_one)->output(0);

        // A complex input requires a complex pad value. Validate instead of
        // dereferencing a possibly-null cast result (crash on malformed graphs).
        auto constant_complex_type_mark = as_type_ptr<ComplexTypeMark>(constant_value.get_node_shared_ptr());
        TENSORFLOW_OP_VALIDATION(node,
                                 constant_complex_type_mark,
                                 "PadV2 with a complex input expects complex constant_values");
        auto constant_input = constant_complex_type_mark->input_value(0);
        auto constant = ov::util::get_constant_from_source(constant_input);
        // get_constant_from_source returns nullptr when the value cannot be
        // constant-folded; report it rather than segfault on cast_vector.
        TENSORFLOW_OP_VALIDATION(node,
                                 constant,
                                 "PadV2 constant_values must be constant-foldable for complex inputs");
        std::vector<float> constant_vector = constant->cast_vector<float>();
        TENSORFLOW_OP_VALIDATION(node,
                                 constant_vector.size() == 2,
                                 "PadV2 complex constant_values must hold exactly one [real, imag] pair");
        auto constant_value_real = create_same_type_const_scalar<float>(input, constant_vector[0]);
        auto constant_value_imag = create_same_type_const_scalar<float>(input, constant_vector[1]);

        // Pad each plane with its matching scalar component.
        auto y_real = translate_pad_base_op(node, x_real, paddings, constant_value_real)[0];
        auto y_imag = translate_pad_base_op(node, x_imag, paddings, constant_value_imag)[0];

        // Re-pack: append a trailing axis to each plane and concatenate so the
        // result matches the frontend's packed complex layout.
        auto real_unsqueeze = make_shared<v0::Unsqueeze>(y_real, minus_one);
        auto imag_unsqueeze = make_shared<v0::Unsqueeze>(y_imag, minus_one);

        auto concat_result = make_shared<v0::Concat>(OutputVector{real_unsqueeze, imag_unsqueeze}, -1);

        set_node_name(node.get_name(), concat_result);
        auto complex_result = make_shared<ComplexTypeMark>(concat_result->output(0), complex_part_type);
        return {complex_result};
    }

    return translate_pad_base_op(node, input, paddings, constant_value);
}
Expand Down
18 changes: 13 additions & 5 deletions tests/layer_tests/tensorflow_tests/test_tf_Pad.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,15 +57,20 @@ def _prepare_input(self, inputs_info):
inputs_data['param_imag:0'] = 4 * rng.random(param_imag_shape).astype(np.float32) - 2
return inputs_data

def create_pad_complex_net(self, input_shape, pads_values):
def create_pad_complex_net(self, input_shape, pads_values, const_value, pad_op):
tf.compat.v1.reset_default_graph()
# Create the graph and model
with tf.compat.v1.Session() as sess:
param_real = tf.compat.v1.placeholder(np.float32, input_shape, 'param_real')
param_imag = tf.compat.v1.placeholder(np.float32, input_shape, 'param_imag')
complex = tf.raw_ops.Complex(real=param_real, imag=param_imag)
paddings = tf.constant(pads_values, dtype=tf.int32)
pad = tf.raw_ops.Pad(input=complex, paddings=paddings, name='pad')
if pad_op == 'Pad':
pad = tf.raw_ops.Pad(input=complex, paddings=paddings, name='pad');
elif pad_op == 'PadV2':
real_part, imag_part = const_value
constant_values = tf.complex(real_part, imag_part)
pad = tf.raw_ops.PadV2(input=complex, paddings=paddings, constant_values=constant_values, name='padv2')
real = tf.raw_ops.Real(input=pad)
imag = tf.raw_ops.Imag(input=pad)
tf.raw_ops.Pack(values=[real, imag], axis=-1)
Expand All @@ -75,9 +80,12 @@ def create_pad_complex_net(self, input_shape, pads_values):
return tf_net, None

test_data_basic = [
dict(input_shape=[1, 50], pads_values=[[0, 1], [2, 3]]),
dict(input_shape=[2, 20, 10], pads_values=[[0, 1], [2, 3], [4, 0]]),
dict(input_shape=[1, 5, 10, 3], pads_values=[[1, 1], [0, 0], [4, 0], [1, 1]]),
dict(input_shape=[1, 50], pads_values=[[0, 1], [2, 3]], const_value=None, pad_op='Pad'),
dict(input_shape=[2, 20, 10], pads_values=[[0, 1], [2, 3], [4, 0]], const_value=None, pad_op='Pad'),
dict(input_shape=[1, 5, 10, 3], pads_values=[[1, 1], [0, 0], [4, 0], [1, 1]], const_value=None, pad_op='Pad'),
dict(input_shape=[1, 50], pads_values=[[0, 1], [2, 3]], const_value=(1.0, 0.0), pad_op='PadV2'),
dict(input_shape=[2, 20, 10], pads_values=[[0, 1], [2, 3], [4, 0]], const_value=(0.0, 1.0), pad_op='PadV2'),
dict(input_shape=[1, 5, 10, 3], pads_values=[[1, 1], [0, 0], [4, 0], [1, 1]], const_value=(1.0, 2.0), pad_op='PadV2'),
]

@pytest.mark.parametrize("params", test_data_basic)
Expand Down

0 comments on commit ffde30f

Please sign in to comment.