From 3cf4b9df3f95d73b8babe3171039bc1fb1c4a5f8 Mon Sep 17 00:00:00 2001 From: Maxim Vafin <maxim.vafin@intel.com> Date: Wed, 27 Mar 2024 13:50:25 +0100 Subject: [PATCH] [PT FE] Fix issue with aten.copy in FX graph --- src/frontends/pytorch/src/op/copy.cpp | 16 ++++++++++++++++ src/frontends/pytorch/src/op_table.cpp | 3 ++- tests/layer_tests/pytorch_tests/test_aliases.py | 17 +++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/copy.cpp b/src/frontends/pytorch/src/op/copy.cpp index 5f011ce5a7a64c..4494f34b2b2f84 100644 --- a/src/frontends/pytorch/src/op/copy.cpp +++ b/src/frontends/pytorch/src/op/copy.cpp @@ -29,6 +29,22 @@ OutputVector translate_copy_(const NodeContext& context) { return {res}; }; +OutputVector translate_copy_fx(const NodeContext& context) { + // copy = torch.ops.aten.copy.default(slice_4); + // copy = torch.ops.aten.copy.default(slice_4, clone); + num_inputs_check(context, 1, 2); + auto self = context.get_input(0); + if (context.input_is_none(1)) { + return {self}; + } else { + auto src = context.get_input(1); + auto src_converted = context.mark_node(std::make_shared<v1::ConvertLike>(src, self)); + auto self_shape = context.mark_node(std::make_shared<v3::ShapeOf>(self)); + Output<Node> res = context.mark_node(std::make_shared<v3::Broadcast>(src_converted, self_shape)); + return {res}; + } +}; + OutputVector translate_alias_copy(const NodeContext& context) { // aten::alias_copy(Tensor self) -> Tensor // aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 7b1048a2f13b87..65cc9f2e83556f 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -253,6 +253,7 @@ OP_CONVERTER(translate_batch_norm_legit_no_training_fx); OP_CONVERTER(translate_batch_norm_legit_no_stats_fx); OP_CONVERTER(translate_cat_fx); OP_CONVERTER(translate_constant_pad_nd_fx); +OP_CONVERTER(translate_copy_fx); OP_CONVERTER(translate_cumsum_fx); OP_CONVERTER(translate_chunk_fx); OP_CONVERTER(translate_div_fx); @@ -779,7 +780,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops_fx() { {"aten.clone.default", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd {"aten.constant_pad_nd.default", op::translate_constant_pad_nd_fx}, {"aten.convolution.default", op::translate_convolution}, - {"aten.copy.default", op::skip_node}, + {"aten.copy.default", op::translate_copy_fx}, {"aten.copy_.default", op::translate_copy_}, {"aten.cos.default", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Cos>}, {"aten.cosh.default", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Cosh>}, diff --git a/tests/layer_tests/pytorch_tests/test_aliases.py b/tests/layer_tests/pytorch_tests/test_aliases.py index e6ce36ec88f18a..c90d2b929839c9 100644 --- a/tests/layer_tests/pytorch_tests/test_aliases.py +++ b/tests/layer_tests/pytorch_tests/test_aliases.py @@ -14,6 +14,15 @@ def forward(self, x): return y +class aten_alias_tensor(torch.nn.Module): + def forward(self, x): + y = x.clone() + n,c,h,w = x.shape + ones = torch.ones([2,h,w]).to(x.dtype) + y[:, 1:, :, :] = ones + return y + + class aten_loop_alias(torch.nn.Module): def forward(self, x): y = x.clone() @@ -36,6 +45,14 @@ def test_alias(self, ie_device, precision, ir_version): "aten::copy_"], ie_device, precision, ir_version) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_torch_export + def test_alias_tensor(self, ie_device, precision, 
ir_version): + self._test(aten_alias_tensor(), None, ["aten::slice", + "aten::copy_"], + ie_device, precision, ir_version, freeze_model=False) + @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_torch_export