diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index d0e388b5d08cf1..607f0bd32db80d 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -331,12 +331,15 @@ OP_CONVERTER(translate_zeros_like_fx);
 const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
     return {
         {"aten::__and__", op::translate_bitwise_and},
+        {"aten::__iand__", op::inplace_op<op::translate_bitwise_and>},
         {"aten::__derive_index", op::translate_derive_index},
         {"aten::__getitem__", op::translate_getitem},
         {"aten::__not__", op::translate_1to1_match_1_inputs<opset10::LogicalNot>},
         {"aten::__or__", op::translate_bitwise_or},
+        {"aten::__ior__", op::inplace_op<op::translate_bitwise_or>},
         {"aten::__range_length", op::translate_range_length},
         {"aten::__xor__", op::translate_bitwise_xor},
+        {"aten::__ixor__", op::inplace_op<op::translate_bitwise_xor>},
         {"aten::_convolution", op::translate_convolution},
         {"aten::_convolution_mode", op::translate_convolution_mode},
         {"aten::_native_multi_head_attention", op::translate_native_multi_head_attention},
diff --git a/tests/layer_tests/pytorch_tests/test_bitwise_ops.py b/tests/layer_tests/pytorch_tests/test_bitwise_ops.py
index a400f6dcd76d17..125402b4dbec17 100644
--- a/tests/layer_tests/pytorch_tests/test_bitwise_ops.py
+++ b/tests/layer_tests/pytorch_tests/test_bitwise_ops.py
@@ -140,3 +140,61 @@ def test_bitwise_operators(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_
             trace_model=True,
             freeze_model=False,
         )
+
+
+class TestBitwiseInplaceOp(PytorchLayerTest):
+    def _prepare_input(self, lhs_shape, rhs_shape, dtype):
+        choices = np.array([0, 1, 255, 7])
+        x = np.random.choice(choices, lhs_shape).astype(dtype)
+        y = np.random.choice(choices, rhs_shape).astype(dtype)
+        return x, y
+
+    def create_model(self, op):
+        class aten_bitwise(torch.nn.Module):
+            def __init__(self, op) -> None:
+                super().__init__()
+                if op == "aten::__ior__":
+                    self.forward = self.forward_or
+                if op == "aten::__iand__":
+                    self.forward = self.forward_and
+                if op == "aten::__ixor__":
+                    self.forward = self.forward_xor
+
+            def forward_or(self, lhs, rhs):
+                return lhs.__ior__(rhs)
+
+            def forward_and(self, lhs, rhs):
+                return lhs.__iand__(rhs)
+
+            def forward_xor(self, lhs, rhs):
+                return lhs.__ixor__(rhs)
+
+        return aten_bitwise(op), None, op
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("dtype", ["bool", "int32"])
+    @pytest.mark.parametrize(
+        ("lhs_shape", "rhs_shape"),
+        [
+            ([2, 3], [2, 3]),
+            ([2, 3], []),
+        ],
+    )
+    @pytest.mark.parametrize("op", ["aten::__ior__", "aten::__iand__", "aten::__ixor__"])
+    def test_bitwise_operators(self, op, dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version):
+        if ie_device == "GPU" and dtype != "bool":
+            pytest.xfail(reason="bitwise ops are not supported on GPU")
+        self._test(
+            *self.create_model(op),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={
+                "dtype": dtype,
+                "lhs_shape": lhs_shape,
+                "rhs_shape": rhs_shape,
+            },
+            trace_model=True,
+            freeze_model=False,
+        )
\ No newline at end of file
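
For reference, the new table entries route the in-place dunder ops through op::inplace_op, which reuses the existing out-of-place bitwise translators and marks the first input as mutated. A minimal end-to-end sketch of what the new test covers is shown below; it assumes an OpenVINO build that includes this change, and the shapes, values, and device choice are illustrative only:

import torch
import openvino as ov


class InplaceOr(torch.nn.Module):
    def forward(self, lhs, rhs):
        # In-place bitwise OR; tracing records this as aten::__ior__
        return lhs.__ior__(rhs)


lhs = torch.tensor([[0, 1, 255, 7]], dtype=torch.int32)
rhs = torch.tensor([[7, 7, 7, 7]], dtype=torch.int32)

# Clone the mutated operand so the reference values below stay untouched
traced = torch.jit.trace(InplaceOr(), (lhs.clone(), rhs))
ov_model = ov.convert_model(traced, example_input=(lhs.clone(), rhs))
compiled = ov.compile_model(ov_model, "CPU")

result = compiled((lhs.numpy(), rhs.numpy()))[0]
# Expected to match the out-of-place reference
assert (result == torch.bitwise_or(lhs, rhs).numpy()).all()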