add support for aten::__ior__ (openvinotoolkit#27315)
### Details:
 - *add support for `aten::__ior__`, `aten::__iand__`, `aten::__ixor__` (see the sketch below)*


### Tickets:
 - *CVS-156301*
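
For context, the three converters map the in-place bitwise dunder methods that the TorchScript frontend sees when a model mutates a tensor in place. A minimal illustrative sketch (not part of this commit; the `InplaceBitwise` module is hypothetical) that should exercise all three ops when traced:

```python
import torch


class InplaceBitwise(torch.nn.Module):
    def forward(self, x, y):
        # Explicit in-place dunder calls, mirroring the new layer test below.
        # The augmented assignments x |= y, x &= y, x ^= y dispatch to the
        # same methods and should appear as aten::__ior__/__iand__/__ixor__.
        x = x.__ior__(y)
        x = x.__iand__(y)
        return x.__ixor__(y)


# Tracing keeps the in-place ops in the TorchScript graph consumed by the
# OpenVINO PyTorch frontend.
example = (torch.tensor([1, 0, 255, 7]), torch.tensor([7, 7, 7, 7]))
traced = torch.jit.trace(InplaceBitwise(), example)
print(traced.graph)
```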
eaidova authored Oct 30, 2024
1 parent 11cf409 commit cb292c7
Showing 2 changed files with 61 additions and 0 deletions.
3 changes: 3 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
@@ -331,12 +331,15 @@ OP_CONVERTER(translate_zeros_like_fx);
const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
    return {
        {"aten::__and__", op::translate_bitwise_and},
        {"aten::__iand__", op::inplace_op<op::translate_bitwise_and>},
        {"aten::__derive_index", op::translate_derive_index},
        {"aten::__getitem__", op::translate_getitem},
        {"aten::__not__", op::translate_1to1_match_1_inputs<opset10::LogicalNot>},
        {"aten::__or__", op::translate_bitwise_or},
        {"aten::__ior__", op::inplace_op<op::translate_bitwise_or>},
        {"aten::__range_length", op::translate_range_length},
        {"aten::__xor__", op::translate_bitwise_xor},
        {"aten::__ixor__", op::inplace_op<op::translate_bitwise_xor>},
        {"aten::_convolution", op::translate_convolution},
        {"aten::_convolution_mode", op::translate_convolution_mode},
        {"aten::_native_multi_head_attention", op::translate_native_multi_head_attention},
58 changes: 58 additions & 0 deletions tests/layer_tests/pytorch_tests/test_bitwise_ops.py
@@ -140,3 +140,61 @@ def test_bitwise_operators(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_
            trace_model=True,
            freeze_model=False,
        )


class TestBitwiseInplaceOp(PytorchLayerTest):
    def _prepare_input(self, lhs_shape, rhs_shape, dtype):
        choices = np.array([0, 1, 255, 7])
        x = np.random.choice(choices, lhs_shape).astype(dtype)
        y = np.random.choice(choices, rhs_shape).astype(dtype)
        return x, y

    def create_model(self, op):
        class aten_bitwise(torch.nn.Module):
            def __init__(self, op) -> None:
                super().__init__()
                if op == "aten::__ior__":
                    self.forward = self.forward_or
                if op == "aten::__iand__":
                    self.forward = self.forward_and
                if op == "aten::__ixor__":
                    self.forward = self.forward_xor

            def forward_or(self, lhs, rhs):
                return lhs.__ior__(rhs)

            def forward_and(self, lhs, rhs):
                return lhs.__iand__(rhs)

            def forward_xor(self, lhs, rhs):
                return lhs.__ixor__(rhs)

        return aten_bitwise(op), None, op

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("dtype", ["bool", "int32"])
    @pytest.mark.parametrize(
        ("lhs_shape", "rhs_shape"),
        [
            ([2, 3], [2, 3]),
            ([2, 3], []),
        ],
    )
    @pytest.mark.parametrize("op", ["aten::__ior__", "aten::__iand__", "aten::__ixor__"])
    def test_bitwise_operators(self, op, dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version):
        if ie_device == "GPU" and dtype != "bool":
            pytest.xfail(reason="bitwise ops are not supported on GPU")
        self._test(
            *self.create_model(op),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input={
                "dtype": dtype,
                "lhs_shape": lhs_shape,
                "rhs_shape": rhs_shape,
            },
            trace_model=True,
            freeze_model=False,
        )
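
With the converters above registered, a traced model that uses these in-place ops should convert end-to-end. A hedged sketch of such a conversion (assuming an OpenVINO build containing this commit; `InplaceBitwise` is the same hypothetical module sketched under the commit message):

```python
import openvino as ov
import torch


class InplaceBitwise(torch.nn.Module):
    def forward(self, x, y):
        x = x.__ior__(y)
        x = x.__iand__(y)
        return x.__ixor__(y)


example = (torch.tensor([1, 0, 255, 7]), torch.tensor([7, 7, 7, 7]))
# convert_model traces the module with example_input; the traced
# aten::__ior__/__iand__/__ixor__ nodes go through the new converters.
ov_model = ov.convert_model(InplaceBitwise(), example_input=example)
compiled = ov.compile_model(ov_model, "CPU")
result = compiled([t.numpy() for t in example])
print(result[0])
```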
