diff --git a/src/core/include/openvino/op/bitwise_and.hpp b/src/core/include/openvino/op/bitwise_and.hpp
new file mode 100644
index 00000000000000..4a9867b222aef5
--- /dev/null
+++ b/src/core/include/openvino/op/bitwise_and.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+#include "openvino/op/util/binary_elementwise_bitwise.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+/// \brief Elementwise bitwise AND operation.
+/// \ingroup ov_ops_cpp_api
+class OPENVINO_API BitwiseAnd : public util::BinaryElementwiseBitwise {
+public:
+ OPENVINO_OP("BitwiseAnd", "opset13", util::BinaryElementwiseBitwise);
+ /// \brief Constructs a bitwise AND operation.
+ BitwiseAnd() = default;
+ /// \brief Constructs a bitwise AND operation.
+ ///
+ /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]`
+ /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style
+ /// implicit broadcasting.
+ ///
+ /// Output `[d0, ...]`
+ ///
+ BitwiseAnd(const Output<Node>& arg0,
+ const Output<Node>& arg1,
+ const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
+
+ std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+};
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/include/openvino/op/bitwise_or.hpp b/src/core/include/openvino/op/bitwise_or.hpp
new file mode 100644
index 00000000000000..0f40a8500362a0
--- /dev/null
+++ b/src/core/include/openvino/op/bitwise_or.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+#include "openvino/op/util/binary_elementwise_bitwise.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+/// \brief Elementwise bitwise OR operation.
+/// \ingroup ov_ops_cpp_api
+class OPENVINO_API BitwiseOr : public util::BinaryElementwiseBitwise {
+public:
+ OPENVINO_OP("BitwiseOr", "opset13", util::BinaryElementwiseBitwise);
+ /// \brief Constructs a bitwise OR operation.
+ BitwiseOr() = default;
+ /// \brief Constructs a bitwise OR operation.
+ ///
+ /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]`
+ /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style
+ /// implicit broadcasting.
+ ///
+ /// Output `[d0, ...]`
+ ///
+ BitwiseOr(const Output<Node>& arg0,
+ const Output<Node>& arg1,
+ const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
+
+ std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+};
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/include/openvino/op/bitwise_xor.hpp b/src/core/include/openvino/op/bitwise_xor.hpp
new file mode 100644
index 00000000000000..6ebb07bfe38d73
--- /dev/null
+++ b/src/core/include/openvino/op/bitwise_xor.hpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+#include "openvino/op/util/binary_elementwise_bitwise.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+/// \brief Elementwise bitwise XOR operation.
+/// \ingroup ov_ops_cpp_api
+class OPENVINO_API BitwiseXor : public util::BinaryElementwiseBitwise {
+public:
+ OPENVINO_OP("BitwiseXor", "opset13", util::BinaryElementwiseBitwise);
+ /// \brief Constructs a bitwise XOR operation.
+ BitwiseXor() = default;
+ /// \brief Constructs a bitwise XOR operation.
+ ///
+ /// \param arg0 Output that produces the first input tensor.
+ /// `[d0, ...]`
+ /// \param arg1 Output that produces the second input tensor.
+ /// `[d0, ...]`
+ /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style
+ /// implicit broadcasting.
+ ///
+ /// Output `[d0, ...]`
+ ///
+ BitwiseXor(const Output<Node>& arg0,
+ const Output<Node>& arg1,
+ const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec(AutoBroadcastType::NUMPY));
+
+ std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+};
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/include/openvino/op/ops.hpp b/src/core/include/openvino/op/ops.hpp
index 159a84176c427d..b57372f118f19b 100644
--- a/src/core/include/openvino/op/ops.hpp
+++ b/src/core/include/openvino/op/ops.hpp
@@ -21,7 +21,10 @@
#include "openvino/op/batch_norm.hpp"
#include "openvino/op/batch_to_space.hpp"
#include "openvino/op/binary_convolution.hpp"
+#include "openvino/op/bitwise_and.hpp"
#include "openvino/op/bitwise_not.hpp"
+#include "openvino/op/bitwise_or.hpp"
+#include "openvino/op/bitwise_xor.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/bucketize.hpp"
#include "openvino/op/ceiling.hpp"
diff --git a/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp b/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp
new file mode 100644
index 00000000000000..16096219e4d110
--- /dev/null
+++ b/src/core/include/openvino/op/util/binary_elementwise_bitwise.hpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace util {
+class OPENVINO_API BinaryElementwiseBitwise : public Op {
+protected:
+ BinaryElementwiseBitwise();
+
+ /// \brief Constructs a binary elementwise bitwise operation.
+ ///
+ /// \param arg0 Output that produces the first input tensor.
+ /// \param arg1 Output that produces the second input tensor.
+ /// \param auto_broadcast Auto broadcast specification. Default is Numpy-style
+ /// implicit broadcasting.
+ BinaryElementwiseBitwise(const Output<Node>& arg0,
+ const Output<Node>& arg1,
+ const AutoBroadcastSpec& autob = AutoBroadcastSpec());
+
+public:
+ OPENVINO_OP("BinaryElementwiseBitwise", "util");
+
+ void validate_and_infer_types() override;
+
+ virtual const AutoBroadcastSpec& get_autob() const override;
+
+ void set_autob(const AutoBroadcastSpec& autob);
+ bool visit_attributes(AttributeVisitor& visitor) override;
+
+private:
+ AutoBroadcastSpec m_autob = AutoBroadcastType::NUMPY;
+};
+} // namespace util
+} // namespace op
+} // namespace ov
diff --git a/src/core/include/openvino/opsets/opset13_tbl.hpp b/src/core/include/openvino/opsets/opset13_tbl.hpp
index 353124af1afd21..8d543e49b67614 100644
--- a/src/core/include/openvino/opsets/opset13_tbl.hpp
+++ b/src/core/include/openvino/opsets/opset13_tbl.hpp
@@ -209,5 +209,8 @@ _OPENVINO_OP_REG(Pad, ov::op::v12)
_OPENVINO_OP_REG(ScatterElementsUpdate, ov::op::v12)
// New operations added in opset13
+_OPENVINO_OP_REG(BitwiseAnd, ov::op::v13)
_OPENVINO_OP_REG(BitwiseNot, ov::op::v13)
+_OPENVINO_OP_REG(BitwiseOr, ov::op::v13)
+_OPENVINO_OP_REG(BitwiseXor, ov::op::v13)
_OPENVINO_OP_REG(NMSRotated, ov::op::v13)
diff --git a/src/core/reference/include/openvino/reference/bitwise_and.hpp b/src/core/reference/include/openvino/reference/bitwise_and.hpp
new file mode 100644
index 00000000000000..a6422b5d489342
--- /dev/null
+++ b/src/core/reference/include/openvino/reference/bitwise_and.hpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <functional>
+#include <type_traits>
+
+#include "openvino/reference/autobroadcast_binop.hpp"
+
+namespace ov {
+namespace reference {
+/**
+ * @brief Reference implementation of binary elementwise bitwise AND operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+// Check for char datatype used by ov::element::boolean
+void bitwise_and(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_and<T>());
+}
+/**
+ * @brief Reference implementation of binary elementwise bitwise AND operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<!std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+void bitwise_and(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_and<T>());
+}
+} // namespace reference
+} // namespace ov
diff --git a/src/core/reference/include/openvino/reference/bitwise_or.hpp b/src/core/reference/include/openvino/reference/bitwise_or.hpp
new file mode 100644
index 00000000000000..54eb2fe91ffde0
--- /dev/null
+++ b/src/core/reference/include/openvino/reference/bitwise_or.hpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <functional>
+#include <type_traits>
+
+#include "openvino/reference/autobroadcast_binop.hpp"
+
+namespace ov {
+namespace reference {
+/**
+ * @brief Reference implementation of binary elementwise bitwise OR operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+// Check for char datatype used by ov::element::boolean
+void bitwise_or(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_or<T>());
+}
+/**
+ * @brief Reference implementation of binary elementwise bitwise OR operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<!std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+void bitwise_or(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_or<T>());
+}
+} // namespace reference
+} // namespace ov
diff --git a/src/core/reference/include/openvino/reference/bitwise_xor.hpp b/src/core/reference/include/openvino/reference/bitwise_xor.hpp
new file mode 100644
index 00000000000000..7204077c4abce7
--- /dev/null
+++ b/src/core/reference/include/openvino/reference/bitwise_xor.hpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <functional>
+#include <type_traits>
+
+#include "openvino/reference/autobroadcast_binop.hpp"
+
+namespace ov {
+namespace reference {
+/**
+ * @brief Reference implementation of binary elementwise bitwise XOR operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+// Check for char datatype used by ov::element::boolean
+void bitwise_xor(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_xor<T>());
+}
+/**
+ * @brief Reference implementation of binary elementwise bitwise XOR operator.
+ *
+ * @param arg0 Pointer to input 0 data.
+ * @param arg1 Pointer to input 1 data.
+ * @param out Pointer to output data.
+ * @param arg_shape0 Input 0 shape.
+ * @param arg_shape1 Input 1 shape.
+ * @param broadcast_spec Broadcast specification mode.
+ */
+template <class T, typename std::enable_if<!std::is_same<typename std::decay<T>::type, char>::value>::type* = nullptr>
+void bitwise_xor(const T* arg0,
+ const T* arg1,
+ T* out,
+ const Shape& arg0_shape,
+ const Shape& arg1_shape,
+ const op::AutoBroadcastSpec& broadcast_spec) {
+ autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::bit_xor<T>());
+}
+} // namespace reference
+} // namespace ov
diff --git a/src/core/src/op/bitwise_and.cpp b/src/core/src/op/bitwise_and.cpp
new file mode 100644
index 00000000000000..22da9e92f47386
--- /dev/null
+++ b/src/core/src/op/bitwise_and.cpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include "openvino/op/bitwise_and.hpp"
+
+#include "itt.hpp"
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+BitwiseAnd::BitwiseAnd(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
+ : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) {
+ constructor_validate_and_infer_types();
+}
+
+std::shared_ptr<Node> BitwiseAnd::clone_with_new_inputs(const OutputVector& new_args) const {
+ OV_OP_SCOPE(v13_BitwiseAnd_clone_with_new_inputs);
+ check_new_args_count(this, new_args);
+ return std::make_shared<BitwiseAnd>(new_args[0], new_args[1], get_autob());
+}
+
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/src/op/bitwise_not.cpp b/src/core/src/op/bitwise_not.cpp
index 92aeace18ad501..257a796fda2d0f 100644
--- a/src/core/src/op/bitwise_not.cpp
+++ b/src/core/src/op/bitwise_not.cpp
@@ -4,7 +4,6 @@
#include "openvino/op/bitwise_not.hpp"
#include "itt.hpp"
-#include "openvino/core/validation_util.hpp"
#include "openvino/op/op.hpp"
namespace ov {
diff --git a/src/core/src/op/bitwise_or.cpp b/src/core/src/op/bitwise_or.cpp
new file mode 100644
index 00000000000000..02ff0ad0830f1f
--- /dev/null
+++ b/src/core/src/op/bitwise_or.cpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include "openvino/op/bitwise_or.hpp"
+
+#include "itt.hpp"
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+BitwiseOr::BitwiseOr(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
+ : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) {
+ constructor_validate_and_infer_types();
+}
+
+std::shared_ptr<Node> BitwiseOr::clone_with_new_inputs(const OutputVector& new_args) const {
+ OV_OP_SCOPE(v13_BitwiseOr_clone_with_new_inputs);
+ check_new_args_count(this, new_args);
+ return std::make_shared<BitwiseOr>(new_args[0], new_args[1], get_autob());
+}
+
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/src/op/bitwise_xor.cpp b/src/core/src/op/bitwise_xor.cpp
new file mode 100644
index 00000000000000..320fe39f120359
--- /dev/null
+++ b/src/core/src/op/bitwise_xor.cpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include "openvino/op/bitwise_xor.hpp"
+
+#include "itt.hpp"
+#include "openvino/op/op.hpp"
+
+namespace ov {
+namespace op {
+namespace v13 {
+BitwiseXor::BitwiseXor(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
+ : BinaryElementwiseBitwise(arg0, arg1, auto_broadcast) {
+ constructor_validate_and_infer_types();
+}
+
+std::shared_ptr<Node> BitwiseXor::clone_with_new_inputs(const OutputVector& new_args) const {
+ OV_OP_SCOPE(v13_BitwiseXor_clone_with_new_inputs);
+ check_new_args_count(this, new_args);
+ return std::make_shared<BitwiseXor>(new_args[0], new_args[1], get_autob());
+}
+
+} // namespace v13
+} // namespace op
+} // namespace ov
diff --git a/src/core/src/op/util/binary_elementwise_bitwise.cpp b/src/core/src/op/util/binary_elementwise_bitwise.cpp
new file mode 100644
index 00000000000000..342bcf9cd757a8
--- /dev/null
+++ b/src/core/src/op/util/binary_elementwise_bitwise.cpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/util/binary_elementwise_bitwise.hpp"
+
+#include "itt.hpp"
+#include "openvino/op/util/elementwise_args.hpp"
+
+ov::op::util::BinaryElementwiseBitwise::BinaryElementwiseBitwise() = default;
+
+ov::op::util::BinaryElementwiseBitwise::BinaryElementwiseBitwise(const Output<Node>& arg0,
+ const Output<Node>& arg1,
+ const AutoBroadcastSpec& autob)
+ : Op({arg0, arg1}),
+ m_autob(autob) {}
+
+void ov::op::util::BinaryElementwiseBitwise::validate_and_infer_types() {
+ OV_OP_SCOPE(v0_util_BinaryElementwiseBitwise_validate_and_infer_types);
+ auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this);
+ const auto& args_et = std::get<0>(args_et_pshape);
+ const auto& args_pshape = std::get<1>(args_et_pshape);
+
+ NODE_VALIDATION_CHECK(this,
+ args_et.is_dynamic() || args_et.is_integral(),
+ "The element type of the input tensor must be integer or boolean.");
+
+ set_output_type(0, args_et, args_pshape);
+}
+
+bool ov::op::util::BinaryElementwiseBitwise::visit_attributes(AttributeVisitor& visitor) {
+ OV_OP_SCOPE(v0_util_BinaryElementwiseBitwise_visit_attributes);
+ visitor.on_attribute("auto_broadcast", m_autob);
+ return true;
+}
+const ov::op::AutoBroadcastSpec& ov::op::util::BinaryElementwiseBitwise::get_autob() const {
+ return m_autob;
+}
+void ov::op::util::BinaryElementwiseBitwise::set_autob(const AutoBroadcastSpec& autob) {
+ m_autob = autob;
+}
diff --git a/src/core/tests/op_version_tbl.hpp b/src/core/tests/op_version_tbl.hpp
index bf2fc789b12635..d861bfba0c4c50 100644
--- a/src/core/tests/op_version_tbl.hpp
+++ b/src/core/tests/op_version_tbl.hpp
@@ -26,7 +26,10 @@ _OPENVINO_OP_REG(AvgPool, ov::op::v1)
_OPENVINO_OP_REG(BatchNormInference, ov::op::v0)
_OPENVINO_OP_REG(BatchToSpace, ov::op::v1)
_OPENVINO_OP_REG(BinaryConvolution, ov::op::v1)
+_OPENVINO_OP_REG(BitwiseAnd, ov::op::v13)
_OPENVINO_OP_REG(BitwiseNot, ov::op::v13)
+_OPENVINO_OP_REG(BitwiseOr, ov::op::v13)
+_OPENVINO_OP_REG(BitwiseXor, ov::op::v13)
_OPENVINO_OP_REG(Broadcast, ov::op::v1)
_OPENVINO_OP_REG(Broadcast, ov::op::v3)
_OPENVINO_OP_REG(Bucketize, ov::op::v3)
diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp
index 947d2cdfa1f392..204f43ae8ff906 100644
--- a/src/core/tests/opset.cpp
+++ b/src/core/tests/opset.cpp
@@ -71,7 +71,7 @@ INSTANTIATE_TEST_SUITE_P(opset,
OpsetTestParams{ov::get_opset10, 177},
OpsetTestParams{ov::get_opset11, 177},
OpsetTestParams{ov::get_opset12, 178},
- OpsetTestParams{ov::get_opset13, 180}),
+ OpsetTestParams{ov::get_opset13, 183}),
OpsetTestNameGenerator{});
class MyOpOld : public ov::op::Op {
diff --git a/src/core/tests/type_prop/bitwise_and.cpp b/src/core/tests/type_prop/bitwise_and.cpp
new file mode 100644
index 00000000000000..0490f79c96e61c
--- /dev/null
+++ b/src/core/tests/type_prop/bitwise_and.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_and.hpp"
+
+#include "bitwise_ops.hpp"
+
+using Type = ::testing::Types<ov::op::v13::BitwiseAnd>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_and, BitwiseOperator, Type);
diff --git a/src/core/tests/type_prop/bitwise_ops.hpp b/src/core/tests/type_prop/bitwise_ops.hpp
new file mode 100644
index 00000000000000..3a8dc24df1b3ec
--- /dev/null
+++ b/src/core/tests/type_prop/bitwise_ops.hpp
@@ -0,0 +1,936 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gmock/gmock.h>
+
+#include <vector>
+
+#include "common_test_utils/test_assertions.hpp"
+#include "common_test_utils/type_prop.hpp"
+#include "openvino/core/dimension_tracker.hpp"
+#include "openvino/op/util/attr_types.hpp"
+
+using namespace ov;
+using op::v0::Parameter;
+using namespace testing;
+
+template <class TOp>
+class BitwiseOperator : public TypePropOpTest<TOp> {};
+
+TYPED_TEST_SUITE_P(BitwiseOperator);
+
+TYPED_TEST_P(BitwiseOperator, default_constructor_integer) {
+ auto lhs = std::make_shared<Parameter>(element::i32, PartialShape{-1, 4, 1, 6, {1, 6}, {2, 6}});
+ auto rhs = std::make_shared<Parameter>(element::i32, PartialShape{-1, 1, 5, 6, {5, 8}, {5, 8}});
+
+ const auto op = this->make_op();
+
+ op->set_argument(0, lhs);
+ op->set_argument(1, rhs);
+
+ auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE);
+ op->set_autob(autob);
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE);
+ ASSERT_THROW(op->validate_and_infer_types(), NodeValidationFailure);
+
+ autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
+ op->set_autob(autob);
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+
+ op->validate_and_infer_types();
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 4, 5, 6, {5, 8}, {5, 6}}));
+}
+
+TYPED_TEST_P(BitwiseOperator, default_constructor_boolean) {
+ auto lhs = std::make_shared<Parameter>(element::boolean, PartialShape{-1, 4, 1, 6, {1, 6}, {2, 6}});
+ auto rhs = std::make_shared<Parameter>(element::boolean, PartialShape{-1, 1, 5, 6, {5, 8}, {5, 8}});
+
+ const auto op = this->make_op();
+
+ op->set_argument(0, lhs);
+ op->set_argument(1, rhs);
+
+ auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE);
+ op->set_autob(autob);
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE);
+ ASSERT_THROW(op->validate_and_infer_types(), NodeValidationFailure);
+
+ autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
+ op->set_autob(autob);
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+
+ op->validate_and_infer_types();
+
+ EXPECT_EQ(op->get_element_type(), element::boolean);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 4, 5, 6, {5, 8}, {5, 6}}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_2D) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_4D) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 2, 3, 3});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 2, 3, 3});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 2, 3, 3}));
+}
+
+TYPED_TEST_P(BitwiseOperator, default_autobroadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+}
+
+TYPED_TEST_P(BitwiseOperator, no_autobroadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 2});
+
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE);
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_scalar_numpy_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{1});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_1D_numpy_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{5});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_2D_x_4D_numpy_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_3D_x_4D_numpy_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{1, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 1, 1});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_4D_x_3D_numpy_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{8, 1, 6, 1});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{7, 1, 5});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
+ EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+}
+
+TYPED_TEST_P(BitwiseOperator, static_shape_pdpd_doc_examples) {
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{3, 4});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{3, 1});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{5});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 3);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{1, 3});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{3, 1, 5});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+}
+
+TYPED_TEST_P(BitwiseOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) {
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{8, 1, 6, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{8, 1, 6, 5});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{8, 1, 6, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+ {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{8, 7, 6, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{8, 1, 6, 5});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+ }
+}
+
+TYPED_TEST_P(BitwiseOperator, static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{8, 7, 6, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{7, 1, 5});
+
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::PDPD);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
+ EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+}
+
+TYPED_TEST_P(BitwiseOperator, incompatible_element_types_f32) {
+ auto lhs = std::make_shared<Parameter>(element::f32, Shape{2, 2, 3, 3});
+ auto rhs = std::make_shared<Parameter>(element::f32, Shape{2, 2, 3, 3});
+
+ OV_EXPECT_THROW(std::ignore = this->make_op(lhs, rhs),
+ NodeValidationFailure,
+ HasSubstr("The element type of the input tensor must be integer or boolean."));
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_1D_x_1D_incompatible) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{3});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{4});
+
+ ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure);
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_3D_x_3D_incompatible) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{3, 5, 6});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{4, 10, 12});
+
+ ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure);
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_5D_x_5D_incompatible) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{389, 112, 12});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{389, 112, 19});
+
+ ASSERT_THROW(const auto unused = this->make_op(lhs, rhs), NodeValidationFailure);
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_axis_less_than_negative_1_pdpd_incompatible) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{3, 1});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, -2);
+
+ ASSERT_THROW(const auto unused = this->make_op(lhs, rhs, autob), NodeValidationFailure);
+}
+
+TYPED_TEST_P(BitwiseOperator, shape_inference_dst_smaller_than_src_pdpd_broadcast) {
+ auto lhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 1});
+ auto rhs = std::make_shared<Parameter>(element::i32, Shape{2, 3, 4, 5});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+
+ ASSERT_THROW(const auto unused = this->make_op(lhs, rhs, autob), NodeValidationFailure);
+}
+
+TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_numpy) {
+ auto param = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
+
+ const auto op = this->make_op(param, param, autob);
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+}
+
+TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_none) {
+ auto param = std::make_shared<Parameter>(element::i32, PartialShape::dynamic());
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE);
+
+ const auto op = this->make_op(param, param, autob);
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+}
+
+TYPED_TEST_P(BitwiseOperator, fully_dynamic_shape_broadcast_pdpd) {
+ auto param = std::make_shared(element::i32, PartialShape::dynamic());
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+
+ const auto op = this->make_op(param, param, autob);
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_3D) {
+ Dimension dynamic = Dimension::dynamic();
+ auto lhs = std::make_shared(element::i32, PartialShape{dynamic, dynamic, 6});
+ auto rhs = std::make_shared(element::i32, PartialShape{dynamic, dynamic, 6});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, dynamic, 6}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_5D) {
+ Dimension dynamic = Dimension::dynamic();
+ auto lhs = std::make_shared(element::i32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
+ auto rhs = std::make_shared(element::i32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, 4, dynamic, dynamic, 6}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_broadcast_none) {
+ auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8});
+
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, {6, -1}, {-1, 6}, -1, 8}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_equal_rank_broadcast_numpy) {
+ // Equal rank
+ auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {1, 3}, {1, 3}, {4, 8}, -1, 1, -1, 1, 3});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_a_rank_smaller_broadcast_numpy) {
+ // `lhs` rank smaller
+ auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_b_rank_smaller_broadcast_numpy) {
+ // `rhs` rank smaller
+ auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3}));
+}
+
+TYPED_TEST_P(BitwiseOperator, dynamic_shape_intervals_broadcast_pdpd) {
+ { // Equal rank
+ auto lhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, {1, 6}, {6, -1}, -1, 8});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, 1, 1, -1, 8});
+
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::PDPD);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, {1, 6}, {6, -1}, -1, 8}));
+ }
+ { // `lhs` rank smaller
+ auto lhs =
+ std::make_shared(element::i32, PartialShape{{1, 3}, {1, 3}, {1, 3}, {4, 8}, -1, 1, -1, 1, 3});
+ auto rhs =
+ std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3});
+
+ const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0);
+ const auto op = this->make_op(lhs, rhs, autob);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3}));
+ }
+ { // `rhs` rank smaller
+ auto lhs =
+ std::make_shared(element::i32, PartialShape{{1, 3}, {2, 7}, -1, 1, {1, 3}, {4, 8}, -1, 1, 3});
+ auto rhs = std::make_shared(element::i32, PartialShape{{1, 3}, {4, 8}, -1, 1, -1, 1, 3});
+
+ const auto op = this->make_op(lhs, rhs);
+
+ EXPECT_EQ(op->get_element_type(), element::i32);
+ EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{{1, 3}, {2, 7}, -1, {4, 8}, -1, {4, 8}, -1, 1, 3}));
+ }
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_a_dynamic_mixed_dims_broadcast_numpy) {
+ // All dimensions of lhs have labels, rhs without labels
+ PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}};
+ PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}};
+
+ PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}};
+
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(expected_shape, {10, 11, 0, 13});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_b_dynamic_mixed_dims_broadcast_numpy) {
+ // All dimensions of rhs have labels, lhs without labels
+ PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}};
+ PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}};
+
+ PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}};
+
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+ set_shape_labels(expected_shape, {20, 21, 22, 0});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_interval_mixed_dims_broadcast_numpy) {
+ // Both params have dimensions with different labels
+ PartialShape pshape_lhs{{-1}, {3}, {1}, {2, 128}};
+ PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1}};
+
+ PartialShape expected_shape = {-1, 3, {2, 224}, {2, 128}};
+
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+ set_shape_labels(expected_shape, {0, 21, 22, 13});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_interval_b_and_fully_dyn_a_broadcast_numpy) {
+ // Both params have dimension labels, output has label rhs
+ Dimension dim_0_lhs = {-1};
+ Dimension dim_0_rhs = {2, 4};
+
+ DimensionTracker::set_label(dim_0_lhs, 10);
+ DimensionTracker::set_label(dim_0_rhs, 20);
+
+ PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224};
+ PartialShape expected_shape = {{2, 4}, 3, 224, 224};
+ TensorLabel expected_labels{20, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_interval_a_and_fully_dyn_b_broadcast_numpy) {
+ // Both params have dimension labels, output has label lhs
+ Dimension dim_0_lhs = {2, 4};
+ Dimension dim_0_rhs = {-1};
+
+ DimensionTracker::set_label(dim_0_lhs, 10);
+ DimensionTracker::set_label(dim_0_rhs, 20);
+
+ PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224};
+ PartialShape expected_shape = {{2, 4}, 3, 224, 224};
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_equal_interval_dims_without_one_broadcast_numpy) {
+ // Both params have dynamic interval dimension the same labels
+ PartialShape pshape_lhs{{2, 4}, {8, 16}, {8, 16}, {8, 16}};
+ PartialShape pshape_rhs{{2, 4}, {4, 12}, {10, 12}, {16, 24}};
+
+ PartialShape expected_shape = {{2, 4}, {8, 12}, {10, 12}, 16};
+
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {10, 11, 12, 13});
+ set_shape_labels(expected_shape, {10, 11, 12, 13});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_interval_dims_without_one_broadcast_numpy) {
+ // Both params have dynamic interval dimension different labels
+ PartialShape pshape_lhs{{2, 4}, {8, 16}, {8, 16}, {8, 16}};
+ PartialShape pshape_rhs{{2, 4}, {4, 12}, {10, 12}, {16, 24}};
+
+ PartialShape expected_shape = {{2, 4}, {8, 12}, {10, 12}, 16};
+ TensorLabel expected_labels{20, 21, 22, 23};
+
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_interval_batch_without_one_equivalence_table_broadcast_numpy) {
+    // Both params have dynamic interval dimension different labels, use table of equivalence
+    auto table_of_equivalence = std::make_shared<TableOfEquivalence>();
+    DimensionTracker dim_tracker(table_of_equivalence);
+
+    Dimension dim_0_lhs = {2, 4};
+    Dimension dim_0_rhs = {2, 4};
+
+    dim_tracker.set_up_for_tracking(dim_0_lhs, 10);
+    dim_tracker.set_up_for_tracking(dim_0_rhs, 20);
+
+    PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224};
+
+    auto lhs = std::make_shared<op::v0::Parameter>(element::i32, pshape_lhs);
+    auto rhs = std::make_shared<op::v0::Parameter>(element::i32, pshape_rhs);
+
+    const auto op = this->make_op(lhs, rhs);
+
+    const auto out_shape = op->get_output_partial_shape(0);
+
+    PartialShape expected_shape = {{2, 4}, 3, 224, 224};
+    TensorLabel expected_labels{20, 0, 0, 0};
+
+    auto eq_table = table_of_equivalence->get_equivalence_table();
+    EXPECT_EQ(*eq_table[DimensionTracker::get_label(dim_0_lhs)], std::set<ov::label_t>({10, 20}));
+    EXPECT_EQ(*eq_table[DimensionTracker::get_label(dim_0_rhs)], std::set<ov::label_t>({10, 20}));
+
+    EXPECT_EQ(out_shape, expected_shape);
+    EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_fully_dynamic_batch_broadcast_numpy) {
+ // Both params have fully dynamic dimension and different labels
+ Dimension dim_0_lhs = {-1};
+ Dimension dim_0_rhs = {-1};
+
+ DimensionTracker::set_label(dim_0_lhs, 10);
+ DimensionTracker::set_label(dim_0_rhs, 20);
+
+ PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224};
+ PartialShape expected_shape = {-1, 3, 224, 224};
+ TensorLabel expected_labels{0, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_equal_fully_dynamic_batch_broadcast_numpy) {
+ // Both params have fully dynamic dimension and the same labels
+ Dimension dim_0_lhs = {-1};
+ Dimension dim_0_rhs = {-1};
+
+ DimensionTracker::set_label(dim_0_lhs, 10);
+ DimensionTracker::set_label(dim_0_rhs, 10);
+
+ PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 1}, pshape_rhs = {dim_0_rhs, 3, 1, 224};
+ PartialShape expected_shape = {-1, 3, 224, 224};
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_a_broadcast_numpy) {
+ Dimension dim_0_lhs = -1;
+ DimensionTracker::set_label(dim_0_lhs, 10);
+ PartialShape pshape_lhs = {dim_0_lhs, 3, 224, 224}, pshape_rhs = {1, 3, 1, 1};
+ PartialShape expected_shape{dim_0_lhs, 3, 224, 224};
+
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i64, pshape_lhs);
+ auto rhs = std::make_shared(element::i64, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_b_broadcast_numpy) {
+ Dimension dim_0_rhs = -1;
+ DimensionTracker::set_label(dim_0_rhs, 10);
+ PartialShape pshape_rhs = {dim_0_rhs, 3, 224, 224}, pshape_lhs = {1, 3, 1, 1};
+ PartialShape expected_shape{dim_0_rhs, 3, 224, 224};
+
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i64, pshape_lhs);
+ auto rhs = std::make_shared(element::i64, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_and_higher_rank_a_broadcast_numpy) {
+ Dimension dim_0_lhs = -1;
+ DimensionTracker::set_label(dim_0_lhs, 10);
+
+ PartialShape pshape_lhs{dim_0_lhs, -1, -1, -1};
+ PartialShape pshape_rhs{3, 1, 1};
+ PartialShape expected_shape{dim_0_lhs, 3, -1, -1};
+
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i64, pshape_lhs);
+ auto rhs = std::make_shared(element::i64, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_dyn_batch_and_higher_rank_b_broadcast_numpy) {
+ Dimension dim_0_rhs = -1;
+ DimensionTracker::set_label(dim_0_rhs, 10);
+
+ PartialShape pshape_lhs{3, 1, 1};
+ PartialShape pshape_rhs{dim_0_rhs, -1, -1, -1};
+ PartialShape expected_shape{dim_0_rhs, 3, -1, -1};
+
+ TensorLabel expected_labels{10, 0, 0, 0};
+
+ auto lhs = std::make_shared(element::i64, pshape_lhs);
+ auto rhs = std::make_shared(element::i64, pshape_rhs);
+
+ const auto op = this->make_op(lhs, rhs);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_static_shape_broadcast_numpy) {
+ // Static shape, different labels
+ PartialShape pshape_lhs{{2}, {1}, {224}, {1}};
+ PartialShape pshape_rhs{{2}, {1}, {1}, {128}};
+ PartialShape expected_shape{2, 1, 224, 128};
+
+ // Different labels
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+ set_shape_labels(expected_shape, {20, 21, 12, 23});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NUMPY);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_equal_static_shape_broadcast_numpy) {
+ // Static shape, the same labels
+ PartialShape pshape_lhs{2, 1, 224, 1};
+ PartialShape pshape_rhs{2, 1, 1, 128};
+ PartialShape expected_shape{2, 1, 224, 128};
+
+ // Equal labels
+ set_shape_labels(pshape_lhs, {30, 31, 32, 33});
+ set_shape_labels(pshape_rhs, {30, 31, 32, 33});
+ set_shape_labels(expected_shape, {30, 31, 32, 33});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NUMPY);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_static_shape_broadcast_none) {
+ // Static shape
+ PartialShape pshape_lhs{2, 3, 224, 128};
+ PartialShape pshape_rhs{2, 3, 224, 128};
+ PartialShape expected_shape{2, 3, 224, 128};
+
+ // Different labels
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+ set_shape_labels(expected_shape, {20, 21, 22, 23});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_equal_static_shape_broadcast_none) {
+ // Static shape
+ PartialShape pshape_lhs{2, 3, 224, 128};
+ PartialShape pshape_rhs{2, 3, 224, 128};
+ PartialShape expected_shape{2, 3, 224, 128};
+
+ // Equal labels
+ set_shape_labels(pshape_lhs, {30, 31, 32, 33});
+ set_shape_labels(pshape_rhs, {30, 31, 32, 33});
+ set_shape_labels(expected_shape, {30, 31, 32, 33});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_different_dynamic_shape_broadcast_none) {
+ // Dynamic shape
+ PartialShape pshape_lhs{{-1}, {3}, {2, 224}, {1, 128}};
+ PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1, 128}};
+ PartialShape expected_shape{-1, 3, {2, 224}, {1, 128}};
+
+ // Different labels
+ set_shape_labels(pshape_lhs, {10, 11, 12, 13});
+ set_shape_labels(pshape_rhs, {20, 21, 22, 23});
+ set_shape_labels(expected_shape, {20, 21, 22, 23});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+TYPED_TEST_P(BitwiseOperator, labels_equal_dynamic_shape_broadcast_none) {
+ // Dynamic shape
+ PartialShape pshape_lhs{{-1}, {3}, {2, 224}, {1, 128}};
+ PartialShape pshape_rhs{{-1}, {3}, {2, 224}, {1, 128}};
+ PartialShape expected_shape{-1, 3, {2, 224}, {1, 128}};
+
+ // Equal labels
+ set_shape_labels(pshape_lhs, {30, 31, 32, 33});
+ set_shape_labels(pshape_rhs, {30, 31, 32, 33});
+ set_shape_labels(expected_shape, {30, 31, 32, 33});
+
+ auto lhs = std::make_shared(element::i32, pshape_lhs);
+ auto rhs = std::make_shared(element::i32, pshape_rhs);
+ const auto op = this->make_op(lhs, rhs, op::AutoBroadcastType::NONE);
+
+ const auto out_shape = op->get_output_partial_shape(0);
+
+ EXPECT_EQ(out_shape, expected_shape);
+ EXPECT_EQ(get_shape_labels(out_shape), get_shape_labels(expected_shape));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(BitwiseOperator,
+ default_constructor_integer,
+ default_constructor_boolean,
+
+ // Static shapes
+ shape_inference_2D,
+ shape_inference_4D,
+ default_autobroadcast,
+ no_autobroadcast,
+ shape_inference_4D_x_scalar_numpy_broadcast,
+ shape_inference_4D_x_1D_numpy_broadcast,
+ shape_inference_2D_x_4D_numpy_broadcast,
+ shape_inference_3D_x_4D_numpy_broadcast,
+ shape_inference_4D_x_3D_numpy_broadcast,
+ static_shape_pdpd_doc_examples,
+ static_shape_inference_4D_x_4D_pdpd_broadcast,
+ static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast,
+ incompatible_element_types_f32,
+ shape_inference_1D_x_1D_incompatible,
+ shape_inference_3D_x_3D_incompatible,
+ shape_inference_5D_x_5D_incompatible,
+ shape_inference_axis_less_than_negative_1_pdpd_incompatible,
+ shape_inference_dst_smaller_than_src_pdpd_broadcast,
+
+ // Dynamic shapes
+ fully_dynamic_shape_broadcast_numpy,
+ fully_dynamic_shape_broadcast_none,
+ fully_dynamic_shape_broadcast_pdpd,
+ dynamic_shape_3D,
+ dynamic_shape_5D,
+ dynamic_shape_intervals_broadcast_none,
+ dynamic_shape_intervals_equal_rank_broadcast_numpy,
+ dynamic_shape_intervals_a_rank_smaller_broadcast_numpy,
+ dynamic_shape_intervals_b_rank_smaller_broadcast_numpy,
+ dynamic_shape_intervals_broadcast_pdpd,
+
+ // Dimension labels (static and dynamic)
+ labels_a_dynamic_mixed_dims_broadcast_numpy,
+ labels_b_dynamic_mixed_dims_broadcast_numpy,
+ labels_different_interval_mixed_dims_broadcast_numpy,
+ labels_different_interval_b_and_fully_dyn_a_broadcast_numpy,
+ labels_different_interval_a_and_fully_dyn_b_broadcast_numpy,
+ labels_equal_interval_dims_without_one_broadcast_numpy,
+ labels_different_interval_dims_without_one_broadcast_numpy,
+ labels_different_interval_batch_without_one_equivalence_table_broadcast_numpy,
+ labels_different_fully_dynamic_batch_broadcast_numpy,
+ labels_equal_fully_dynamic_batch_broadcast_numpy,
+ labels_dyn_batch_a_broadcast_numpy,
+ labels_dyn_batch_b_broadcast_numpy,
+ labels_dyn_batch_and_higher_rank_a_broadcast_numpy,
+ labels_dyn_batch_and_higher_rank_b_broadcast_numpy,
+ labels_different_static_shape_broadcast_numpy,
+ labels_equal_static_shape_broadcast_numpy,
+ labels_different_static_shape_broadcast_none,
+ labels_equal_static_shape_broadcast_none,
+ labels_different_dynamic_shape_broadcast_none,
+ labels_equal_dynamic_shape_broadcast_none);
diff --git a/src/core/tests/type_prop/bitwise_or.cpp b/src/core/tests/type_prop/bitwise_or.cpp
new file mode 100644
index 00000000000000..bb41322f1dec49
--- /dev/null
+++ b/src/core/tests/type_prop/bitwise_or.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_or.hpp"
+
+#include "bitwise_ops.hpp"
+
+using Type = ::testing::Types<ov::op::v13::BitwiseOr>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_or, BitwiseOperator, Type);
diff --git a/src/core/tests/type_prop/bitwise_xor.cpp b/src/core/tests/type_prop/bitwise_xor.cpp
new file mode 100644
index 00000000000000..00a1a299573882
--- /dev/null
+++ b/src/core/tests/type_prop/bitwise_xor.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_xor.hpp"
+
+#include "bitwise_ops.hpp"
+
+using Type = ::testing::Types<ov::op::v13::BitwiseXor>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_bitwise_xor, BitwiseOperator, Type);
diff --git a/src/core/tests/visitors/op/bitwise_and.cpp b/src/core/tests/visitors/op/bitwise_and.cpp
new file mode 100644
index 00000000000000..35c29762061283
--- /dev/null
+++ b/src/core/tests/visitors/op/bitwise_and.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_and.hpp"
+
+#include "binary_ops.hpp"
+
+using Type = ::testing::Types<BinaryOperatorType<ov::op::v13::BitwiseAnd, ov::element::Type_t::i32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName);
diff --git a/src/core/tests/visitors/op/bitwise_or.cpp b/src/core/tests/visitors/op/bitwise_or.cpp
new file mode 100644
index 00000000000000..ebcff6e5e932b0
--- /dev/null
+++ b/src/core/tests/visitors/op/bitwise_or.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_or.hpp"
+
+#include "binary_ops.hpp"
+
+using Type = ::testing::Types<BinaryOperatorType<ov::op::v13::BitwiseOr, ov::element::Type_t::i32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName);
diff --git a/src/core/tests/visitors/op/bitwise_xor.cpp b/src/core/tests/visitors/op/bitwise_xor.cpp
new file mode 100644
index 00000000000000..ef36fc98ab707d
--- /dev/null
+++ b/src/core/tests/visitors/op/bitwise_xor.cpp
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_xor.hpp"
+
+#include "binary_ops.hpp"
+
+using Type = ::testing::Types<BinaryOperatorType<ov::op::v13::BitwiseXor, ov::element::Type_t::i32>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast, BinaryOperatorVisitor, Type, BinaryOperatorTypeName);
diff --git a/src/plugins/template/backend/ops/bitwise_and.cpp b/src/plugins/template/backend/ops/bitwise_and.cpp
new file mode 100644
index 00000000000000..d0e5d05b11360d
--- /dev/null
+++ b/src/plugins/template/backend/ops/bitwise_and.cpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_and.hpp"
+
+#include "evaluate_node.hpp"
+#include "openvino/reference/bitwise_and.hpp"
+#include "utils.hpp"
+
+using namespace ov;
+
+template <element::Type_t ET>
+bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseAnd>& node,
+              ov::TensorVector& outputs,
+              const ov::TensorVector& inputs) {
+    OPENVINO_ASSERT(inputs.size() == 2);
+    OPENVINO_ASSERT(outputs.size() == 1);
+    outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
+    using T = typename ov::element_type_traits<ET>::value_type;
+    ov::reference::bitwise_and(inputs[0].data<const T>(),
+                               inputs[1].data<const T>(),
+                               outputs[0].data<T>(),
+                               inputs[0].get_shape(),
+                               inputs[1].get_shape(),
+                               node->get_autob());
+    return true;
+}
+
+template <>
+bool evaluate_node<ov::op::v13::BitwiseAnd>(std::shared_ptr<ov::Node> node,
+                                            ov::TensorVector& outputs,
+                                            const ov::TensorVector& inputs) {
+    switch (node->get_input_element_type(0)) {
+    case element::boolean:
+        return evaluate<element::boolean>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::u8:
+        return evaluate<element::u8>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::i8:
+        return evaluate<element::i8>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::u16:
+        return evaluate<element::u16>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::i16:
+        return evaluate<element::i16>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::u32:
+        return evaluate<element::u32>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::i32:
+        return evaluate<element::i32>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::u64:
+        return evaluate<element::u64>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    case element::i64:
+        return evaluate<element::i64>(as_type_ptr<ov::op::v13::BitwiseAnd>(node), outputs, inputs);
+    default:
+        OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()");
+    }
+}
diff --git a/src/plugins/template/backend/ops/bitwise_not.cpp b/src/plugins/template/backend/ops/bitwise_not.cpp
index 91a73fa0dd1c3f..83f1d77750eeec 100644
--- a/src/plugins/template/backend/ops/bitwise_not.cpp
+++ b/src/plugins/template/backend/ops/bitwise_not.cpp
@@ -19,7 +19,7 @@ bool evaluate(const std::shared_ptr& node,
outputs[0].set_shape(inputs[0].get_shape());
using T = typename ov::element_type_traits::value_type;
- ov::reference::bitwise_not(inputs[0].data(), outputs[0].data(), shape_size(inputs[0].get_shape()));
+ ov::reference::bitwise_not(inputs[0].data(), outputs[0].data(), shape_size(inputs[0].get_shape()));
return true;
}
diff --git a/src/plugins/template/backend/ops/bitwise_or.cpp b/src/plugins/template/backend/ops/bitwise_or.cpp
new file mode 100644
index 00000000000000..fe163edeccb3a1
--- /dev/null
+++ b/src/plugins/template/backend/ops/bitwise_or.cpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_or.hpp"
+
+#include "evaluate_node.hpp"
+#include "openvino/reference/bitwise_or.hpp"
+#include "utils.hpp"
+
+using namespace ov;
+
+template <element::Type_t ET>
+bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseOr>& node,
+              ov::TensorVector& outputs,
+              const ov::TensorVector& inputs) {
+    OPENVINO_ASSERT(inputs.size() == 2);
+    OPENVINO_ASSERT(outputs.size() == 1);
+    outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
+    using T = typename ov::element_type_traits<ET>::value_type;
+    ov::reference::bitwise_or(inputs[0].data<const T>(),
+                              inputs[1].data<const T>(),
+                              outputs[0].data<T>(),
+                              inputs[0].get_shape(),
+                              inputs[1].get_shape(),
+                              node->get_autob());
+    return true;
+}
+
+template <>
+bool evaluate_node<ov::op::v13::BitwiseOr>(std::shared_ptr<ov::Node> node,
+                                           ov::TensorVector& outputs,
+                                           const ov::TensorVector& inputs) {
+    switch (node->get_input_element_type(0)) {
+    case element::boolean:
+        return evaluate<element::boolean>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::u8:
+        return evaluate<element::u8>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::i8:
+        return evaluate<element::i8>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::u16:
+        return evaluate<element::u16>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::i16:
+        return evaluate<element::i16>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::u32:
+        return evaluate<element::u32>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::i32:
+        return evaluate<element::i32>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::u64:
+        return evaluate<element::u64>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    case element::i64:
+        return evaluate<element::i64>(as_type_ptr<ov::op::v13::BitwiseOr>(node), outputs, inputs);
+    default:
+        OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()");
+    }
+}
diff --git a/src/plugins/template/backend/ops/bitwise_xor.cpp b/src/plugins/template/backend/ops/bitwise_xor.cpp
new file mode 100644
index 00000000000000..3fa98775a05e18
--- /dev/null
+++ b/src/plugins/template/backend/ops/bitwise_xor.cpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/op/bitwise_xor.hpp"
+
+#include "evaluate_node.hpp"
+#include "openvino/reference/bitwise_xor.hpp"
+#include "utils.hpp"
+
+using namespace ov;
+
+template <element::Type_t ET>
+bool evaluate(const std::shared_ptr<ov::op::v13::BitwiseXor>& node,
+              ov::TensorVector& outputs,
+              const ov::TensorVector& inputs) {
+    OPENVINO_ASSERT(inputs.size() == 2);
+    OPENVINO_ASSERT(outputs.size() == 1);
+    outputs[0].set_shape(infer_broadcast_shape(node.get(), inputs[0].get_shape(), inputs[1].get_shape()));
+    using T = typename ov::element_type_traits<ET>::value_type;
+    ov::reference::bitwise_xor(inputs[0].data<const T>(),
+                               inputs[1].data<const T>(),
+                               outputs[0].data<T>(),
+                               inputs[0].get_shape(),
+                               inputs[1].get_shape(),
+                               node->get_autob());
+    return true;
+}
+
+template <>
+bool evaluate_node<ov::op::v13::BitwiseXor>(std::shared_ptr<ov::Node> node,
+                                            ov::TensorVector& outputs,
+                                            const ov::TensorVector& inputs) {
+    switch (node->get_input_element_type(0)) {
+    case element::boolean:
+        return evaluate<element::boolean>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::u8:
+        return evaluate<element::u8>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::i8:
+        return evaluate<element::i8>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::u16:
+        return evaluate<element::u16>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::i16:
+        return evaluate<element::i16>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::u32:
+        return evaluate<element::u32>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::i32:
+        return evaluate<element::i32>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::u64:
+        return evaluate<element::u64>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    case element::i64:
+        return evaluate<element::i64>(as_type_ptr<ov::op::v13::BitwiseXor>(node), outputs, inputs);
+    default:
+        OPENVINO_THROW("Unhandled data type ", node->get_element_type().get_type_name(), "in evaluate_node()");
+    }
+}
diff --git a/src/plugins/template/backend/ops/ops_evaluates.hpp b/src/plugins/template/backend/ops/ops_evaluates.hpp
index 0b860fcd4b28c6..040fd8334a3527 100644
--- a/src/plugins/template/backend/ops/ops_evaluates.hpp
+++ b/src/plugins/template/backend/ops/ops_evaluates.hpp
@@ -445,10 +445,22 @@ extern template bool evaluate_node(std::shared_
ov::TensorVector& outputs,
const ov::TensorVector& inputs);
+extern template bool evaluate_node