Skip to content

Commit

Permalink
Merge branch 'master' into ci/gha/remove-cpu-cores-action
Browse files Browse the repository at this point in the history
  • Loading branch information
akashchi authored Mar 21, 2024
2 parents d27c683 + 9072ea7 commit 232ccd7
Show file tree
Hide file tree
Showing 53 changed files with 1,742 additions and 512 deletions.
17 changes: 10 additions & 7 deletions src/bindings/python/src/openvino/frontend/pytorch/patch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,7 @@ def __exit__(self, *args):


def patch_model(model, module_extensions, orig_forward_name):
for name, m in model.named_modules():
if hasattr(m, orig_forward_name):
# already patched, skipping with a warning because it is unexpected
print(f'[ WARNING ] Unexpectedly found already patched module {name} while applying ModuleExtension during PyTorch model conversion. '
'Result of the conversion maybe broken. Depending on the exact issue it may lead to broken original model.')
continue
def module_patcher(m, name):
extension = None
if m in module_extensions:
extension = module_extensions[m]
Expand Down Expand Up @@ -54,7 +49,7 @@ def forward(*args, **kwargs):
m.forward = getattr(m, orig_forward_name)
# call user code
results = extension.evaluate(
m, *Trampoline.stashed_args, **Trampoline.stashed_kwargs)
m, *Trampoline.stashed_args, **Trampoline.stashed_kwargs) # call user code
m.forward = patched_forward # return patched forward back
return results

Expand All @@ -65,6 +60,14 @@ def new_forward(*args, **kwargs):
setattr(m, orig_forward_name, m.forward)
m.forward = new_forward

for name, m in model.named_modules():
if hasattr(m, orig_forward_name):
# already patched, skipping with a warning because it is unexpected
print(f'[ WARNING ] Unexpectedly found already patched module {name} while applying ModuleExtension during PyTorch model conversion. '
'Result of the conversion maybe broken. Depending on the exact issue it may lead to broken original model.')
continue
module_patcher(m, name)


def unpatch_model(model, orig_forward_name):
for _, m in model.named_modules():
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -607,7 +607,11 @@ bool fuse_type_to_parameter(const std::shared_ptr<ov::Node>& node,
auto convert = std::make_shared<opset4::Convert>(param, to);
for (auto& input : param_consumers) {
const auto consumer = input.get_node();
if (ov::is_type<ov::op::v0::Result>(consumer) || ov::is_type<ov::op::v0::Convert>(consumer)) {
if (ov::is_type<ov::op::v0::Result>(consumer) || ov::is_type<ov::op::v0::Convert>(consumer) ||
// TODO: refactor after ngraph op defined
// The fourth and fifth inputs are kvcache and should be directly connected to parameters
(consumer->get_type_name() == std::string("PagedAttentionExtension") &&
(input.get_index() == 3 || input.get_index() == 4))) {
continue;
}
input.replace_source_output(convert);
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/divide.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ bool Divide::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
this,
outputs,
inputs,
OV_PP_ET_LIST(f32, i32, i64, u32, u64),
OV_PP_ET_LIST(f32, i8, i32, i64, u8, u32, u64),
divide::Evaluate,
inputs[0].get_element_type(),
inputs[0],
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/op/multiply.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ bool Multiply::evaluate(TensorVector& outputs, const TensorVector& inputs) const
this,
outputs,
inputs,
OV_PP_ET_LIST(f32, f64, i32, i64, u32, u64),
OV_PP_ET_LIST(f32, f64, i8, i32, i64, u8, u32, u64),
multiply::Evaluate,
inputs[0].get_element_type(),
inputs[0],
Expand Down
2 changes: 1 addition & 1 deletion src/frontends/onnx/frontend/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ ov_add_frontend(NAME onnx
FILEDESCRIPTION "FrontEnd to load and convert ONNX file format"
LINK_LIBRARIES openvino_onnx_common openvino::core::dev)

set(ONNX_OPSET_VERSION 18 CACHE INTERNAL "Supported version of ONNX operator set")
set(ONNX_OPSET_VERSION 20 CACHE INTERNAL "Supported version of ONNX operator set")
target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION})

ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
Expand Down
46 changes: 45 additions & 1 deletion src/frontends/onnx/frontend/src/op/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include "identity.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/exp.hpp"
#include "openvino/op/log.hpp"
#include "openvino/op/multiply.hpp"
Expand Down Expand Up @@ -94,6 +95,27 @@ const std::set<element::Type> supported_types_v1 =
{element::u32, element::u64, element::i32, element::i64, element::f16, element::f32, element::f64};
const std::set<element::Type> supported_types_v2 =
{element::u32, element::u64, element::i32, element::i64, element::f16, element::f32, element::f64, element::bf16};
// Element types accepted by ReduceMax from ONNX opset 13 onwards
// (supported_types_v2 extended with i8/u8 — see set_13/set_18 reduce_max below).
const std::set<element::Type> supported_types_v3 = {element::u32, element::u64, element::i32, element::i64,
                                                    element::f16, element::f32, element::f64, element::bf16,
                                                    element::i8,  element::u8};
// supported_types_v3 plus boolean; used by the opset-20 ReduceMax path,
// where a boolean input is reduced through a u8 view (see set_20::reduce_max).
const std::set<element::Type> supported_types_v4 = {element::u32, element::u64, element::i32,     element::i64,
                                                    element::f16, element::f32, element::f64,     element::bf16,
                                                    element::i8,  element::u8,  element::boolean};

template <typename OpType>
std::shared_ptr<ov::Node> make_ov_reduction_op(const Node& node,
Expand Down Expand Up @@ -177,11 +199,33 @@ namespace set_13 {
// ReduceSum-13: the trailing `false` presumably reflects the opset-13 move of
// 'axes' from an attribute to an input — confirm against make_ov_reduction_op.
ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceSum>(node, data, supported_types_v2, false)};
}
// ReduceMax-13: widens the accepted element types to supported_types_v3 (i8/u8 added).
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceMax>(node, data, supported_types_v3)};
}
} // namespace set_13

namespace set_18 {
// ReduceMax-18: same types as opset 13; the trailing `false` presumably reflects
// the opset-18 move of 'axes' from an attribute to an input — confirm against
// make_ov_reduction_op.
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceMax>(node, data, supported_types_v3, false)};
}
} // namespace set_18

namespace set_20 {
// ReduceMax-20: additionally accepts boolean tensors.
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    if (data.get_element_type() == element::boolean) {
        // ReduceMax itself does not take boolean input: reduce a u8 view of the
        // data, then convert the result back to boolean.
        const auto as_u8 = std::make_shared<v0::Convert>(data, element::u8);
        const auto reduced = make_ov_reduction_op<v1::ReduceMax>(node, as_u8, supported_types_v4, false);
        return {std::make_shared<v0::Convert>(reduced, element::boolean)};
    }
    return {make_ov_reduction_op<v1::ReduceMax>(node, data, supported_types_v3, false)};
}
} // namespace set_20
} // namespace op
} // namespace onnx
} // namespace frontend
Expand Down
9 changes: 9 additions & 0 deletions src/frontends/onnx/frontend/src/op/reduce.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,15 @@ ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
namespace set_1 {
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
} // namespace set_1
namespace set_13 {
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
} // namespace set_13
namespace set_18 {
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
} // namespace set_18
namespace set_20 {
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
} // namespace set_20

namespace set_1 {
ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node);
Expand Down
3 changes: 3 additions & 0 deletions src/frontends/onnx/frontend/src/ops_bridge.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -484,6 +484,9 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("ReduceL1", 1, reduce_l1);
REGISTER_OPERATOR("ReduceL2", 1, reduce_l2);
REGISTER_OPERATOR("ReduceMax", 1, reduce_max);
REGISTER_OPERATOR("ReduceMax", 13, reduce_max);
REGISTER_OPERATOR("ReduceMax", 18, reduce_max);
REGISTER_OPERATOR("ReduceMax", 20, reduce_max);
REGISTER_OPERATOR("ReduceMean", 1, reduce_mean);
REGISTER_OPERATOR("ReduceMin", 1, reduce_min);
REGISTER_OPERATOR("ReduceProd", 1, reduce_prod);
Expand Down
2 changes: 0 additions & 2 deletions src/frontends/onnx/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_99954 = xfail_test(reason="Constant Pad - RuntimeError: Shape inference of Reference node with name y failed")
xfail_issue_99955 = xfail_test(reason="GroupNorm is not supported")
xfail_issue_99957 = xfail_test(reason="LayerNorm - RuntimeError: While validating node '<Node(Reshape): Mean>'")
xfail_issue_99958 = xfail_test(reason="LogSoftmax - Results mismatch")
xfail_issue_99960 = xfail_test(reason="MVN - Results mismatch")
xfail_issue_99961 = xfail_test(reason="Optional has/get element operators are not supported)'")
xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1/L2 - Unrecognized attribute: axes for operator ReduceL1/L2")
Expand All @@ -71,7 +70,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_99970 = xfail_test(reason="Scatter and ScatterND - RuntimeError: Check '(reduction == none)' failed at "
"src/frontends/onnx/frontend/src/op/scatter_elements.cpp OR at "
"src/frontends/onnx/frontend/src/op/scatter_nd")
xfail_issue_99972 = xfail_test(reason="Softmax - Results mismatch")
xfail_issue_99973 = xfail_test(reason="Split - RuntimeError: While validating ONNX node "
"'<Node(Split): output_1, output_2, output_3, output_4>'")
xfail_issue_38710 = xfail_test(reason="RuntimeError: data has zero dimension which is not allowed")
Expand Down
68 changes: 68 additions & 0 deletions src/frontends/onnx/tests/models/reduce_max_18.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
ir_version: 3
producer_name: "OpenVINO ONNX Frontend"
graph {
node {
input: "A"
input: "axes"
output: "B"
op_type: "ReduceMax"
}
name: "compute_graph"
initializer {
data_type: 6
dims: 1
name: "axes"
raw_data: "\002\000\000\000"
}
input {
name: "A"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 4
}
dim {
dim_value: 4
}
}
}
}
}
input {
name: "axes"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "B"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 18
}
48 changes: 48 additions & 0 deletions src/frontends/onnx/tests/models/reduce_wrong_type_v3.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
ir_version: 3
producer_name: "OpenVINO ONNX Frontend"
graph {
node {
input: "A"
output: "B"
op_type: "ReduceMax"
}
name: "compute_graph"
input {
name: "A"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 4
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "B"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 13
}
48 changes: 48 additions & 0 deletions src/frontends/onnx/tests/models/reduce_wrong_type_v4.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
ir_version: 3
producer_name: "OpenVINO ONNX Frontend"
graph {
node {
input: "A"
output: "B"
op_type: "ReduceMax"
}
name: "compute_graph"
input {
name: "A"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 4
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "B"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 20
}
22 changes: 22 additions & 0 deletions src/frontends/onnx/tests/onnx_import.in.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -974,6 +974,28 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max) {
test_case.run();
}

// ReduceMax opset-18: u8 input, 'axes' supplied as a tensor input (see reduce_max_18.prototxt).
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max_18) {
    // TEMPLATE plugin has an issue with evaluation for u8 type
    if (std::string("${BACKEND_NAME}") == std::string("INTERPRETER")) {
        GTEST_SKIP();
    }

    auto model = convert_model("reduce_max_18.onnx");

    // input data shape (1, 1, 4, 4)
    std::vector<std::vector<uint8_t>> inputs{
        ov::test::NDArray<uint8_t, 4>({{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}})
            .get_vector()};

    // expected output: column-wise maxima from reducing axis 2 — 4 values.
    // NOTE(review): the model's declared output shape is (1,) but 4 values are
    // expected here; presumably only the flat element count is compared — verify.
    auto expected_output = ov::test::NDArray<uint8_t, 1>({13, 14, 15, 16}).get_vector();

    auto test_case = ov::test::TestCase(model, s_device);
    test_case.add_multiple_inputs(inputs);
    test_case.add_expected_output(expected_output);
    test_case.run();
}

// A ReduceMax model with invalid 'axes' must be rejected at conversion time.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max_invalid_axes) {
    EXPECT_THROW(convert_model("reduce_max_invalid_axes.onnx"), ov::Exception);
}
Expand Down
Loading

0 comments on commit 232ccd7

Please sign in to comment.