Merge branch 'master' into dependencies-update
ilya-lavrenov authored Aug 12, 2024
2 parents 49521bc + e567c9e commit 3d1b5f3
Showing 29 changed files with 1,803 additions and 982 deletions.
14 changes: 13 additions & 1 deletion src/plugins/intel_cpu/src/nodes/interpolate.cpp
@@ -2021,7 +2021,13 @@ void Interpolate::initSupportedPrimitiveDescriptors() {
return;

ov::element::Type inputPrecision = getOriginalInputPrecisionAtPort(DATA_ID);
if ((inputPrecision != ov::element::i8) && (inputPrecision != ov::element::u8) && (inputPrecision != ov::element::bf16)) {

#if defined(OV_CPU_WITH_ACL)
bool isInputPrecisionSupported = one_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::f16);
#else
bool isInputPrecisionSupported = one_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::bf16);
#endif
if (!isInputPrecisionSupported) {
inputPrecision = ov::element::f32;
}
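The new check folds the chained inequality comparisons into the CPU plugin's variadic one_of helper. A minimal sketch of such a helper (an assumed shape; the real one lives in the plugin's general utility headers) looks like this:

// Sketch of a variadic one_of helper (assumed shape, C++17): returns true
// if val compares equal to any of the trailing arguments.
template <typename T, typename... Args>
bool one_of(const T& val, const Args&... args) {
    return ((val == args) || ...);  // fold expression over operator==
}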

@@ -2039,9 +2045,11 @@ void Interpolate::initSupportedPrimitiveDescriptors() {
outputPrecision = fusedWith[fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(DATA_ID);
}

#if !defined(OV_CPU_WITH_ACL)
if (!mayiuse(cpu::x64::sse41)) {
inputPrecision = outputPrecision = ov::element::f32;
}
#endif

auto targetShapeType = ov::element::i32;
auto scalesType = ov::element::f32;
@@ -2115,6 +2123,8 @@ void Interpolate::initSupportedPrimitiveDescriptors() {
canUseAclExecutor = !supportedPrimitiveDescriptors.empty();
if (canUseAclExecutor)
return;
// fall back to f32 if the reference implementation is used
inputPrecision = outputPrecision = ov::element::f32;
#endif
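Both ACL hunks follow the same pattern: probe for an ACL-backed primitive descriptor first, and only when none was registered drop to the reference implementation, which handles f32 only. A self-contained sketch of that control flow, with every name an illustrative stand-in rather than the plugin's actual API:

// Illustrative precision set; stand-in for ov::element::Type.
enum class Precision { f16, bf16, f32, i8, u8 };

// Hypothetical probe: pretend the ACL executor accepts only f16 and f32.
static bool acl_supports(Precision p) {
    return p == Precision::f16 || p == Precision::f32;
}

// Keep the requested precisions when ACL can serve them; otherwise force
// f32 so the reference path can run.
static void select_precisions(Precision& in, Precision& out) {
    if (acl_supports(in) && acl_supports(out))
        return;
    in = out = Precision::f32;
}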

if (dataRank == 4) {
@@ -2147,6 +2157,8 @@ void Interpolate::initSupportedPrimitiveDescriptors() {
canUseAclExecutor = !supportedPrimitiveDescriptors.empty();
if (canUseAclExecutor)
return;
// fall back to f32 if the reference implementation is used
inputPrecision = outputPrecision = ov::element::f32;
#endif

if (!mayiuse(cpu::x64::sse41) || interpAttrs.mode == InterpolateMode::linear) {
@@ -0,0 +1,18 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "execution_graph_tests/duplicate_inputs_outputs_names.hpp"

#include "common_test_utils/test_constants.hpp"

using namespace ExecutionGraphTests;

namespace {

INSTANTIATE_TEST_SUITE_P(smoke_duplicateInputsOutputsNames,
ExecGraphDuplicateInputsOutputsNames,
::testing::Values(ov::test::utils::DEVICE_CPU),
ExecGraphDuplicateInputsOutputsNames::getTestCaseName);

} // namespace
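For context, the fixture being instantiated comes from the shared test library; an assumed rough shape of its declaration (not the actual header contents) is:

#include <gtest/gtest.h>
#include <string>

// Assumed shape of the shared fixture declared in
// execution_graph_tests/duplicate_inputs_outputs_names.hpp: a gtest
// fixture parameterized on the target device name.
class ExecGraphDuplicateInputsOutputsNames : public testing::TestWithParam<std::string> {
public:
    // Produces the per-case name suffix used by INSTANTIATE_TEST_SUITE_P above.
    static std::string getTestCaseName(const testing::TestParamInfo<std::string>& obj) {
        return "targetDevice=" + obj.param;
    }
};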
5 changes: 5 additions & 0 deletions src/plugins/intel_gpu/src/graph/program_node.cpp
@@ -988,6 +988,11 @@ void program_node::load(cldnn::BinaryInputBuffer& ib) {
ib >> fused_prims_onednn[idx].dims;
ib >> make_data(&fused_prims_onednn[idx].dt, sizeof(dnnl::memory::data_type));
}

// create a dummy onednn_attrs so it is not left uninitialized for non-oneDNN impls
if (impl_type != impl_types::onednn) {
onednn_attrs = std::make_shared<dnnl::primitive_attr>();
}
#endif // ENABLE_ONEDNN_FOR_GPU
}
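The guard works because dnnl::primitive_attr is cheap to default-construct: an empty attribute object carries no post-ops and can safely be dereferenced later. A minimal standalone illustration, assuming the oneDNN headers are available:

#include <memory>
#include <oneapi/dnnl/dnnl.hpp>

// A default-constructed primitive_attr has no post-ops and the default
// scratchpad mode; sharing one as a placeholder keeps onednn_attrs
// non-null for implementations that never set real oneDNN attributes.
std::shared_ptr<dnnl::primitive_attr> make_placeholder_attrs() {
    return std::make_shared<dnnl::primitive_attr>();
}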

@@ -41,10 +41,24 @@ ConvertMatMulToFullyConnected::ConvertMatMulToFullyConnected() {
return false;
}

// fc_input_a and fc_input_b are the final inputs that will be set on FullyConnected.
// So when adding new operations that take the MatMul inputs, we need to keep fc_input_a and fc_input_b updated.
auto fc_input_a = pattern_map.at(activations_m);
auto fc_input_b = pattern_map.at(weights_m);

// Do not convert fc_input_b when it is also consumed as fc_input_a by a sibling MatMul
auto input_b = fc_input_b.get_node_shared_ptr();
for (auto& user : input_b->get_users()) {
if (user != matmul && ov::is_type<ov::op::v0::MatMul>(user) && ov::is_type<ov::op::v0::Convert>(input_b)) {
auto other_matmul = std::dynamic_pointer_cast<ov::op::v0::MatMul>(user);
// Transpose for input_b generates invalid input for other sibling matmul
if (input_b == other_matmul->get_input_node_shared_ptr(0) || fc_input_b == fc_input_a ||
(input_b == other_matmul->get_input_node_shared_ptr(1) && matmul->get_transpose_b() != other_matmul->get_transpose_b())) {
return false;
}
}
}

// fc_input_a and fc_input_b are the final inputs that will be set on FullyConnected.
// So when adding new operations that take the MatMul inputs, we need to keep fc_input_a and fc_input_b updated.
bool is_convert = false;
if (auto convert_node = std::dynamic_pointer_cast<ov::op::v0::Convert>(fc_input_b.get_node_shared_ptr())) {
is_convert = true;
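Pulled out into a standalone predicate, the sibling check added above could read roughly as follows. This is a hedged sketch against the public OpenVINO node API, not the transformation's actual code, and it omits the fc_input_b == fc_input_a corner case for brevity:

#include <memory>
#include "openvino/core/type.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/matmul.hpp"

// Returns true when inserting a Transpose on `weights` (a Convert node
// shared between sibling MatMuls) would hand an invalid input to another
// MatMul that also consumes it.
bool shared_weights_conflict(const std::shared_ptr<ov::Node>& weights,
                             const std::shared_ptr<ov::op::v0::MatMul>& matmul) {
    if (!ov::is_type<ov::op::v0::Convert>(weights))
        return false;
    for (const auto& user : weights->get_users()) {
        auto other = std::dynamic_pointer_cast<ov::op::v0::MatMul>(user);
        if (!other || other == matmul)
            continue;
        // Feeding the sibling's activations input, or its weights input with
        // a mismatched transpose_b, would be broken by the inserted Transpose.
        if (weights == other->get_input_node_shared_ptr(0) ||
            (weights == other->get_input_node_shared_ptr(1) &&
             matmul->get_transpose_b() != other->get_transpose_b()))
            return true;
    }
    return false;
}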