[CPU] Dynamic shapes test coverage #3

Open
wants to merge 13 commits into base: master
@@ -21,10 +21,11 @@ const std::vector<std::map<std::string, std::string>> configs = {

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestDynamicTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{{{1, 4, 20, 20}, {1, 10, 18, 18}},
{{2, 4, 20, 20}, {2, 10, 18, 18}}}),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestDynamicTests::getTestCaseName);

} // namespace
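The shape pairs above appear to encode (input shape, expected output shape) for the makeSplitConvConcat() subgraph. A minimal sketch of the shape arithmetic, assuming the subgraph splits the 4 input channels into two branches, runs a stride-1, unpadded 3x3 convolution with 5 output channels on each, and concatenates the results (the exact subgraph parameters are an assumption inferred from the pairs):

```cpp
#include <cstddef>
#include <iostream>

// Hypothetical parameters for one branch of makeSplitConvConcat().
int main() {
    const std::size_t branches = 2;          // channel split: 4 -> 2 + 2
    const std::size_t convOutChannels = 5;   // per-branch conv output channels
    const std::size_t kernel = 3;            // 3x3 kernel, stride 1, no padding
    const std::size_t inSpatial = 20;

    const std::size_t outChannels = branches * convOutChannels;  // 10 after concat
    const std::size_t outSpatial = inSpatial - kernel + 1;       // 20 - 3 + 1 = 18

    // Matches the expected output shapes {1, 10, 18, 18} and {2, 10, 18, 18}.
    std::cout << "{N, " << outChannels << ", " << outSpatial << ", " << outSpatial << "}\n";
    return 0;
}
```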

12 changes: 0 additions & 12 deletions inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -308,18 +308,6 @@ InferenceEngine::CNNNetwork clDNNEngine::CloneAndTransformNetwork(const Inferenc
return true;
});

pass_config->set_callback<ngraph::pass::ConvertNMS1ToNMS5,
ngraph::pass::ConvertNMS3ToNMS5,
ngraph::pass::ConvertNMS4ToNMS5,
ngraph::pass::ConvertNMSToNMSIEInternal>(
[](const_node_ptr &node) -> bool {
return node->input_value(0).get_shape().back() == 4lu &&
node->input_value(0).get_shape().front() == node->input_value(1).get_shape().front() &&
node->input_value(0).get_shape()[1] == node->input_value(1).get_shape().back() &&
node->input_value(0).get_shape().size() == 3lu &&
node->input_value(1).get_shape().size() == 3lu;
});

pass_config->set_callback<ngraph::pass::MVN6Decomposition>(
[](const_node_ptr &node) -> bool {
const auto mvn = std::dynamic_pointer_cast<const ngraph::op::v6::MVN>(node);
395 changes: 0 additions & 395 deletions inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp

This file was deleted.

88 changes: 0 additions & 88 deletions inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h

This file was deleted.

49 changes: 49 additions & 0 deletions inference-engine/src/mkldnn_plugin/cpu_shape.cpp
@@ -0,0 +1,49 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "cpu_shape.h"
#include "utils/general_utils.h"
#include "memory_desc/cpu_memory_desc_utils.h"

using namespace MKLDNNPlugin;

bool Shape::isCompatible(const VectorDims &vecDims) const {
if (getRank() != vecDims.size()) {
return false;
}

auto comparator = [](Dim lhs, Dim rhs) {
return (lhs == rhs) || (lhs == Shape::UNDEFINED_DIM);
};

if (!std::equal(getDims().begin(), getDims().end(), vecDims.begin(), comparator)) {
return false;
}

if (!std::equal(getMaxDims().begin(), getMaxDims().end(), vecDims.begin(), [](Dim lhs, Dim rhs) { return lhs >= rhs; })) {
return false;
}

if (!std::equal(getMinDims().begin(), getMinDims().end(), vecDims.begin(), [](Dim lhs, Dim rhs) { return lhs <= rhs; })) {
return false;
}
return true;
}

std::string Shape::toString() const {
std::stringstream output;
output << "{";

size_t i = 0;
do {
if (dims[i] == Shape::UNDEFINED_DIM) {
output << MemoryDescUtils::dim2str(minDims[i]) << " - " << MemoryDescUtils::dim2str(maxDims[i]);
} else {
output << dims[i];
}
} while (++i < dims.size() && output << ", ");

output << "}";
return output.str();
}
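A minimal usage sketch for the class above, assuming only the public interface shown in cpu_shape.h; the dimension values and the printed strings are illustrative:

```cpp
#include "cpu_shape.h"
#include <ngraph/partial_shape.hpp>
#include <iostream>

using namespace MKLDNNPlugin;

int main() {
    // Batch bounded to [1, 8], remaining dims static.
    Shape shape(ngraph::PartialShape{ngraph::Dimension(1, 8), 3, 224, 224});

    VectorDims inRange  = {4, 3, 224, 224};   // batch 4 lies within [1, 8]
    VectorDims tooLarge = {16, 3, 224, 224};  // batch 16 exceeds the upper bound

    std::cout << shape.toString() << "\n";              // likely "{1 - 8, 3, 224, 224}"
    std::cout << shape.isCompatible(inRange) << "\n";   // 1 (true)
    std::cout << shape.isCompatible(tooLarge) << "\n";  // 0 (false)
    return 0;
}
```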
49 changes: 33 additions & 16 deletions inference-engine/src/mkldnn_plugin/cpu_shape.h
@@ -9,7 +9,7 @@
#include <utility>
#include <ie_common.h>
#include <ngraph/partial_shape.hpp>
#include "mkldnn_dims.h"
#include "cpu_types.h"

namespace MKLDNNPlugin {

@@ -19,7 +19,9 @@ class Shape {

explicit Shape(const ngraph::PartialShape& shape) {
minDims = shape.get_min_shape();
std::transform(minDims.begin(), minDims.end(), minDims.begin(), [](Dim x){ return ngraph::Interval::s_max == x ? UNDEFINED_DIM : x;});
maxDims = shape.get_max_shape();
std::transform(maxDims.begin(), maxDims.end(), maxDims.begin(), [](Dim x){ return ngraph::Interval::s_max == x ? UNDEFINED_DIM : x;});
type = shape.is_static() ? ShapeType::Static : ShapeType::Dynamic;

initDims();
@@ -34,7 +36,7 @@
}

/**
* @brief
* @brief
* for static shape
* maxDims = [2, 3, 4, 5]
* minDims = [2, 3, 4, 5]
@@ -46,12 +48,12 @@
* dims = [UNDEFINED_DIM, UNDEFINED_DIM, UNDEFINED_DIM, UNDEFINED_DIM]
* @return return lower bound of shape = [1, 1, 1, 1]
*/
const std::vector<size_t>& getMinDims() const {
const VectorDims& getMinDims() const {
return minDims;
}

/**
* @brief
* @brief
* for static shape
* maxDims = [2, 3, 4, 5]
* minDims = [2, 3, 4, 5]
@@ -63,15 +65,15 @@
* dims = [UNDEFINED_DIM, UNDEFINED_DIM, UNDEFINED_DIM, UNDEFINED_DIM]
* @return return upper bound of shape = [6, 6, 6, 6]
*/
const std::vector<size_t>& getMaxDims() const {
const VectorDims& getMaxDims() const {
return maxDims;
}

/**
* @brief return defined shape or throw exception for dynamic case
* @brief return defined shape or throw exception for dynamic case
* @return return shape
*/
const std::vector<size_t>& getStaticDims() const {
const VectorDims& getStaticDims() const {
if (type != ShapeType::Static) {
IE_THROW() << "Cannot get dims for non static shape";
}
@@ -80,7 +82,7 @@
}

/**
* @brief
* @brief
* for static shape
* maxDims = [2, 3, 4, 5]
* minDims = [2, 3, 4, 5]
@@ -92,13 +94,18 @@
* dims = [2, 3, UNDEFINED_DIM, UNDEFINED_DIM]
* @return return shape with defined and undefined dims = [2, 3, UNDEFINED_DIM, UNDEFINED_DIM]
*/
const std::vector<size_t>& getDims() const {
const VectorDims& getDims() const {
return dims;
}

bool isStatic() const {
return type == ShapeType::Static;
}

bool isDynamic() const {
return type == ShapeType::Dynamic;
}

size_t getRank() const {
return minDims.size();
}
@@ -118,14 +125,21 @@
}

ngraph::PartialShape toPartialShape() const {
std::vector<ngraph::Dimension> nGraphDims;
using ngraph::Dimension;
std::vector<Dimension> nGraphDims;
nGraphDims.reserve(minDims.size());
for (int i = 0; i < minDims.size(); i++) {
nGraphDims.emplace_back(minDims[i], maxDims[i]);
Dimension::value_type minDim = Shape::UNDEFINED_DIM == minDims[i] ? -1 : minDims[i];
Dimension::value_type maxDim = Shape::UNDEFINED_DIM == maxDims[i] ? -1 : maxDims[i];
nGraphDims.emplace_back(minDim, maxDim);
}
return ngraph::PartialShape(nGraphDims);
}

bool isCompatible(const VectorDims& vecDims) const;

std::string toString() const;

bool operator == (const Shape& rhs) const {
return minDims == rhs.minDims && maxDims == rhs.maxDims;
}
@@ -134,7 +148,11 @@
return !(*this == rhs);
}

enum : size_t {
bool hasDefinedUpperBounds() const {
return std::all_of(maxDims.begin(), maxDims.end(), [](Dim dim){ return dim != UNDEFINED_DIM; });
}

enum : Dim {
UNDEFINED_DIM = 0xffffffffffffffff
};

@@ -151,9 +169,8 @@
Dynamic
} type {ShapeType::Static};

std::vector<size_t> minDims;
std::vector<size_t> maxDims;
std::vector<size_t> dims;
VectorDims minDims;
VectorDims maxDims;
VectorDims dims;
};

} // namespace MKLDNNPlugin
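A minimal sketch of the UNDEFINED_DIM handling that toPartialShape() introduces above: an unbounded upper bound is passed back to ngraph as -1, which the Dimension(min, max) constructor treats as "no upper bound", so the dimension stays dynamic. This assumes only the public interface shown in this header:

```cpp
#include "cpu_shape.h"
#include <ngraph/partial_shape.hpp>
#include <cassert>

using namespace MKLDNNPlugin;

int main() {
    // Fully dynamic batch dimension, static remaining dims.
    ngraph::PartialShape original{ngraph::Dimension::dynamic(), 3, 224, 224};
    Shape shape(original);

    assert(shape.isDynamic());
    assert(!shape.hasDefinedUpperBounds());  // the batch dim has no upper bound

    // Round trip: the unbounded upper bound maps back to -1, i.e. the dim stays dynamic.
    ngraph::PartialShape restored = shape.toPartialShape();
    assert(restored[0].is_dynamic());
    assert(restored.compatible(original));
    return 0;
}
```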
5 changes: 5 additions & 0 deletions inference-engine/src/mkldnn_plugin/cpu_types.h
Original file line number Diff line number Diff line change
@@ -4,8 +4,13 @@

#pragma once

#include <vector>

namespace MKLDNNPlugin {

using Dim = std::size_t;
using VectorDims = std::vector<Dim>;

enum Type {
Unknown,
Generic,
@@ -0,0 +1,34 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "blocked_memory_desc.h"
#include "utils/general_utils.h"

using namespace MKLDNNPlugin;

bool BlockedMemoryDesc::isCompatible(const BlockedMemoryDesc &rhs) const {
if (this->getShape() != rhs.getShape() || this->getPrecision() != rhs.getPrecision())
return false;

if (!dimsEqualWeak(this->getBlockDims(), rhs.getBlockDims())) {
return false;
}

if (!dimsEqualWeak(this->getOffsetPaddingToData(), rhs.getOffsetPaddingToData())) {
return false;
}

// This check is needed to avoid inserting unnecessary reorders if the memory is used in place and the batch size is equal to 1
size_t skipAxis = this->getShape().getRank() > 0 && this->getShape().getDims().front() == 1 ? 0 :
Shape::UNDEFINED_DIM; // ignore the batch axis if batch size == 1
if (!dimsEqualWeak(this->getStrides(), rhs.getStrides(), skipAxis)) {
return false;
}

if (!dimsEqualWeak(this->getOrder(), rhs.getOrder())) {
return false;
}

return dimsEqualWeak(this->getOffsetPadding(), rhs.getOffsetPadding());
}
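The stride comparison above relies on a weak equality helper from utils/general_utils.h that is not part of this diff. A minimal sketch of the idea (treat an undefined dim as a wildcard and skip one axis entirely, here the batch axis when the batch size is 1); the name and exact signature are assumptions, not the actual dimsEqualWeak utility:

```cpp
#include <cstddef>
#include <vector>

using Dim = std::size_t;
using VectorDims = std::vector<Dim>;
constexpr Dim UNDEFINED_DIM = static_cast<Dim>(-1);

// Sketch: element-wise "weak" comparison of two dim/stride vectors.
// UNDEFINED_DIM on either side matches anything; skipAxis is ignored entirely,
// so the batch stride never forces a reorder when the batch size is 1.
bool dimsEqualWeakSketch(const VectorDims& lhs, const VectorDims& rhs,
                         std::size_t skipAxis = UNDEFINED_DIM) {
    if (lhs.size() != rhs.size())
        return false;
    for (std::size_t i = 0; i < lhs.size(); ++i) {
        if (i == skipAxis)
            continue;
        if (lhs[i] != rhs[i] && lhs[i] != UNDEFINED_DIM && rhs[i] != UNDEFINED_DIM)
            return false;
    }
    return true;
}
```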