Skip to content

Commit

Permalink
[CPU] Remove BatchNorm node. (#22)
Browse files Browse the repository at this point in the history
  • Loading branch information
nshchego authored and mandrono committed Apr 30, 2021
1 parent 32f681e commit 0824bad
Show file tree
Hide file tree
Showing 9 changed files with 0 additions and 397 deletions.
1 change: 0 additions & 1 deletion inference-engine/src/mkldnn_plugin/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ endif()

## TODO
set(LAYERS
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_batchnorm_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bin_conv_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_concat_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_conv_node.cpp
Expand Down
12 changes: 0 additions & 12 deletions inference-engine/src/mkldnn_plugin/mkldnn_descriptor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,6 @@ size_t MKLDNNDescriptor::outputNumbers() const {
return 1;
}

// Wraps an mkldnn batch-normalization forward descriptor in the
// type-erased DescFwdImpl holder; the matching conversion operator
// retrieves it later.
MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::batch_normalization_forward::desc> desc) {
    using WrappedDesc = mkldnn::batch_normalization_forward::desc;
    this->desc.reset(new DescFwdImpl<WrappedDesc>(desc));
}

// Recovers the stored batch-normalization forward descriptor.
// Throws if this MKLDNNDescriptor does not actually hold one
// (i.e. the type-erased holder is of a different concrete type).
MKLDNNDescriptor::operator std::shared_ptr<mkldnn::batch_normalization_forward::desc>() {
    using WrappedDesc = mkldnn::batch_normalization_forward::desc;
    const auto typedHolder = std::dynamic_pointer_cast<DescFwdImpl<WrappedDesc>>(desc);
    if (!typedHolder) {
        IE_THROW() << "Cannot cast descriptor!";
    }
    return typedHolder->getPtr();
}

// Wraps an mkldnn convolution forward descriptor in the type-erased
// DescFwdImpl holder, mirroring the other MKLDNNDescriptor constructors.
MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::convolution_forward::desc> desc) {
    using WrappedDesc = mkldnn::convolution_forward::desc;
    this->desc.reset(new DescFwdImpl<WrappedDesc>(desc));
}
Expand Down
3 changes: 0 additions & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_descriptor.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,6 @@

class MKLDNNDescriptor {
public:
explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::batch_normalization_forward::desc> desc);
operator std::shared_ptr<mkldnn::batch_normalization_forward::desc>();

explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::convolution_forward::desc> desc);
operator std::shared_ptr<mkldnn::convolution_forward::desc>();

Expand Down
1 change: 0 additions & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,6 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::CNNNetwork &ne
// type != Concatenation &&
// type != Eltwise &&
// type != Crop &&
// type != BatchNormalization &&
// type != Copy) {
// check_result = false;
// }
Expand Down
43 changes: 0 additions & 43 deletions inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -103,15 +103,6 @@ void MKLDNNGraphOptimizer::ApplyCommonGraphOptimizations(MKLDNNGraph &graph) {
FuseBinaryConvolutionAndQuantize(graph);
graph.RemoveDroppedNodes();

OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "FuseBatchNormWithScale");
FuseBatchNormWithScale(graph);
graph.RemoveDroppedNodes();

// OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "RemoveIdentityOperator");
// RemoveIdentityOperator(graph);
// graph.RemoveDroppedNodes();

OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "FuseConvolutionSumAndConvolutionSumActivation");
FuseConvolutionSumAndConvolutionSumActivation(graph);
graph.RemoveDroppedNodes();

Expand Down Expand Up @@ -714,40 +705,6 @@ void MKLDNNGraphOptimizer::MergeTwoEqualScaleShifts(MKLDNNGraph& graph) {
// }
}

// Formerly fused a BatchNormalization node into a following ScaleShift
// (Eltwise) child. The entire implementation is commented out, so this
// pass is currently a no-op: it touches nothing in the graph.
// NOTE(review): with the BatchNorm node removed from the plugin, this
// dead body and its call site can presumably be deleted entirely —
// confirm no pending work depends on restoring it.
void MKLDNNGraphOptimizer::FuseBatchNormWithScale(MKLDNNGraph &graph) {
// auto &graphNodes = graph.GetNodes();
//
// for (int i = 0; i < graphNodes.size(); i++) {
// const auto& bn = graphNodes[i];
// if (bn->getType() == BatchNormalization) {
// const auto& outputNodesMap = graph.GetOutputNodesMap();
// const std::string node_name = bn->getName();
// // Check that the node is not output node
// if (std::find_if(outputNodesMap.begin(), outputNodesMap.end(),
// [&node_name](const MKLDNNNodePtr& x) {
// return x->getName() == node_name;}) == outputNodesMap.end()) {
// if (bn->getChildEdges().size() == 1) {
// auto child = bn->getChildEdgeAt(0)->getChild();
// if (child->type == Eltwise && child->getCnnLayer()->type == "ScaleShift") {
// bn->fuseWith(child);
//
// auto parentEdges = child->parentEdges;
// for (auto &parentEdge : parentEdges) {
// auto p_edge = parentEdge.lock();
// if (p_edge->getParent()->getType() == BatchNormalization)
// continue;
//
// removeEdge(graph, p_edge);
// }
//
// graph.DropNode(child);
// }
// }
// }
// }
// }
}

void MKLDNNGraphOptimizer::FuseFullyConnectedAndSimpleOperation(MKLDNNGraph &graph) {
auto& graphNodes = graph.GetNodes();

Expand Down
2 changes: 0 additions & 2 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
#include <cstdint>
#include <unordered_map>

#include <nodes/mkldnn_batchnorm_node.h>
#include <nodes/mkldnn_concat_node.h>
#include <nodes/mkldnn_conv_node.h>
#include <nodes/mkldnn_deconv_node.h>
Expand Down Expand Up @@ -167,7 +166,6 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
// { "SimplerNMS", SimplerNMS },
// { "ROIAlign", ROIAlign },
// { "ROIPooling", ROIPooling },
// { "BatchNormalization", BatchNormalization },
// { "Flatten", Flatten },
{ "Pad", Pad },
{ "Transpose", Transpose },
Expand Down
3 changes: 0 additions & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ enum Type {
SimplerNMS,
ROIAlign,
ROIPooling,
BatchNormalization,
DepthToSpace,
Flatten,
Pad,
Expand Down Expand Up @@ -201,8 +200,6 @@ static std::string NameFromType(Type type) {
return "ROIAlign";
case ROIPooling:
return "ROIPooling";
case BatchNormalization:
return "BatchNormalization";
case DepthToSpace:
return "DepthToSpace";
case Flatten:
Expand Down
Loading

0 comments on commit 0824bad

Please sign in to comment.