diff --git a/cmake/developer_package/add_ie_target.cmake b/cmake/developer_package/add_ie_target.cmake index d44149383c7609..d49f16a4db0e49 100644 --- a/cmake/developer_package/add_ie_target.cmake +++ b/cmake/developer_package/add_ie_target.cmake @@ -31,6 +31,7 @@ addIeTarget( function(addIeTarget) set(options ADD_CPPLINT # Enables code style checks for the target + ADD_CLANG_FORMAT # Enables clang-format code style check for the target ) set(oneValueRequiredArgs TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable @@ -119,6 +120,10 @@ function(addIeTarget) # code style add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME}) endif() + if (ARG_ADD_CLANG_FORMAT) + # code style + add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME}) + endif() if (ARG_DEVELOPER_PACKAGE) # developer package openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE} @@ -128,7 +133,6 @@ function(addIeTarget) # Provide default compile pdb name equal to target name set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME}) endif() - endfunction() #[[ diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index faafb8e99167eb..aae1dbb7fb7759 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -27,7 +27,10 @@ endif() # ) # function(ie_add_plugin) - set(options SKIP_INSTALL) + set(options + SKIP_INSTALL + ADD_CLANG_FORMAT + ) set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR) set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS) cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) @@ -73,7 +76,11 @@ function(ie_add_plugin) string(CONCAT custom_filter "${custom_filter}" "," "${filter}") endforeach() - add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + if (IE_PLUGIN_ADD_CLANG_FORMAT) + add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME}) + else() + add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + endif() # check that plugin with such name is not registered diff --git a/docs/.clang-format b/docs/.clang-format new file mode 100644 index 00000000000000..c93e6254b5b855 --- /dev/null +++ b/docs/.clang-format @@ -0,0 +1,25 @@ +BasedOnStyle: Google +IndentWidth: 4 +UseTab: Never + +Language: Cpp +Standard: Cpp11 + +AccessModifierOffset: -4 +AlignConsecutiveMacros: true +AllowAllArgumentsOnNextLine: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: false +AlwaysBreakBeforeMultilineStrings: false +ColumnLimit: 160 +# Specialize this comment pragma in order to avoid changes in SEA copyrights +CommentPragmas: '^#' +DerivePointerAlignment: false +FixNamespaceComments: true
IndentCaseLabels: false +IndentPPDirectives: BeforeHash +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: false \ No newline at end of file diff --git a/docs/onnx_custom_op/CMakeLists.txt b/docs/onnx_custom_op/CMakeLists.txt index 8446846dcfe072..f38ead369d8a5e 100644 --- a/docs/onnx_custom_op/CMakeLists.txt +++ b/docs/onnx_custom_op/CMakeLists.txt @@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op") find_package(ngraph REQUIRED COMPONENTS onnx_importer) -add_library(${TARGET_NAME} 
STATIC onnx_custom_op.cpp) +add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp) target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES}) # [cmake:onnx_custom_op] + +# Enable code style check +add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt index 9224383ffd62f5..a6e7527e55fe21 100644 --- a/docs/template_extension/CMakeLists.txt +++ b/docs/template_extension/CMakeLists.txt @@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND) target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED) endif() # [cmake:extension] + +# Enable code style check +file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp") +add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src}) diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp index 9469094e4c10bc..aa2486589cbff2 100644 --- a/docs/template_extension/cpu_kernel.cpp +++ b/docs/template_extension/cpu_kernel.cpp @@ -3,13 +3,15 @@ // #include "cpu_kernel.hpp" -#include "op.hpp" + #include +#include "op.hpp" + using namespace TemplateExtension; //! [cpu_implementation:ctor] -OpImplementation::OpImplementation(const std::shared_ptr &node) { +OpImplementation::OpImplementation(const std::shared_ptr& node) { try { auto castedNode = std::dynamic_pointer_cast(node); if (!castedNode) @@ -32,8 +34,8 @@ OpImplementation::OpImplementation(const std::shared_ptr &node) { //! [cpu_implementation:ctor] //! [cpu_implementation:getSupportedConfigurations] -InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept { auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) { InferenceEngine::LayerConfig config; config.dynBatchSupport = false; @@ -72,7 +74,7 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve if (!error.empty()) { if (resp) { strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } @@ -85,25 +87,24 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve //! [cpu_implementation:getSupportedConfigurations] //! 
[cpu_implementation:init] -InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept { try { if (config.inConfs.size() != 1 || config.outConfs.size() != 1) { IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!"; } if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) { - IE_THROW() - << "Operation can be initialized only with 4d input/output tensors!"; + IE_THROW() << "Operation can be initialized only with 4d input/output tensors!"; } if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 || - config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { + config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { IE_THROW() << "Operation supports only FP32 precisions!"; } } catch (InferenceEngine::Exception& ex) { if (resp) { strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } @@ -113,11 +114,10 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig //! [cpu_implementation:init] //! [cpu_implementation:execute] -InferenceEngine::StatusCode OpImplementation::execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept { - const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - float *dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); +InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept { + const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + float* dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); for (size_t i = 0; i < inputs[0]->size(); i++) { dst_data[i] = src_data[i] + add; diff --git a/docs/template_extension/cpu_kernel.hpp b/docs/template_extension/cpu_kernel.hpp index 692bbbbec307bc..901d33093b5079 100644 --- a/docs/template_extension/cpu_kernel.hpp +++ b/docs/template_extension/cpu_kernel.hpp @@ -5,6 +5,7 @@ #pragma once #include + #include namespace TemplateExtension { @@ -13,13 +14,12 @@ namespace TemplateExtension { class OpImplementation : public InferenceEngine::ILayerExecImpl { public: explicit OpImplementation(const std::shared_ptr& node); - InferenceEngine::StatusCode getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* 
resp) noexcept override; + private: int64_t add; ngraph::Shape inShape; diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp index d9baa69a059efb..7a0874f2bea8c8 100644 --- a/docs/template_extension/extension.cpp +++ b/docs/template_extension/extension.cpp @@ -3,15 +3,16 @@ // #include "extension.hpp" + #include "cpu_kernel.hpp" #include "op.hpp" #ifdef OPENCV_IMPORT_ENABLED -#include "fft_op.hpp" -#include "fft_kernel.hpp" + #include "fft_kernel.hpp" + #include "fft_op.hpp" #endif #include #ifdef NGRAPH_ONNX_IMPORT_ENABLED -#include + #include #endif #include @@ -21,22 +22,19 @@ using namespace TemplateExtension; - //! [extension:ctor] Extension::Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED - ngraph::onnx_import::register_operator( - Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - int64_t add = node.get_attribute_value("add"); - return {std::make_shared(ng_inputs.at(0), add)}; + ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; + int64_t add = node.get_attribute_value("add"); + return {std::make_shared(ng_inputs.at(0), add)}; }); #ifdef OPENCV_IMPORT_ENABLED - ngraph::onnx_import::register_operator( - FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - bool inverse = node.get_attribute_value("inverse"); - return {std::make_shared(ng_inputs.at(0), inverse)}; + ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; + bool inverse = node.get_attribute_value("inverse"); + return {std::make_shared(ng_inputs.at(0), inverse)}; }); #endif #endif @@ -47,19 +45,19 @@ Extension::Extension() { Extension::~Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain"); -#ifdef OPENCV_IMPORT_ENABLED + #ifdef OPENCV_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain"); -#endif // OPENCV_IMPORT_ENABLED -#endif // NGRAPH_ONNX_IMPORT_ENABLED + #endif // OPENCV_IMPORT_ENABLED +#endif // NGRAPH_ONNX_IMPORT_ENABLED } //! [extension:dtor] //! [extension:GetVersion] -void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept { +void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept { static InferenceEngine::Version ExtensionDescription = { - {1, 0}, // extension API version + {1, 0}, // extension API version "1.0", - "template_ext" // extension description message + "template_ext" // extension description message }; versionInfo = &ExtensionDescription; @@ -80,7 +78,7 @@ std::map Extension::getOpSets() { //! [extension:getOpSets] //! 
[extension:getImplTypes] -std::vector Extension::getImplTypes(const std::shared_ptr &node) { +std::vector Extension::getImplTypes(const std::shared_ptr& node) { if (std::dynamic_pointer_cast(node)) { return {"CPU"}; } @@ -94,7 +92,7 @@ std::vector Extension::getImplTypes(const std::shared_ptr &node, const std::string &implType) { +InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr& node, const std::string& implType) { if (implType == "CPU") { if (std::dynamic_pointer_cast(node)) { return std::make_shared(node); @@ -110,16 +108,16 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ //! [extension:getImplementation] //! [extension:CreateExtension] -//Generate exported function +// Generate exported function IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension) //! [extension:CreateExtension] -INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext, - InferenceEngine::ResponseDesc *resp) noexcept { +INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) +InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept { try { ext = new Extension(); return OK; - } catch (std::exception &ex) { + } catch (std::exception& ex) { if (resp) { std::string err = ((std::string) "Couldn't create extension: ") + ex.what(); err.copy(resp->msg, 255); diff --git a/docs/template_extension/extension.hpp b/docs/template_extension/extension.hpp index 24e731bcf2d297..0cc3b5816fe9ac 100644 --- a/docs/template_extension/extension.hpp +++ b/docs/template_extension/extension.hpp @@ -4,13 +4,14 @@ #pragma once -#include #include -#include +#include + +#include #include -#include +#include #include -#include +#include //! [extension:header] namespace TemplateExtension { diff --git a/docs/template_extension/fft_kernel.cpp b/docs/template_extension/fft_kernel.cpp index 8e37bdfce9fcb6..12554a70c75406 100644 --- a/docs/template_extension/fft_kernel.cpp +++ b/docs/template_extension/fft_kernel.cpp @@ -4,14 +4,16 @@ //! [fft_kernel:implementation] #include "fft_kernel.hpp" -#include "fft_op.hpp" + #include #include +#include "fft_op.hpp" + using namespace TemplateExtension; -FFTImpl::FFTImpl(const std::shared_ptr &node) { +FFTImpl::FFTImpl(const std::shared_ptr& node) { auto castedNode = std::dynamic_pointer_cast(node); if (!castedNode) IE_THROW() << "Cannot create implementation for unknown operation!"; @@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr &node) { inverse = castedNode->inverse; } -InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* resp) noexcept { std::vector inDataConfig; std::vector outDataConfig; InferenceEngine::SizeVector order(inpShape.size()); @@ -54,28 +55,27 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vectormsg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } return InferenceEngine::OK; } -static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) -{ +static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) { // NOTE: Inference Engine sizes are reversed. 
std::vector dims = blob->getTensorDesc().getDims(); std::vector size(dims.begin(), dims.end()); @@ -84,9 +84,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) return cv::Mat(size, CV_32F, (void*)blob->buffer()); } -InferenceEngine::StatusCode FFTImpl::execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode FFTImpl::execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept { cv::Mat inp = infEngineBlobToMat(inputs[0]); cv::Mat out = infEngineBlobToMat(outputs[0]); @@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector components = { - cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), - cv::Mat(h, w, CV_32F, inp.ptr(i, 1)) - }; + std::vector components = {cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr(i, 1))}; cv::merge(components, complex); if (!inverse) @@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector(i, 0)), - cv::Mat(h, w, CV_32F, out.ptr(i, 1)) - }; + components = {cv::Mat(h, w, CV_32F, out.ptr(i, 0)), cv::Mat(h, w, CV_32F, out.ptr(i, 1))}; cv::split(interleavedOut, components); } return InferenceEngine::OK; } //! [fft_kernel:implementation] - diff --git a/docs/template_extension/fft_kernel.hpp b/docs/template_extension/fft_kernel.hpp index 74fc3a4b13805b..f328328886150c 100644 --- a/docs/template_extension/fft_kernel.hpp +++ b/docs/template_extension/fft_kernel.hpp @@ -6,6 +6,7 @@ #pragma once #include + #include namespace TemplateExtension { @@ -13,13 +14,12 @@ namespace TemplateExtension { class FFTImpl : public InferenceEngine::ILayerExecImpl { public: explicit FFTImpl(const std::shared_ptr& node); - InferenceEngine::StatusCode getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept override; + private: ngraph::Shape inpShape; ngraph::Shape outShape; @@ -27,5 +27,5 @@ class FFTImpl : public InferenceEngine::ILayerExecImpl { std::string error; }; -} +} // namespace TemplateExtension //! 
[fft_kernel:header] diff --git a/docs/template_extension/fft_op.cpp b/docs/template_extension/fft_op.cpp index 8d85d5c08f59cb..b71a06bc746702 100644 --- a/docs/template_extension/fft_op.cpp +++ b/docs/template_extension/fft_op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; constexpr ngraph::NodeTypeInfo FFTOp::type_info; -FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse) : Op({inp}) { +FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse): Op({inp}) { constructor_validate_and_infer_types(); inverse = _inverse; } @@ -19,16 +19,15 @@ void FFTOp::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), outShape); } -std::shared_ptr FFTOp::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { +std::shared_ptr FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { if (new_args.size() != 1) { throw ngraph::ngraph_error("Incorrect number of new arguments"); } return std::make_shared(new_args.at(0), inverse); } -bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) { +bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) { visitor.on_attribute("inverse", inverse); return true; } //! [fft_op:implementation] - diff --git a/docs/template_extension/fft_op.hpp b/docs/template_extension/fft_op.hpp index eca07bcb7fcc37..2e79888cfd3e05 100644 --- a/docs/template_extension/fft_op.hpp +++ b/docs/template_extension/fft_op.hpp @@ -11,8 +11,10 @@ namespace TemplateExtension { class FFTOp : public ngraph::op::Op { public: - static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0}; - const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } + static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0}; + const ngraph::NodeTypeInfo& get_type_info() const override { + return type_info; + } FFTOp() = default; FFTOp(const ngraph::Output& inp, bool inverse); @@ -23,6 +25,5 @@ class FFTOp : public ngraph::op::Op { bool inverse; }; -} +} // namespace TemplateExtension //! [fft_op:header] - diff --git a/docs/template_extension/op.cpp b/docs/template_extension/op.cpp index e431443d400ab6..ec53c2ca26c57e 100644 --- a/docs/template_extension/op.cpp +++ b/docs/template_extension/op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; //! [op:ctor] NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0); -Operation::Operation(const ngraph::Output &arg, int64_t add) : Op({arg}), add(add) { +Operation::Operation(const ngraph::Output& arg, int64_t add): Op({arg}), add(add) { constructor_validate_and_infer_types(); } //! [op:ctor] @@ -22,7 +22,7 @@ void Operation::validate_and_infer_types() { //! [op:validate] //! [op:copy] -std::shared_ptr Operation::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { +std::shared_ptr Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { if (new_args.size() != 1) { throw ngraph::ngraph_error("Incorrect number of new arguments"); } @@ -32,63 +32,63 @@ std::shared_ptr Operation::clone_with_new_inputs(const ngraph::Out //! [op:copy] //! [op:visit_attributes] -bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) { +bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) { visitor.on_attribute("add", add); return true; } //! [op:visit_attributes] //! 
[op:evaluate] -namespace -{ +namespace { template -void implementation(const T* input, - T* output, - int64_t add, - size_t size) { +void implementation(const T* input, T* output, int64_t add, size_t size) { for (size_t i = 0; i < size; i++) { output[i] = input[i] + add; } } template -bool evaluate_op(const ngraph::HostTensorPtr& arg0, - const ngraph::HostTensorPtr& out, int64_t add) -{ +bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) { size_t size = ngraph::shape_size(arg0->get_shape()); - implementation(arg0->get_data_ptr(), - out->get_data_ptr(), - add, - size); + implementation(arg0->get_data_ptr(), out->get_data_ptr(), add, size); return true; } } // namespace -bool Operation::evaluate(const ngraph::HostTensorVector& outputs, - const ngraph::HostTensorVector& inputs) const { - switch (inputs[0]->get_element_type()) - { - case ngraph::element::Type_t::i8: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i64: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u8: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u64: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::bf16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::f16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::f32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - default: break; +bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const { + switch (inputs[0]->get_element_type()) { + case ngraph::element::Type_t::i8: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i64: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u8: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u64: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::bf16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::f16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::f32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + default: + break; } return false; } bool Operation::has_evaluate() const { - switch (get_input_element_type(0)) - { + switch (get_input_element_type(0)) { case ngraph::element::Type_t::i8: case ngraph::element::Type_t::i16: case ngraph::element::Type_t::i32: @@ -99,8 +99,10 @@ bool Operation::has_evaluate() const { case ngraph::element::Type_t::u64: case ngraph::element::Type_t::bf16: case 
ngraph::element::Type_t::f16: - case ngraph::element::Type_t::f32: return true; - default: break; + case ngraph::element::Type_t::f32: + return true; + default: + break; } return false; } diff --git a/docs/template_extension/op.hpp b/docs/template_extension/op.hpp index 3f515eb8f82946..4d3baf83a53faa 100644 --- a/docs/template_extension/op.hpp +++ b/docs/template_extension/op.hpp @@ -18,9 +18,10 @@ class Operation : public ngraph::op::Op { void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& new_args) const override; bool visit_attributes(ngraph::AttributeVisitor& visitor) override; - int64_t getAddAttr() const { return add; } - bool evaluate(const ngraph::HostTensorVector& outputs, - const ngraph::HostTensorVector& inputs) const override; + int64_t getAddAttr() const { + return add; + } + bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override; bool has_evaluate() const override; private: diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt index 62cfe6641a16e7..9a24bc6c32a0f9 100644 --- a/docs/template_plugin/src/CMakeLists.txt +++ b/docs/template_plugin/src/CMakeLists.txt @@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME} DEVICE_NAME "TEMPLATE" SOURCES ${SOURCES} ${HEADERS} SKIP_INSTALL # ATTENTION: uncomment to install component - VERSION_DEFINES_FOR template_plugin.cpp) + VERSION_DEFINES_FOR template_plugin.cpp + ADD_CLANG_FORMAT) target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" diff --git a/docs/template_plugin/src/template_async_infer_request.cpp b/docs/template_plugin/src/template_async_infer_request.cpp index 503607530004b7..bcdd3b6f2a2406 100644 --- a/docs/template_plugin/src/template_async_infer_request.cpp +++ b/docs/template_plugin/src/template_async_infer_request.cpp @@ -3,18 +3,16 @@ // #include "template_async_infer_request.hpp" + #include "template_itt.hpp" using namespace TemplatePlugin; // ! [async_infer_request:ctor] -TemplateAsyncInferRequest::TemplateAsyncInferRequest( - const TemplateInferRequest::Ptr& inferRequest, - const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, - const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, - const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) : - AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), - _inferRequest(inferRequest), _waitExecutor(waitExecutor) { +TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, + const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) + : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) { // In current implementation we have CPU only tasks and no needs in 2 executors // So, by default single stage pipeline is created. // This stage executes InferRequest::Infer() using cpuTaskExecutor. 
@@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest( constexpr const auto remoteDevice = false; if (remoteDevice) { - _pipeline = { - {cpuTaskExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); - _inferRequest->inferPreprocess(); - _inferRequest->startPipeline(); - }}, - {_waitExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::WaitPipeline"); - _inferRequest->waitPipeline(); - }}, - {cpuTaskExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::Postprocessing"); - _inferRequest->inferPostprocess(); - }} - }; + _pipeline = {{cpuTaskExecutor, + [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); + _inferRequest->inferPreprocess(); + _inferRequest->startPipeline(); + }}, + {_waitExecutor, + [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline"); + _inferRequest->waitPipeline(); + }}, + {cpuTaskExecutor, [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing"); + _inferRequest->inferPostprocess(); + }}}; } } // ! [async_infer_request:ctor] diff --git a/docs/template_plugin/src/template_async_infer_request.hpp b/docs/template_plugin/src/template_async_infer_request.hpp index 51221f908eeffc..942f71a616f379 100644 --- a/docs/template_plugin/src/template_async_infer_request.hpp +++ b/docs/template_plugin/src/template_async_infer_request.hpp @@ -13,15 +13,13 @@ namespace TemplatePlugin { // ! [async_infer_request:header] class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault { public: - TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, - const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, - const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, - const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); + TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); ~TemplateAsyncInferRequest(); private: - TemplateInferRequest::Ptr _inferRequest; + TemplateInferRequest::Ptr _inferRequest; InferenceEngine::ITaskExecutor::Ptr _waitExecutor; }; // ! 
[async_infer_request:header] diff --git a/docs/template_plugin/src/template_config.cpp b/docs/template_plugin/src/template_config.cpp index c29e17512c381b..3d9d4e488fe6bd 100644 --- a/docs/template_plugin/src/template_config.cpp +++ b/docs/template_plugin/src/template_config.cpp @@ -2,17 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "template_config.hpp" + #include +#include -#include "template_config.hpp" #include "template/template_config.hpp" using namespace TemplatePlugin; -Configuration::Configuration() { } +Configuration::Configuration() {} -Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) { +Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) { *this = defaultCfg; // If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys(); @@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) { _streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value); - } else if (streamExecutorConfigKeys.end() != - std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { + } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { _streamsExecutorConfig.SetConfig(key, value); } else if (CONFIG_KEY(DEVICE_ID) == key) { deviceId = std::stoi(value); diff --git a/docs/template_plugin/src/template_config.hpp b/docs/template_plugin/src/template_config.hpp index 2085e290af2171..d49bf491327a35 100644 --- a/docs/template_plugin/src/template_config.hpp +++ b/docs/template_plugin/src/template_config.hpp @@ -4,11 +4,9 @@ #pragma once -#include -#include - #include - +#include +#include #include namespace TemplatePlugin { @@ -18,19 +16,19 @@ using ConfigMap = std::map; struct Configuration { Configuration(); - Configuration(const Configuration&) = default; - Configuration(Configuration&&) = default; - Configuration& operator=(const Configuration&) = default; - Configuration& operator=(Configuration&&) = default; + Configuration(const Configuration&) = default; + Configuration(Configuration&&) = default; + Configuration& operator=(const Configuration&) = default; + Configuration& operator=(Configuration&&) = default; - explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true); + explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true); InferenceEngine::Parameter Get(const std::string& name) const; // Plugin configuration parameters - int deviceId = 0; - bool perfCount = true; + int deviceId = 0; + bool perfCount = true; InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig; }; // ! 
[configuration:header] diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp index e599dceb43445a..f0f2a8066e515f 100644 --- a/docs/template_plugin/src/template_executable_network.cpp +++ b/docs/template_plugin/src/template_executable_network.cpp @@ -2,36 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "template_executable_network.hpp" + #include #include #include -#include "transformations/serialize.hpp" #include "template/template_config.hpp" -#include "template_plugin.hpp" -#include "template_executable_network.hpp" #include "template_itt.hpp" +#include "template_plugin.hpp" +#include "transformations/serialize.hpp" using namespace TemplatePlugin; // ! [executable_network:ctor_cnnnetwork] TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap, - const Configuration& cfg, - const Plugin::Ptr& plugin) : - InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation - _cfg(cfg), - _plugin(plugin) { + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap, + const Configuration& cfg, const Plugin::Ptr& plugin) + : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation + _cfg(cfg), + _plugin(plugin) { // TODO: if your plugin supports device ID (more that single instance of device can be on host machine) // you should select proper device based on KEY_DEVICE_ID or automatic behavior // In this case, _waitExecutor should also be created per device. try { CompileNetwork(function, inputInfoMap, outputsInfoMap); - InitExecutor(); // creates thread-based executor using for async requests + InitExecutor(); // creates thread-based executor using for async requests } catch (const InferenceEngine::Exception&) { throw; - } catch (const std::exception & e) { + } catch (const std::exception& e) { IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); } catch (...) { IE_THROW(Unexpected) << "Generic exception is thrown"; @@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr(&dataSize), sizeof(dataSize)); if (0 != dataSize) { dataBlob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, - {static_cast(dataSize)}, - InferenceEngine::Layout::C)); + InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast(dataSize)}, InferenceEngine::Layout::C)); dataBlob->allocate(); model.read(dataBlob->buffer(), dataSize); } @@ -77,10 +70,10 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, try { CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap); - InitExecutor(); // creates thread-based executor using for async requests + InitExecutor(); // creates thread-based executor using for async requests } catch (const InferenceEngine::Exception&) { throw; - } catch (const std::exception & e) { + } catch (const std::exception& e) { IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); } catch (...) { IE_THROW(Unexpected) << "Generic exception is thrown"; @@ -90,12 +83,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, // ! 
[executable_network:map_graph] // forward declaration -std::shared_ptr TransformNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap); void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap) { // TODO: perform actual graph compilation / mapping to backend graph representation / kernels @@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr(networkInputs, networkOutputs, std::static_pointer_cast(shared_from_this())); } // ! [executable_network:create_infer_request_impl] @@ -148,32 +138,26 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C // ! [executable_network:create_infer_request] InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() { auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs); - return std::make_shared(std::static_pointer_cast(internalRequest), - _taskExecutor, _plugin->_waitExecutor, _callbackExecutor); + return std::make_shared(std::static_pointer_cast(internalRequest), _taskExecutor, _plugin->_waitExecutor, + _callbackExecutor); } // ! [executable_network:create_infer_request] // ! [executable_network:get_config] -InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const { +InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string& name) const { return _cfg.Get(name); } // ! [executable_network:get_config] // ! 
[executable_network:get_metric] -InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const { +InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const { // TODO: return more supported values for metrics if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { - IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector{ - METRIC_KEY(NETWORK_NAME), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); + IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - std::vector configKeys = { - CONFIG_KEY(DEVICE_ID), - CONFIG_KEY(PERF_COUNT), - TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) }; - auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys(); + std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; + auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys(); for (auto&& configKey : streamExecutorConfigKeys) { configKeys.emplace_back(configKey); } @@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) { // Note: custom ngraph extensions are not supported std::map custom_opsets; std::stringstream xmlFile, binFile; - ngraph::pass::Serialize serializer(xmlFile, binFile, - ngraph::pass::Serialize::Version::IR_V10, custom_opsets); + ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets); serializer.run_on_function(_function); auto m_constants = binFile.str(); diff --git a/docs/template_plugin/src/template_executable_network.hpp b/docs/template_plugin/src/template_executable_network.hpp index 23f781a2efda3f..cebfddb3947eed 100644 --- a/docs/template_plugin/src/template_executable_network.hpp +++ b/docs/template_plugin/src/template_executable_network.hpp @@ -4,13 +4,12 @@ #pragma once +#include #include +#include "template_async_infer_request.hpp" #include "template_config.hpp" #include "template_infer_request.hpp" -#include "template_async_infer_request.hpp" - -#include namespace TemplatePlugin { @@ -24,15 +23,10 @@ class Plugin; // ! 
[executable_network:header] class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault { public: - ExecutableNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap, - const Configuration& cfg, - const std::shared_ptr& plugin); + ExecutableNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr& plugin); - ExecutableNetwork(std::istream& model, - const Configuration& cfg, - const std::shared_ptr& plugin); + ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr& plugin); ~ExecutableNetwork() override = default; @@ -42,23 +36,22 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::OutputsDataMap networkOutputs) override; InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override; - InferenceEngine::Parameter GetMetric(const std::string &name) const override; - InferenceEngine::Parameter GetConfig(const std::string &name) const override; + InferenceEngine::Parameter GetMetric(const std::string& name) const override; + InferenceEngine::Parameter GetConfig(const std::string& name) const override; private: friend class TemplateInferRequest; - void CompileNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap); + void CompileNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap); void InitExecutor(); - std::atomic _requestId = {0}; - Configuration _cfg; - std::shared_ptr _plugin; - std::shared_ptr _function; - std::map _inputIndex; - std::map _outputIndex; + std::atomic _requestId = {0}; + Configuration _cfg; + std::shared_ptr _plugin; + std::shared_ptr _function; + std::map _inputIndex; + std::map _outputIndex; }; // ! [executable_network:header] diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp index 24a9d40d218c30..20c47bfd19e931 100644 --- a/docs/template_plugin/src/template_infer_request.cpp +++ b/docs/template_plugin/src/template_infer_request.cpp @@ -2,19 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "template_infer_request.hpp" + #include +#include #include +#include #include -#include +#include -#include -#include "template_infer_request.hpp" +#include "blob_factory.hpp" +#include "ie_ngraph_utils.hpp" #include "template_executable_network.hpp" -#include "template_plugin.hpp" #include "template_itt.hpp" -#include "ie_ngraph_utils.hpp" -#include "blob_factory.hpp" +#include "template_plugin.hpp" using namespace TemplatePlugin; using namespace InferenceEngine; @@ -22,11 +23,9 @@ using namespace InferenceEngine; using Time = std::chrono::high_resolution_clock; // ! 
[infer_request:ctor] -TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, - const InferenceEngine::OutputsDataMap& networkOutputs, - const std::shared_ptr& executableNetwork) : - IInferRequestInternal(networkInputs, networkOutputs), - _executableNetwork(executableNetwork) { +TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs, + const std::shared_ptr& executableNetwork) + : IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) { // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1)); @@ -60,11 +59,8 @@ void TemplateInferRequest::allocateDeviceBuffers() { _outputTensors.resize(_networkOutputs.size()); } -template -static void AllocateImpl(const BlobDataMap& userDataMap, - BlobMap& userBlobMap, - BlobMap& deviceBlobMap, - GetNetworkPrecisionF&& GetNetworkPrecision, +template +static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision, bool isInputBlob = true) { for (auto&& userData : userDataMap) { const auto& dims = userData.second->getTensorDesc().getDims(); @@ -82,7 +78,7 @@ static void AllocateImpl(const BlobDataMap& userDataMap, deviceBlob = userBlob; } else { if (userLayout != deviceLayout && !isInputBlob) { - IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs"; + IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs"; } deviceBlob = make_blob_with_precision({networkPrecision, dims, deviceLayout}); deviceBlob->allocate(); @@ -94,13 +90,16 @@ static void AllocateImpl(const BlobDataMap& userDataMap, void TemplateInferRequest::allocateBlobs() { auto&& parameters = _executableNetwork->_function->get_parameters(); - AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) { + AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&](const std::string& blobName) { return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type(); }); auto&& results = _executableNetwork->_function->get_results(); - AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) { - return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); - }, false); + AllocateImpl( + _networkOutputs, _outputs, _networkOutputBlobs, + [&](const std::string& blobName) { + return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); + }, + false); } // ! [infer_request:infer_impl] @@ -113,103 +112,108 @@ void TemplateInferRequest::InferImpl() { } // ! 
[infer_request:infer_impl] -template +template static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { - ngraph::runtime::reference::convert( - InferenceEngine::as(src)->rmap().as(), - InferenceEngine::as(dst)->wmap().as(), - src->size()); + ngraph::runtime::reference::convert(InferenceEngine::as(src)->rmap().as(), + InferenceEngine::as(dst)->wmap().as(), src->size()); } static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { switch (src->getTensorDesc().getPrecision()) { - case Precision::U8 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::U8 : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + case Precision::U8: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::U8: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - case Precision::FP32 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::FP32 : break; - case Precision::U8 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::FP32: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::FP32: + break; + case Precision::U8: { + blobCopy(src, dst); } break; - case Precision::I64 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::I64 : break; - case Precision::I32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I64: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::I64: + break; + case Precision::I32: { + blobCopy(src, dst); } break; - case Precision::I16 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::I16 : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I16: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::I16: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - case Precision::I8 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::I8 : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " 
<< src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I8: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::I8: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - case Precision::BOOL : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::BOOL : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::BOOL: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::BOOL: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - case Precision::U16 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::U16 : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::U16: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::U16: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision(); + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); } + } + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision(); + } } } @@ -225,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() { const auto& parameter = _parameters[index]; const auto& parameterShape = parameter->get_shape(); const auto& parameterType = parameter->get_element_type(); - _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape, - InferenceEngine::as(networkInput.second)->rmap().as()); + _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( + parameterType, parameterShape, InferenceEngine::as(networkInput.second)->rmap().as()); } for (auto&& output : _outputs) { auto outputBlob = output.second; @@ -238,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() { const auto& result = _results[index]; const auto& resultShape = result->get_shape(); const auto& resultType = result->get_element_type(); - _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape, - InferenceEngine::as(networkOutput)->wmap().as()); + _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( + resultType, resultShape, InferenceEngine::as(networkOutput)->wmap().as()); } _durations[Preprocess] = Time::now() - start; } diff --git a/docs/template_plugin/src/template_infer_request.hpp b/docs/template_plugin/src/template_infer_request.hpp index 61187df79858e4..ca92c76bbbdf29 100644 --- a/docs/template_plugin/src/template_infer_request.hpp +++ 
b/docs/template_plugin/src/template_infer_request.hpp @@ -4,20 +4,17 @@ #pragma once -#include -#include -#include #include -#include #include - -#include - -#include #include - -#include #include +#include +#include +#include +#include +#include +#include +#include namespace TemplatePlugin { @@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { public: typedef std::shared_ptr Ptr; - TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, - const InferenceEngine::OutputsDataMap& networkOutputs, + TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs, const std::shared_ptr& executableNetwork); ~TemplateInferRequest(); @@ -47,26 +43,20 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { void allocateDeviceBuffers(); void allocateBlobs(); - enum { - Preprocess, - Postprocess, - StartPipeline, - WaitPipeline, - numOfStages - }; + enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages }; - std::shared_ptr _executableNetwork; - std::array _profilingTask; + std::shared_ptr _executableNetwork; + std::array _profilingTask; // for performance counters - std::array, numOfStages> _durations; + std::array, numOfStages> _durations; - InferenceEngine::BlobMap _networkOutputBlobs; - ngraph::ParameterVector _parameters; - ngraph::ResultVector _results; + InferenceEngine::BlobMap _networkOutputBlobs; + ngraph::ParameterVector _parameters; + ngraph::ResultVector _results; - std::vector> _inputTensors; - std::vector> _outputTensors; - std::shared_ptr _executable; + std::vector> _inputTensors; + std::vector> _outputTensors; + std::shared_ptr _executable; }; // ! [infer_request:header] diff --git a/docs/template_plugin/src/template_itt.hpp b/docs/template_plugin/src/template_itt.hpp index 089d49c17522f7..1d734bcf9424cf 100644 --- a/docs/template_plugin/src/template_itt.hpp +++ b/docs/template_plugin/src/template_itt.hpp @@ -14,7 +14,7 @@ namespace TemplatePlugin { namespace itt { namespace domains { - OV_ITT_DOMAIN(TemplatePlugin); -} -} +OV_ITT_DOMAIN(TemplatePlugin); } +} // namespace itt +} // namespace TemplatePlugin diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp index 6da16cce891216..beaedb97c5e509 100644 --- a/docs/template_plugin/src/template_plugin.cpp +++ b/docs/template_plugin/src/template_plugin.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +// clang-format off #include #include #include @@ -24,6 +25,7 @@ #include "template_infer_request.hpp" #include "transformations/template_pattern_transformation.hpp" #include "transformations/preprocessing/preprocessing.hpp" +// clang-format on using namespace TemplatePlugin; @@ -53,8 +55,7 @@ Plugin::~Plugin() { // ! [plugin:transform_network] -std::shared_ptr TransformNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap) { // 1. 
diff --git a/docs/template_plugin/src/template_itt.hpp b/docs/template_plugin/src/template_itt.hpp
index 089d49c17522f7..1d734bcf9424cf 100644
--- a/docs/template_plugin/src/template_itt.hpp
+++ b/docs/template_plugin/src/template_itt.hpp
@@ -14,7 +14,7 @@ namespace TemplatePlugin {
 namespace itt {
 namespace domains {
-    OV_ITT_DOMAIN(TemplatePlugin);
-}
-}
+OV_ITT_DOMAIN(TemplatePlugin);
 }
+}  // namespace itt
+}  // namespace TemplatePlugin
diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp
index 6da16cce891216..beaedb97c5e509 100644
--- a/docs/template_plugin/src/template_plugin.cpp
+++ b/docs/template_plugin/src/template_plugin.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+// clang-format off
 #include
 #include
 #include
@@ -24,6 +25,7 @@
 #include "template_infer_request.hpp"
 #include "transformations/template_pattern_transformation.hpp"
 #include "transformations/preprocessing/preprocessing.hpp"
+// clang-format on
 
 using namespace TemplatePlugin;
 
@@ -53,8 +55,7 @@ Plugin::~Plugin() {
 
 // ! [plugin:transform_network]
-std::shared_ptr TransformNetwork(const std::shared_ptr& function,
-                                 const InferenceEngine::InputsDataMap & inputInfoMap,
+std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap,
                                  const InferenceEngine::OutputsDataMap& outputsInfoMap) {
     // 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
     auto transformedNetwork = ngraph::clone_function(*function);
@@ -67,7 +68,7 @@ std::shared_ptr TransformNetwork(const std::shared_ptr();
     // Template plugin handles only FP32 networks
-    passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32 }});
+    passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
     // Example: register plugin specific transformation
     passManager.register_pass();
     passManager.register_pass();
@@ -83,36 +84,32 @@ std::shared_ptr TransformNetwork(const std::shared_ptr(network.getFunction(),
-                                               networkInputs, networkOutputs, fullConfig,
-                                               std::static_pointer_cast(shared_from_this()));
+    auto fullConfig = Configuration {config, _cfg};
+    return std::make_shared(network.getFunction(), networkInputs, networkOutputs, fullConfig,
+                            std::static_pointer_cast(shared_from_this()));
 }
 // ! [plugin:load_exe_network_impl]
 
 // ! [plugin:import_network_impl]
-InferenceEngine::ExecutableNetworkInternal::Ptr
-Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map& config) {
+InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map& config) {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl");
 
-    auto fullConfig = Configuration{ config, _cfg };
-    return std::make_shared(modelStream, fullConfig,
-                            std::static_pointer_cast(shared_from_this()));
+    auto fullConfig = Configuration {config, _cfg};
+    return std::make_shared(modelStream, fullConfig, std::static_pointer_cast(shared_from_this()));
 }
 // ! [plugin:import_network_impl]
 
 // ! [plugin:query_network]
-InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const {
+InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork");
 
-    Configuration fullConfig{config, _cfg, false};
+    Configuration fullConfig {config, _cfg, false};
     auto function = network.getFunction();
 
     // 1. First of all we should store initial input operation set
@@ -198,36 +195,28 @@ void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
 // ! [plugin:add_extension]
 
 // ! [plugin:set_config]
-void Plugin::SetConfig(const ConfigMap &config) {
-    _cfg = Configuration{config, _cfg};
+void Plugin::SetConfig(const ConfigMap& config) {
+    _cfg = Configuration {config, _cfg};
 }
 // ! [plugin:set_config]
 
 // ! [plugin:get_config]
-InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map & /*options*/) const {
+InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map& /*options*/) const {
     return _cfg.Get(name);
 }
 // ! [plugin:get_config]
 
 // ! [plugin:get_metric]
-InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map & options) const {
+InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map& options) const {
     if (METRIC_KEY(SUPPORTED_METRICS) == name) {
-        std::vector supportedMetrics = {
-            METRIC_KEY(AVAILABLE_DEVICES),
-            METRIC_KEY(SUPPORTED_METRICS),
-            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
-            METRIC_KEY(FULL_DEVICE_NAME),
-            METRIC_KEY(IMPORT_EXPORT_SUPPORT),
-            METRIC_KEY(DEVICE_ARCHITECTURE),
-            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
-            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
+        std::vector supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
+                                        METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
+                                        METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
+                                        METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
         IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
     } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
-        std::vector configKeys = {
-            CONFIG_KEY(DEVICE_ID),
-            CONFIG_KEY(PERF_COUNT),
-            TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
-        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
+        std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
+        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
         for (auto&& configKey : streamExecutorConfigKeys) {
             if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
                 configKeys.emplace_back(configKey);
@@ -236,7 +225,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
         IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
     } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
         // TODO: fill list of available devices
-        std::vector availableDevices = { "" };
+        std::vector availableDevices = {""};
         IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
     } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
         std::string name = "Template Device Full Name";
@@ -249,13 +238,13 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
         IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, arch);
     } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
         // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
-        std::vector capabilities = { METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/ };
+        std::vector capabilities = {METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/};
         IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
     } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
         // TODO: fill with actual values
        using uint = unsigned int;
-        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
-    } else {
+        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1}));
+    } else {
         IE_THROW(NotFound) << "Unsupported device metric: " << name;
     }
 }
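GetMetric is what applications hit when they enumerate a device's capabilities. A hedged usage sketch of querying these metrics from client code through InferenceEngine::Core (the device name "TEMPLATE" and the exact set of returned keys are assumptions based on this plugin; adjust to the registered device name):

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        InferenceEngine::Core core;
        // Ask the plugin which metrics it advertises, then which config keys it accepts.
        auto metrics = core.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
        for (const auto& m : metrics)
            std::cout << "metric: " << m << "\n";

        auto keys = core.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
        for (const auto& k : keys)
            std::cout << "config key: " << k << "\n";
    }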
diff --git a/docs/template_plugin/src/template_plugin.hpp b/docs/template_plugin/src/template_plugin.hpp
index 10b68d7af42f10..f065fad04e390c 100644
--- a/docs/template_plugin/src/template_plugin.hpp
+++ b/docs/template_plugin/src/template_plugin.hpp
@@ -4,11 +4,11 @@
 
 #pragma once
 
-#include "template_config.hpp"
-#include "template_executable_network.hpp"
 #include
 
 #include "backend.hpp"
+#include "template_config.hpp"
+#include "template_executable_network.hpp"
 
 //! [plugin:header]
 namespace TemplatePlugin {
 
@@ -20,26 +20,24 @@ class Plugin : public InferenceEngine::InferencePluginInternal {
     Plugin();
     ~Plugin();
 
-    void SetConfig(const std::map &config) override;
-    InferenceEngine::QueryNetworkResult
-    QueryNetwork(const InferenceEngine::CNNNetwork &network,
-                 const std::map& config) const override;
-    InferenceEngine::ExecutableNetworkInternal::Ptr
-    LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network,
-                       const std::map &config) override;
+    void SetConfig(const std::map& config) override;
+    InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
+                                                     const std::map& config) const override;
+    InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
+                                                                       const std::map& config) override;
     void AddExtension(InferenceEngine::IExtensionPtr extension) override;
-    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map & options) const override;
-    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map & options) const override;
+    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map& options) const override;
+    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override;
     InferenceEngine::ExecutableNetworkInternal::Ptr ImportNetworkImpl(std::istream& model, const std::map& config) override;
 
 private:
     friend class ExecutableNetwork;
     friend class TemplateInferRequest;
 
-    std::shared_ptr _backend;
-    Configuration _cfg;
-    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
+    std::shared_ptr _backend;
+    Configuration _cfg;
+    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
 };
 
 }  // namespace TemplatePlugin
-//! [plugin:header]
\ No newline at end of file
+//! [plugin:header]
diff --git a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp
index a74d8501981163..39fd79423876db 100644
--- a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp
+++ b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp
@@ -2,21 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "transformations/preprocessing/mean_image_or_value.hpp"
+
 #include
 #include
 #include
 
-#include "transformations/preprocessing/mean_image_or_value.hpp"
-
 using namespace ngraph;
 
 NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0);
 
-ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
+ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) {
     // RUN_ON_FUNCTION_SCOPE(AddMeanSubtract);
     auto label = ngraph::pattern::wrap_type();
 
-    ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) {
+    ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
         auto param = std::dynamic_pointer_cast(m.get_match_root());
         if (!param) {
             return false;
@@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
         }
 
         auto mean_const = it->second;
-        NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32,
-            "Mean for ", param->get_friendly_name(), " must have f32 type");
+        NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");
 
         auto copy_param = param->clone_with_new_inputs({});
         auto sub = std::make_shared(copy_param, mean_const);
diff --git a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp
index 906bfdc0aa4513..f465ad9f948e72 100644
--- a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp
+++ b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp
@@ -5,10 +5,9 @@
 #pragma once
 
 #include
-#include
-
 #include
 #include
+#include
 
 #include "transformations_visibility.hpp"
 
@@ -29,5 +28,5 @@ class ngraph::pass::AddMeanSubtract : public ngraph::pass::MatcherPass {
     using MeanMap = std::map>;
 
     NGRAPH_RTTI_DECLARATION;
-    explicit AddMeanSubtract(const MeanMap & inputInfoMap);
+    explicit AddMeanSubtract(const MeanMap& inputInfoMap);
 };
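AddMeanSubtract matches every Parameter and, when the MeanMap contains an entry for it, inserts a Subtract with an f32 Constant. A rough usage sketch, assuming the pass's constructor argument is forwarded through register_pass as the code above suggests (the input name "data" and the mean values are hypothetical):

    #include <map>
    #include <memory>
    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset3.hpp>
    #include <ngraph/pass/manager.hpp>
    #include "transformations/preprocessing/mean_image_or_value.hpp"  // plugin-local header

    void subtract_means(std::shared_ptr<ngraph::Function> f) {
        ngraph::pass::AddMeanSubtract::MeanMap meanMap;
        // One mean value per channel for an input named "data" (hypothetical name).
        meanMap["data"] = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1, 3, 1, 1},
                                                           {104.0f, 117.0f, 123.0f});
        ngraph::pass::Manager manager;
        manager.register_pass<ngraph::pass::AddMeanSubtract>(meanMap);
        manager.run_passes(f);
    }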
diff --git a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp
index 4f68deb3f93cd8..b6f211d113505a 100644
--- a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp
+++ b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp
@@ -2,26 +2,26 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "transformations/preprocessing/preprocessing.hpp"
+
 #include
+#include
 
 #include "transformations/preprocessing/mean_image_or_value.hpp"
 #include "transformations/preprocessing/std_scale.hpp"
-#include "transformations/preprocessing/preprocessing.hpp"
 
 NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);
 
-ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap)
-    : m_inputInfoMap(inputInfoMap) { }
+ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}
 
 bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr f) {
     ngraph::pass::AddMeanSubtract::MeanMap meanMap;
     ngraph::pass::AddStdScale::ScaleMap scaleMap;
 
-    for (const auto & it : m_inputInfoMap) {
+    for (const auto& it : m_inputInfoMap) {
         bool has_scales = false, has_mean_values = false, has_mean_image = false;
-        const InferenceEngine::PreProcessInfo & pInfo = it.second->getPreProcess();
-        const auto & inputDims = it.second->getTensorDesc().getDims();
+        const InferenceEngine::PreProcessInfo& pInfo = it.second->getPreProcess();
+        const auto& inputDims = it.second->getTensorDesc().getDims();
         const size_t cn = pInfo.getNumberOfChannels();
         std::vector meanValues(cn), stdScales(cn);
         InferenceEngine::Blob::Ptr meanImage = nullptr;
@@ -40,11 +40,10 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrmeanData;
                 NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
-                    "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
+                             "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
             } else {
                 NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr");
-                NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(),
-                    "TensorDesc for PreProcessChannel::meanData must be equal");
+                NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
             }
         }
     }
@@ -54,35 +53,33 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrgetTensorDesc().getDims();
             std::copy(dims.begin(), dims.end(), std::back_inserter(shape));
 
             std::vector meanImageData(ngraph::shape_size(shape));
             for (size_t c = 0, i = 0; c < cn; ++c) {
                 auto lm = pInfo[c]->meanData->buffer();
-                const float *data = lm.as();
+                const float* data = lm.as();
 
                 std::memcpy(&meanImageData[i], data, meanImage->byteSize());
                 i += meanImage->size();
             }
 
-            meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32,
-                shape, meanImageData);
+            meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData);
         }
     }
diff --git a/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp b/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp
index 3ff95fc95ea12e..c724f06aa0ea73 100644
--- a/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp
+++ b/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp
@@ -26,10 +26,11 @@ class AddPreprocessing;
  * (x - mean) * stdScale
  */
 class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass {
-    const InferenceEngine::InputsDataMap & m_inputInfoMap;
+    const InferenceEngine::InputsDataMap& m_inputInfoMap;
+
 public:
     NGRAPH_RTTI_DECLARATION;
-    explicit AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap);
+    explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap);
 
     bool run_on_function(std::shared_ptr f) override;
 };
diff --git a/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp b/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp
index 44ad4d6080c570..90c5163bdf2a1d 100644
--- a/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp
+++ b/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp
@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "transformations/preprocessing/std_scale.hpp"
+
 #include
 #include
 #include
-#include "transformations/preprocessing/std_scale.hpp" - using namespace ngraph; NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0); @@ -16,7 +16,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) { // RUN_ON_FUNCTION_SCOPE(AddStdScale); auto label = ngraph::pattern::wrap_type(); - ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) { + ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { auto param = std::dynamic_pointer_cast(m.get_match_root()); if (!param) { return false; @@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) { } auto scale_const = it->second; - NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, - "Scale for ", param->get_friendly_name(), " must have f32 type"); + NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type"); auto copy_param = param->clone_with_new_inputs({}); auto mul = std::make_shared(copy_param, it->second); diff --git a/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp b/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp index edc2838bd46259..cd809727f10ba3 100644 --- a/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp +++ b/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp @@ -5,10 +5,9 @@ #pragma once #include -#include - #include #include +#include #include "transformations_visibility.hpp" diff --git a/docs/template_plugin/src/transformations/template_function_transformation.cpp b/docs/template_plugin/src/transformations/template_function_transformation.cpp index 0c58de4c00cf87..410993d680b556 100644 --- a/docs/template_plugin/src/transformations/template_function_transformation.cpp +++ b/docs/template_plugin/src/transformations/template_function_transformation.cpp @@ -15,7 +15,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptrget_ordered_ops()) { + for (auto& node : f->get_ordered_ops()) { // Check that number of input and output ports are equal to 1 if (node->inputs().size() == 1 && node->outputs().size() == 1) { // Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1 @@ -28,9 +28,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptrget_type_info().name << std::endl - << "Name: " << node->get_friendly_name() << std::endl; + for (auto& node : nodes) { + std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl; } // Return false because we didn't change nGraph Function diff --git a/docs/template_plugin/src/transformations/template_function_transformation.hpp b/docs/template_plugin/src/transformations/template_function_transformation.hpp index 3cd330edce2503..ae665be6a62ff7 100644 --- a/docs/template_plugin/src/transformations/template_function_transformation.hpp +++ b/docs/template_plugin/src/transformations/template_function_transformation.hpp @@ -16,7 +16,7 @@ class MyFunctionTransformation; // ! 
 // ! [function_pass:template_transformation_hpp]
 // template_function_transformation.hpp
-class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass {
+class ngraph::pass::MyFunctionTransformation : public ngraph::pass::FunctionPass {
 public:
     NGRAPH_RTTI_DECLARATION;
     bool run_on_function(std::shared_ptr f) override;
diff --git a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp
index c1a3a92fa15bb2..063f52ad736dc2 100644
--- a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp
+++ b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp
@@ -3,13 +3,14 @@
 //
 
 #include "transformations/template_pattern_transformation.hpp"
-#include "transformations/template_function_transformation.hpp"
 
 #include
 #include
 #include
 #include
 
+#include "transformations/template_function_transformation.hpp"
+
 using namespace ngraph;
 
 // ! [graph_rewrite:template_transformation_cpp]
@@ -23,15 +24,14 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
     auto div = std::make_shared(input0, input1);
 
     ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
-        auto div = std::dynamic_pointer_cast (m.get_match_root());
+        auto div = std::dynamic_pointer_cast(m.get_match_root());
         // We can not apply this transformation in case with integer input data type
         if (!div || div->input(0).get_element_type().is_integral()) {
             return false;
         }
 
         // Decompose Divide into Multiply with Power operations
-        auto pow = std::make_shared(div->input_value(1),
-                                    opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1}));
+        auto pow = std::make_shared(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));
 
         auto mul = std::make_shared(div->input_value(0), pow);
 
@@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
         auto& node_to_output = m.get_pattern_value_map();
 
         // Create new Relu operation and add register it for additional execution
-        auto new_relu = register_new_node(
-            node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
+        auto new_relu = register_new_node(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
 
         // Copy runtime info attributes to newly created operation
         ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);
@@ -91,60 +90,60 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
 // ! [matcher_pass:relu_fusion]
 
 void run_matcher_on_node(std::shared_ptr node) {
-// ! [matcher_pass:run_on_node]
-if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
-    // successful execution (root node was replaced)
-}
-// ! [matcher_pass:run_on_node]
+    // ! [matcher_pass:run_on_node]
+    if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
+        // successful execution (root node was replaced)
+    }
+    // ! [matcher_pass:run_on_node]
 }
 
 void run_matcher_with_manager(std::shared_ptr f) {
-// ! [matcher_pass:manager]
-// Two matchers will run independently (two independent graph traversals)
-// pass::Manager automatically creates GraphRewrite container for each MatcherPass
-pass::Manager manager;
-manager.register_pass();
-manager.register_pass();
-manager.run_passes(f);
-// ! [matcher_pass:manager]
+    // ! [matcher_pass:manager]
+    // Two matchers will run independently (two independent graph traversals)
+    // pass::Manager automatically creates GraphRewrite container for each MatcherPass
+    pass::Manager manager;
+    manager.register_pass();
+    manager.register_pass();
+    manager.run_passes(f);
+    // ! [matcher_pass:manager]
 }
 
 void run_matcher_with_manager2(std::shared_ptr f) {
-// ! [matcher_pass:manager2]
-// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
-pass::Manager manager;
-auto anchor = manager.register_pass();
-anchor->add_matcher();
-anchor->add_matcher();
-manager.run_passes(f);
-// ! [matcher_pass:manager2]
+    // ! [matcher_pass:manager2]
+    // Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
+    pass::Manager manager;
+    auto anchor = manager.register_pass();
+    anchor->add_matcher();
+    anchor->add_matcher();
+    manager.run_passes(f);
+    // ! [matcher_pass:manager2]
 }
 
 void run_matcher_with_manager3(std::shared_ptr f) {
-// ! [matcher_pass:manager3]
-pass::Manager manager;
-manager.register_pass();
-// Two matchers will run independently (two independent graph traversals)
-// pass::Manager automatically creates GraphRewrite container for each MatcherPass
-manager.register_pass();
-manager.register_pass();
-manager.run_passes(f);
-// ! [matcher_pass:manager3]
+    // ! [matcher_pass:manager3]
+    pass::Manager manager;
+    manager.register_pass();
+    // Two matchers will run independently (two independent graph traversals)
+    // pass::Manager automatically creates GraphRewrite container for each MatcherPass
+    manager.register_pass();
+    manager.register_pass();
+    manager.run_passes(f);
+    // ! [matcher_pass:manager3]
 }
 
 void run_matcher_with_gr(std::shared_ptr f) {
-// ! [matcher_pass:graph_rewrite]
-// Two matcher passes will run simultaneously in a single graph traversal
-ngraph::pass::GraphRewrite pass;
-pass.add_matcher();
-pass.add_matcher();
-pass.run_on_function(f);
-// ! [matcher_pass:graph_rewrite]
+    // ! [matcher_pass:graph_rewrite]
+    // Two matcher passes will run simultaneously in a single graph traversal
+    ngraph::pass::GraphRewrite pass;
+    pass.add_matcher();
+    pass.add_matcher();
+    pass.run_on_function(f);
+    // ! [matcher_pass:graph_rewrite]
 }
 
 // ! [manual_constant_folding]
 template
-Output eltwise_fold(const Output & input0, const Output & input1) {
+Output eltwise_fold(const Output& input0, const Output& input1) {
     auto eltwise = std::make_shared(input0, input1);
     OutputVector output(eltwise->get_output_size());
     // If constant folding wasn't successful return eltwise output
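DecomposeDivideMatcher rewrites Divide(x, y) into Multiply(x, Power(y, -1)). A hedged end-to-end sketch that builds a tiny function with one Divide and runs the pass through a Manager, following the registration pattern shown above (the header path and the exact namespace of the matcher are taken from this patch; everything else is illustrative):

    #include <memory>
    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset3.hpp>
    #include <ngraph/pass/manager.hpp>
    #include "transformations/template_pattern_transformation.hpp"  // plugin-local header

    std::shared_ptr<ngraph::Function> make_and_transform() {
        using namespace ngraph;
        auto x = std::make_shared<opset3::Parameter>(element::f32, Shape {1, 4});
        auto y = std::make_shared<opset3::Parameter>(element::f32, Shape {1, 4});
        auto div = std::make_shared<opset3::Divide>(x, y);
        auto f = std::make_shared<Function>(OutputVector {div}, ParameterVector {x, y});

        pass::Manager manager;
        manager.register_pass<pass::DecomposeDivideMatcher>();
        manager.run_passes(f);  // after this, the Divide node should have been replaced
        return f;
    }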
diff --git a/docs/template_plugin/src/transformations/template_pattern_transformation.hpp b/docs/template_plugin/src/transformations/template_pattern_transformation.hpp
index f2b8d400988db9..f4628afdc3f2fe 100644
--- a/docs/template_plugin/src/transformations/template_pattern_transformation.hpp
+++ b/docs/template_plugin/src/transformations/template_pattern_transformation.hpp
@@ -21,14 +21,14 @@ class ReluReluFusionMatcher;
  * @ingroup ie_transformation_common_api
  * @brief Add transformation description.
  */
-class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass {
+class ngraph::pass::DecomposeDivideMatcher : public ngraph::pass::MatcherPass {
 public:
     NGRAPH_RTTI_DECLARATION;
     DecomposeDivideMatcher();
 };
 // ! [graph_rewrite:template_transformation_hpp]
 
-class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass {
+class ngraph::pass::ReluReluFusionMatcher : public ngraph::pass::MatcherPass {
 public:
     NGRAPH_RTTI_DECLARATION;
     ReluReluFusionMatcher();
diff --git a/docs/template_plugin/tests/functional/CMakeLists.txt b/docs/template_plugin/tests/functional/CMakeLists.txt
index a2962cea0ae2c6..96ab3fdcbe41a7 100644
--- a/docs/template_plugin/tests/functional/CMakeLists.txt
+++ b/docs/template_plugin/tests/functional/CMakeLists.txt
@@ -14,7 +14,7 @@ addIeTargetTest(
             IE::funcSharedTests
         INCLUDES
             "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
-        ADD_CPPLINT
+        ADD_CLANG_FORMAT
         LABELS
             TEMPLATE
 )
diff --git a/docs/template_plugin/tests/functional/core_config.cpp b/docs/template_plugin/tests/functional/core_config.cpp
index e75091f571fa70..6c70d61d35a113 100644
--- a/docs/template_plugin/tests/functional/core_config.cpp
+++ b/docs/template_plugin/tests/functional/core_config.cpp
@@ -4,5 +4,4 @@
 
 #include "functional_test_utils/core_config.hpp"
 
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
-}
+void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp
index f61e4c54d7ec81..547c073ddbc8c0 100644
--- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp
+++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp
@@ -7,19 +7,14 @@
 using namespace LayerTestsDefinitions;
 
 namespace {
-    static const std::vector precisionsTemplate = {
-        ngraph::element::f32,
-    };
+static const std::vector precisionsTemplate = {
+    ngraph::element::f32,
+};
 
-    static const std::vector batchSizesTemplate = {
-        1, 2
-    };
+static const std::vector batchSizesTemplate = {1, 2};
 
-    INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()),
-                                    ::testing::ValuesIn(precisionsTemplate),
-                                    ::testing::ValuesIn(batchSizesTemplate),
-                                    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
-                            LoadNetworkCacheTestBase::getTestCaseName);
-} // namespace
+INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
+                        ::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate),
+                                           ::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
+                        LoadNetworkCacheTestBase::getTestCaseName);
+}  // namespace
diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp
index e10f8d64c688af..3a832a5dd040be 100644
--- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp
+++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp
@@ -2,19 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "multi-device/multi_device_config.hpp"
-
 #include "behavior/config.hpp"
+
 #include