Skip to content

Commit

Permalink
Rework model loading in FE manager, implement PDPD probing (openvinot…
Browse files Browse the repository at this point in the history
…oolkit#6358)

* Rework model loading in FE manager, implement PDPD probing

* Fix build

* Fix build

* Fix build

* Fix unicode

* Fix merge issues

* Fix codestyle

* Read frontends path from frontend_manager library location

* Fix codestyle

* Fix FE dependency

* Fix dependencies

* Fix codestyle

* Check if model file exists

* Revert adding model to lfs

* Add test model

* Fix cmake dependencies

* Apply review feedback

* Revert pugixml

* make getFrontendLibraryPath not public API

* Fix codestyle

* Apply fix from Ilya Lavrenov

* Add FE dependency in legacy tests

* Remove not needed dependency

* Better support Unicode

* Fix build

* Fix build

* Fix build

* Add dependency for deprecated tests

* Fix dependency

* Fix typo

* Revert adding FE dependency to IESharedTests

* Remove relative paths from frontend unit tests

* Apply review feedback

* Fix typo

* Return allow-undefined, since kmb dependencies fail to link

* Fix merge conflict

* Compare functions in reader tests

* Simplify code to load from variants

* Remove supported_by_arguments from public api

* Fix codestyle

* Fix build

* Compare names in reader tests

* Fix wchar in variant

Co-authored-by: Ilya Churaev <[email protected]>
  • Loading branch information
2 people authored and rnugmanx committed Aug 26, 2021
1 parent 7488e68 commit 3f186d7
Show file tree
Hide file tree
Showing 48 changed files with 727 additions and 390 deletions.
4 changes: 4 additions & 0 deletions cmake/templates/InferenceEngineConfig.cmake.in
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,10 @@ function(_ie_target_no_deprecation_error)
else()
set(flags "-Wno-error=deprecated-declarations")
endif()
if(CMAKE_CROSSCOMPILING)
set_target_properties(${ARGV} PROPERTIES
INTERFACE_LINK_OPTIONS "-Wl,--allow-shlib-undefined")
endif()

set_target_properties(${ARGV} PROPERTIES INTERFACE_COMPILE_OPTIONS ${flags})
endif()
Expand Down
5 changes: 3 additions & 2 deletions inference-engine/src/inference_engine/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ target_compile_definitions(${TARGET_NAME}_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE

target_include_directories(${TARGET_NAME}_obj SYSTEM PRIVATE $<TARGET_PROPERTY:ngraph::ngraph,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:pugixml::static,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:ngraph::frontend_manager,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:xbyak,INTERFACE_INCLUDE_DIRECTORIES>)

target_include_directories(${TARGET_NAME}_obj PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}"
Expand Down Expand Up @@ -160,7 +161,7 @@ if (TBBBIND_2_4_FOUND)
endif()

target_link_libraries(${TARGET_NAME} PRIVATE pugixml::static openvino::itt ${CMAKE_DL_LIBS} Threads::Threads
ngraph inference_engine_transformations)
ngraph ngraph::frontend_manager inference_engine_transformations)

target_include_directories(${TARGET_NAME} INTERFACE
$<BUILD_INTERFACE:${PUBLIC_HEADERS_DIR}>
Expand Down Expand Up @@ -200,7 +201,7 @@ if(WIN32)
set_target_properties(${TARGET_NAME}_s PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_s)
endif()

target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph
target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph ngraph::frontend_manager
inference_engine_transformations pugixml::static)

target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE)
Expand Down
23 changes: 22 additions & 1 deletion inference-engine/src/inference_engine/ie_network_reader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include <file_utils.h>
#include <ie_reader.hpp>
#include <ie_ir_version.hpp>
#include <frontend_manager/frontend_manager.hpp>

#include <fstream>
#include <istream>
Expand Down Expand Up @@ -226,6 +227,26 @@ CNNNetwork details::ReadNetwork(const std::string& modelPath, const std::string&
return reader->read(modelStream, exts);
}
}
// Try to load with FrontEndManager
static ngraph::frontend::FrontEndManager manager;
ngraph::frontend::FrontEnd::Ptr FE;
ngraph::frontend::InputModel::Ptr inputModel;
if (!binPath.empty()) {
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
std::wstring weights_path = FileUtils::multiByteCharToWString(binPath.c_str());
#else
std::string weights_path = binPath;
#endif
FE = manager.load_by_model(model_path, weights_path);
if (FE) inputModel = FE->load(model_path, weights_path);
} else {
FE = manager.load_by_model(model_path);
if (FE) inputModel = FE->load(model_path);
}
if (inputModel) {
auto ngFunc = FE->convert(inputModel);
return CNNNetwork(ngFunc);
}
IE_THROW() << "Unknown model format! Cannot find reader for model format: " << fileExt << " and read the model: " << modelPath <<
". Please check that reader library exists in your PATH.";
}
Expand All @@ -248,4 +269,4 @@ CNNNetwork details::ReadNetwork(const std::string& model, const Blob::CPtr& weig
IE_THROW() << "Unknown model format! Cannot find reader for the model and read it. Please check that reader library exists in your PATH.";
}

} // namespace InferenceEngine
} // namespace InferenceEngine
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,11 @@ if(NGRAPH_ONNX_IMPORT_ENABLE)
add_dependencies(${TARGET_NAME} inference_engine_onnx_reader)
endif()

if(NGRAPH_PDPD_FRONTEND_ENABLE)
target_compile_definitions(${TARGET_NAME} PRIVATE
PDPD_TEST_MODELS="${CMAKE_CURRENT_SOURCE_DIR}/pdpd_reader/models/")
endif()

ie_faster_build(${TARGET_NAME}
PCH PRIVATE "precomp.hpp"
)
Expand Down
Binary file not shown.
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <set>
#include <string>
#include <fstream>

#include <ie_blob.h>
#include <ie_core.hpp>
#include <file_utils.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset8.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"

// Verifies that InferenceEngine::Core::ReadNetwork can import a PaddlePaddle
// (PDPD) model file via the frontend manager probing path, and that the
// resulting function matches a hand-built reference graph (including tensor
// names, since NAMES comparison is enabled below).
TEST(PDPD_Reader_Tests, ImportBasicModelToCore) {
// PDPD_TEST_MODELS is a compile definition pointing at the test-model directory.
auto model = std::string(PDPD_TEST_MODELS) + "relu.pdmodel";
InferenceEngine::Core ie;
auto cnnNetwork = ie.ReadNetwork(model);
auto function = cnnNetwork.getFunction();

const auto inputType = ngraph::element::f32;
const auto inputShape = ngraph::Shape{ 3 };

// Build the reference graph: x -> Relu -> Multiply(scale=1) -> Add(bias=0) -> Result.
// Friendly names and tensor names must match what the PDPD frontend produces.
const auto data = std::make_shared<ngraph::opset8::Parameter>(inputType, inputShape);
data->set_friendly_name("x");
data->output(0).get_tensor().add_names({ "x" });
const auto relu = std::make_shared<ngraph::opset8::Relu>(data->output(0));
relu->set_friendly_name("relu_0.tmp_0");
relu->output(0).get_tensor().add_names({ "relu_0.tmp_0" });
// The PDPD exporter appends an identity-like scale op (multiply by 1, add 0).
const auto scale = std::make_shared<ngraph::opset8::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float>{1});
const auto bias = std::make_shared<ngraph::opset8::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float>{0});
const auto node_multiply = std::make_shared<ngraph::opset8::Multiply>(relu->output(0), scale);
const auto node_add = std::make_shared<ngraph::opset8::Add>(node_multiply, bias);
node_add->set_friendly_name("save_infer_model/scale_0.tmp_1");
node_add->output(0).get_tensor().add_names({ "save_infer_model/scale_0.tmp_1" });
const auto result = std::make_shared<ngraph::opset8::Result>(node_add->output(0));
result->set_friendly_name("save_infer_model/scale_0.tmp_1/Result");
const auto reference = std::make_shared<ngraph::Function>(
ngraph::NodeVector{ result },
ngraph::ParameterVector{ data },
"RefPDPDFunction");
// Compare structure AND node/tensor names against the reference.
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NAMES);
const FunctionsComparator::Result res = func_comparator(function, reference);
ASSERT_TRUE(res.valid);
}

#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
// Same scenario as ImportBasicModelToCore, but exercises the wide-string
// (std::wstring) ReadNetwork overload with a non-ASCII (Japanese) file name
// to validate Unicode path support on Windows.
TEST(PDPD_Reader_Tests, ImportBasicModelToCoreWstring) {
std::string win_dir_path{ PDPD_TEST_MODELS };
// Convert forward slashes to backslashes so the path is Windows-native.
std::replace(win_dir_path.begin(), win_dir_path.end(), '/', '\\');
const std::wstring unicode_win_dir_path = FileUtils::multiByteCharToWString(win_dir_path.c_str());
auto model = unicode_win_dir_path + L"ひらがな日本語.pdmodel";
InferenceEngine::Core ie;
auto cnnNetwork = ie.ReadNetwork(model);
auto function = cnnNetwork.getFunction();

const auto inputType = ngraph::element::f32;
const auto inputShape = ngraph::Shape{ 3 };

// Reference graph: x -> Relu -> Multiply(scale=1) -> Add(bias=0) -> Result,
// with names matching the PDPD frontend's output (NAMES comparison below).
const auto data = std::make_shared<ngraph::opset8::Parameter>(inputType, inputShape);
data->set_friendly_name("x");
data->output(0).get_tensor().add_names({ "x" });
const auto relu = std::make_shared<ngraph::opset8::Relu>(data->output(0));
relu->set_friendly_name("relu_0.tmp_0");
relu->output(0).get_tensor().add_names({ "relu_0.tmp_0" });
const auto scale = std::make_shared<ngraph::opset8::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float>{1});
const auto bias = std::make_shared<ngraph::opset8::Constant>(ngraph::element::f32, ngraph::Shape{ 1 }, std::vector<float>{0});
const auto node_multiply = std::make_shared<ngraph::opset8::Multiply>(relu->output(0), scale);
const auto node_add = std::make_shared<ngraph::opset8::Add>(node_multiply, bias);
node_add->set_friendly_name("save_infer_model/scale_0.tmp_1");
node_add->output(0).get_tensor().add_names({ "save_infer_model/scale_0.tmp_1" });
const auto result = std::make_shared<ngraph::opset8::Result>(node_add->output(0));
result->set_friendly_name("save_infer_model/scale_0.tmp_1/Result");
const auto reference = std::make_shared<ngraph::Function>(
ngraph::NodeVector{ result },
ngraph::ParameterVector{ data },
"RefPDPDFunction");
const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NAMES);
const FunctionsComparator::Result res = func_comparator(function, reference);
ASSERT_TRUE(res.valid);
}
#endif
2 changes: 1 addition & 1 deletion model-optimizer/mo/moc_frontend/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def moc_pipeline(argv: argparse.Namespace):
str(fem.get_available_front_ends())))
log.debug('Initializing new FE for framework {}'.format(argv.framework))
fe = fem.load_by_framework(argv.framework)
input_model = fe.load_from_file(argv.input_model)
input_model = fe.load(argv.input_model)

user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
input_model, argv.placeholder_shapes, argv.placeholder_data_types,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ extern "C" MOCK_API void* GetFrontEndData()
{
FrontEndPluginInfo* res = new FrontEndPluginInfo();
res->m_name = "mock_mo_ngraph_frontend";
res->m_creator = [](FrontEndCapFlags flags) { return std::make_shared<FrontEndMockPy>(flags); };
res->m_creator = []() { return std::make_shared<FrontEndMockPy>(); };

return res;
}
Original file line number Diff line number Diff line change
Expand Up @@ -292,11 +292,9 @@ class MOCK_API InputModelMockPy : public InputModel
/// was called with correct arguments during test execution
struct MOCK_API FeStat
{
FrontEndCapFlags m_load_flags;
std::vector<std::string> m_load_paths;
int m_convert_model = 0;
// Getters
FrontEndCapFlags load_flags() const { return m_load_flags; }
std::vector<std::string> load_paths() const { return m_load_paths; }
int convert_model() const { return m_convert_model; }
};
Expand All @@ -309,13 +307,8 @@ class MOCK_API FrontEndMockPy : public FrontEnd
static FeStat m_stat;

public:
FrontEndMockPy(FrontEndCapFlags flags) { m_stat.m_load_flags = flags; }
FrontEndMockPy() {}

InputModel::Ptr load_from_file(const std::string& path) const override
{
m_stat.m_load_paths.push_back(path);
return std::make_shared<InputModelMockPy>();
}

std::shared_ptr<ngraph::Function> convert(InputModel::Ptr model) const override
{
Expand All @@ -326,4 +319,15 @@ class MOCK_API FrontEndMockPy : public FrontEnd
static FeStat get_stat() { return m_stat; }

static void clear_stat() { m_stat = {}; }

protected:
// Records the model path (when the first variant argument is a std::string)
// into the static test statistics, then returns a fresh mock input model.
// Non-string or absent first arguments are ignored rather than rejected.
InputModel::Ptr load_impl(const std::vector<std::shared_ptr<Variant>>& params) const override
{
    if (!params.empty())
    {
        const auto& first = params[0];
        if (is_type<VariantWrapper<std::string>>(first))
        {
            // Track the path so tests can assert what load() was called with.
            m_stat.m_load_paths.push_back(as_type_ptr<VariantWrapper<std::string>>(first)->get());
        }
    }
    return std::make_shared<InputModelMockPy>();
}
};
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ static void register_mock_frontend_stat(py::module m)
m.def("clear_frontend_statistic", &FrontEndMockPy::clear_stat);

py::class_<FeStat> feStat(m, "FeStat", py::dynamic_attr());
feStat.def_property_readonly("load_flags", &FeStat::load_flags);
feStat.def_property_readonly("load_paths", &FeStat::load_paths);
feStat.def_property_readonly("convert_model", &FeStat::convert_model);
}
Expand Down
23 changes: 23 additions & 0 deletions ngraph/core/include/ngraph/variant.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,4 +75,27 @@ namespace ngraph
{
}
};

/// \brief Wraps an arbitrary value into a type-erased Variant shared pointer.
/// \param p Value to wrap; VariantWrapper<T> must be defined for T.
/// \return Variant holding a copy of \p p (null if the cast fails —
///         presumably only when VariantWrapper<T> does not derive from
///         VariantImpl<T>; TODO confirm).
template <typename T>
inline std::shared_ptr<Variant> make_variant(const T& p)
{
return std::dynamic_pointer_cast<VariantImpl<T>>(std::make_shared<VariantWrapper<T>>(p));
}

/// \brief Overload for string literals: converts a char array to a
///        std::string variant so `make_variant("path")` does not deduce T
///        as a raw array type.
template <size_t N>
inline std::shared_ptr<Variant> make_variant(const char (&s)[N])
{
return std::dynamic_pointer_cast<VariantImpl<std::string>>(
std::make_shared<VariantWrapper<std::string>>(s));
}

#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
/// \brief Overload for wide string literals (Windows Unicode paths):
///        converts a wchar_t array to a std::wstring variant.
template <size_t N>
inline std::shared_ptr<Variant> make_variant(const wchar_t (&s)[N])
{
return std::dynamic_pointer_cast<VariantImpl<std::wstring>>(
std::make_shared<VariantWrapper<std::wstring>>(s));
}
#endif

} // namespace ngraph
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "frontend_manager_defs.hpp"
#include "input_model.hpp"
#include "ngraph/function.hpp"
#include "ngraph/variant.hpp"

namespace ngraph
{
Expand All @@ -26,43 +27,31 @@ namespace ngraph

virtual ~FrontEnd();

/// \brief Loads an input model by specified model file path
/// If model is stored in several files (e.g. model topology and model weights) -
/// frontend implementation is responsible to handle this case, generally frontend may
/// retrieve other file names from main file
/// \param path Main model file path
/// \return Loaded input model
virtual InputModel::Ptr load_from_file(const std::string& path) const;

/// \brief Loads an input model by specified number of model files
/// This shall be used for cases when client knows all model files (model, weights, etc)
/// \param paths Array of model files
/// \return Loaded input model
virtual InputModel::Ptr load_from_files(const std::vector<std::string>& paths) const;

/// \brief Loads an input model by already loaded memory buffer
/// Memory structure is frontend-defined and is not specified in generic API
/// \param model Model memory buffer
/// \return Loaded input model
virtual InputModel::Ptr load_from_memory(const void* model) const;

/// \brief Loads an input model from set of memory buffers
/// Memory structure is frontend-defined and is not specified in generic API
/// \param modelParts Array of model memory buffers
/// \return Loaded input model
virtual InputModel::Ptr
load_from_memory_fragments(const std::vector<const void*>& modelParts) const;

/// \brief Loads an input model by input stream representing main model file
/// \param stream Input stream of main model
/// \return Loaded input model
virtual InputModel::Ptr load_from_stream(std::istream& stream) const;

/// \brief Loads an input model by input streams representing all model files
/// \param streams Array of input streams for model
/// \return Loaded input model
virtual InputModel::Ptr
load_from_streams(const std::vector<std::istream*>& streams) const;
/// \brief Validates if FrontEnd can recognize model with parameters specified.
/// Same parameters should be used to load model.
/// \param vars Any number of parameters of any type. What kind of parameters
/// are accepted is determined by each FrontEnd individually, typically it is
/// std::string containing path to the model file. For more information please
/// refer to specific FrontEnd documentation.
/// \return true if model recognized, false - otherwise.
template <typename... Types>
inline bool supported(const Types&... vars) const
{
return supported_impl({make_variant(vars)...});
}

/// \brief Loads an input model by any specified arguments. Each FrontEnd separately
/// defines what arguments it can accept.
/// \param vars Any number of parameters of any type. What kind of parameters
/// are accepted is determined by each FrontEnd individually, typically it is
/// std::string containing path to the model file. For more information please
/// refer to specific FrontEnd documentation.
/// \return Loaded input model.
template <typename... Types>
inline InputModel::Ptr load(const Types&... vars) const
{
return load_impl({make_variant(vars)...});
}

/// \brief Completely convert and normalize entire function, throws if it is not
/// possible
Expand Down Expand Up @@ -95,8 +84,20 @@ namespace ngraph
/// \brief Runs normalization passes on function that was loaded with partial conversion
/// \param function partially converted nGraph function
virtual void normalize(std::shared_ptr<ngraph::Function> function) const;

protected:
virtual bool
supported_impl(const std::vector<std::shared_ptr<Variant>>& variants) const;
virtual InputModel::Ptr
load_impl(const std::vector<std::shared_ptr<Variant>>& variants) const;
};

template <>
inline bool FrontEnd::supported(const std::vector<std::shared_ptr<Variant>>& variants) const
{
return supported_impl(variants);
}

} // namespace frontend

} // namespace ngraph
Loading

0 comments on commit 3f186d7

Please sign in to comment.