Implement reading pdpd model in ReadNetwork #51

Closed · wants to merge 3 commits
2 changes: 2 additions & 0 deletions .gitattributes
@@ -69,4 +69,6 @@
*.jpg filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.vsdx filter=lfs diff=lfs merge=lfs -text
*.pdmodel filter=lfs diff=lfs merge=lfs -text
Review comment (Collaborator): What is the size of pdpd models?

*.pdiparams filter=lfs diff=lfs merge=lfs -text
ngraph/test/files/paddlepaddle/models/ filter=lfs diff=lfs merge=lfs -text
7 changes: 0 additions & 7 deletions inference-engine/samples/benchmark_app/main.cpp
@@ -362,13 +362,6 @@ int main(int argc, char* argv[]) {

auto startTime = Time::now();
CNNNetwork cnnNetwork = ie.ReadNetwork(FLAGS_m);
// ngraph::frontend::FrontEndManager manager;
Review comment (Collaborator): I propose to remove all redundant comments from benchmark app.
Reply (Author): I don't see any more redundant comments in benchmark_app.

// auto FE = manager.loadByFramework("pdpd");
// auto inputModel = FE->loadFromFile(FLAGS_m);
// //inputModel->setPartialShape(inputModel->getInputs()[0], ngraph::PartialShape({1, 224, 224, 3}));
// auto ngFunc = FE->convert(inputModel);
// CNNNetwork cnnNetwork(ngFunc);
// cnnNetwork.serialize("benchmark_app_loaded_network.xml");

auto duration_ms = double_to_string(get_total_ms_time(startTime));
slog::info << "Read network took " << duration_ms << " ms" << slog::endl;
23 changes: 22 additions & 1 deletion inference-engine/src/inference_engine/ie_network_reader.cpp
@@ -9,6 +9,7 @@
#include <file_utils.h>
#include <ie_reader.hpp>
#include <ie_ir_version.hpp>
#include <frontend_manager/frontend_manager.hpp>

#include <fstream>
#include <istream>
@@ -226,6 +227,26 @@ CNNNetwork details::ReadNetwork(const std::string& modelPath, const std::string&
return reader->read(modelStream, exts);
}
}
// Try to load with FrontEndManager
static ngraph::frontend::FrontEndManager manager;
Review comment (Collaborator): Why do you need static manager?

ngraph::frontend::FrontEnd::Ptr FE;
ngraph::frontend::InputModel::Ptr inputModel;
if (!binPath.empty()) {
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
std::wstring weights_path = FileUtils::multiByteCharToWString(binPath.c_str());
#else
std::string weights_path = binPath;
#endif
FE = manager.load_by_model(model_path, weights_path);
Review comment (Collaborator): In this line do we load FE from the library? Do we load FE only for the first instantiation?

if (FE) inputModel = FE->load(model_path, weights_path);
} else {
FE = manager.load_by_model(model_path);
if (FE) inputModel = FE->load(model_path);
}
if (inputModel) {
auto ngFunc = FE->convert(inputModel);
return CNNNetwork(ngFunc);
}
IE_THROW() << "Unknown model format! Cannot find reader for model format: " << fileExt << " and read the model: " << modelPath <<
". Please check that reader library exists in your PATH.";
}
@@ -248,4 +269,4 @@ CNNNetwork details::ReadNetwork(const std::string& model, const Blob::CPtr& weig
IE_THROW() << "Unknown model format! Cannot find reader for the model and read it. Please check that reader library exists in your PATH.";
}

} // namespace InferenceEngine
} // namespace InferenceEngine
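For context, the fallback added to ReadNetwork above reduces to the following flow. This is a minimal sketch against the frontend_manager API introduced in this PR; the helper name read_with_frontend and the null-checking policy are illustrative, not part of the diff:

#include <frontend_manager/frontend_manager.hpp>
#include <ngraph/function.hpp>
#include <memory>
#include <string>

std::shared_ptr<ngraph::Function> read_with_frontend(const std::string& model_path)
{
    ngraph::frontend::FrontEndManager manager;   // loads available frontend plugins on construction
    auto FE = manager.load_by_model(model_path); // selects a frontend by file extension and other file info
    if (!FE)
        return nullptr;                          // no frontend recognized this file
    auto inputModel = FE->load(model_path);      // parses the model file(s)
    return FE->convert(inputModel);              // converts to an nGraph function
}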
@@ -54,6 +54,11 @@ if(NGRAPH_ONNX_IMPORT_ENABLE)
add_dependencies(${TARGET_NAME} inference_engine_onnx_reader)
endif()

if(NGRAPH_PDPD_FRONTEND_ENABLE)
target_compile_definitions(${TARGET_NAME} PRIVATE
PDPD_TEST_MODELS="${CMAKE_CURRENT_SOURCE_DIR}/pdpd_reader/models/")
endif()

ie_faster_build(${TARGET_NAME}
PCH PRIVATE "precomp.hpp"
)
Git LFS file not shown
@@ -0,0 +1,39 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <set>
#include <string>
#include <fstream>

#include <ie_blob.h>
#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>

TEST(PDPD_Reader_Tests, ImportBasicModelToCore) {
auto model = std::string(PDPD_TEST_MODELS) + "relu.pdmodel";
InferenceEngine::Core ie;
auto cnnNetwork = ie.ReadNetwork(model);
auto function = cnnNetwork.getFunction();

int count_relus = 0;
int count_constants = 0;
int count_parameters = 0;

for (auto op : function->get_ops()) {
const auto op_type = std::string(op->get_type_name());
count_relus += (op_type == "Relu" ? 1 : 0);
count_constants += (op_type == "Constant" ? 1 : 0);
count_parameters += (op_type == "Parameter" ? 1 : 0);
}

ASSERT_EQ(function->get_output_size(), 1);
ASSERT_EQ(std::string(function->get_output_op(0)->get_type_name()), "Result");
ASSERT_EQ(function->get_output_element_type(0), ngraph::element::f32);
ASSERT_EQ(function->get_output_shape(0), ngraph::Shape({ 3 }));
ASSERT_EQ(count_relus, 1);
ASSERT_EQ(count_constants, 6);
ASSERT_EQ(count_parameters, 1);
}

2 changes: 1 addition & 1 deletion model-optimizer/mo/front_ng/pipeline.py
@@ -16,7 +16,7 @@ def moc_pipeline(argv: argparse.Namespace):
log.info(f'fem.availableFrontEnds: {str(fem.get_available_front_ends())}')
log.info(f'Initializing new FE for framework {argv.framework}')
fe = fem.load_by_framework(argv.framework)
inputModel = fe.load_from_file(argv.input_model)
inputModel = fe.load(argv.input_model)

user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
inputModel, argv.placeholder_shapes, argv.placeholder_data_types,
14 changes: 14 additions & 0 deletions ngraph/core/include/ngraph/variant.hpp
@@ -74,4 +74,18 @@ namespace ngraph
{
}
};

template <typename T>
inline std::shared_ptr<Variant> make_variant(const T& p)
{
return std::dynamic_pointer_cast<VariantImpl<T>>(std::make_shared<VariantWrapper<T>>(p));
}

template <size_t N>
inline std::shared_ptr<Variant> make_variant(const char (&s)[N])
{
return std::dynamic_pointer_cast<VariantImpl<std::string>>(
std::make_shared<VariantWrapper<std::string>>(s));
}

} // namespace ngraph
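A side note on the make_variant helpers above: the char-array overload keeps string literals from deducing T as char[N], for which no VariantWrapper is registered, and routes them to the std::string wrapper instead. A minimal illustration (variable names are placeholders):

// Both calls produce a Variant backed by VariantWrapper<std::string>.
std::shared_ptr<ngraph::Variant> a = ngraph::make_variant(std::string("relu.pdmodel"));
// Without the char (&)[N] overload this literal would deduce T = char[13]
// and find no wrapper; with it, the literal converts to std::string:
std::shared_ptr<ngraph::Variant> b = ngraph::make_variant("relu.pdmodel");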
@@ -10,6 +10,7 @@
#include "frontend_manager_defs.hpp"
#include "input_model.hpp"
#include "ngraph/function.hpp"
#include "ngraph/variant.hpp"

namespace ngraph
{
@@ -26,43 +27,34 @@ namespace ngraph

virtual ~FrontEnd();

/// \brief Loads an input model by specified model file path
/// If model is stored in several files (e.g. model topology and model weights) -
/// frontend implementation is responsible to handle this case, generally frontend may
/// retrieve other file names from main file
/// \param path Main model file path
/// \return Loaded input model
virtual InputModel::Ptr load_from_file(const std::string& path) const;

/// \brief Loads an input model by specified number of model files
/// This shall be used for cases when client knows all model files (model, weights, etc)
/// \param paths Array of model files
/// \return Loaded input model
virtual InputModel::Ptr load_from_files(const std::vector<std::string>& paths) const;

/// \brief Loads an input model by already loaded memory buffer
/// Memory structure is frontend-defined and is not specified in generic API
/// \param model Model memory buffer
/// \return Loaded input model
virtual InputModel::Ptr load_from_memory(const void* model) const;

/// \brief Loads an input model from set of memory buffers
/// Memory structure is frontend-defined and is not specified in generic API
/// \param modelParts Array of model memory buffers
/// \return Loaded input model
virtual InputModel::Ptr
load_from_memory_fragments(const std::vector<const void*>& modelParts) const;

/// \brief Loads an input model by input stream representing main model file
/// \param stream Input stream of main model
/// \return Loaded input model
virtual InputModel::Ptr load_from_stream(std::istream& stream) const;

/// \brief Loads an input model by input streams representing all model files
/// \param streams Array of input streams for model
/// \return Loaded input model
virtual InputModel::Ptr
load_from_streams(const std::vector<std::istream*>& streams) const;
/// \brief Validates if FrontEnd can recognize model with parameters specified.
/// Same parameters should be used to load model.
/// \param vars Any number of parameters of any type. What kind of parameters
/// are accepted is determined by each FrontEnd individually, typically it is
/// std::string containing path to the model file. For more information please
/// refer to specific FrontEnd documentation.
/// \return true if model recognized, false - otherwise.
template <typename... Types>
bool supported(const Types&... vars) const
{
return supported_by_variants({make_variant(vars)...});
}

virtual bool
supported_by_variants(const std::vector<std::shared_ptr<Variant>>& variants) const;

/// \brief Loads an input model by any specified arguments. Each FrontEnd separately
/// defines what arguments it can accept.
/// \param vars Any number of parameters of any type. What kind of parameters
/// are accepted is determined by each FrontEnd individually, typically it is
/// std::string containing path to the model file. For more information please
/// refer to specific FrontEnd documentation.
/// \return Loaded input model.
template <typename... Types>
InputModel::Ptr load(const Types&... vars) const
{
return load_impl({make_variant(vars)...});
}

/// \brief Completely convert and normalize entire function, throws if it is not
/// possible
@@ -95,6 +87,10 @@
/// \brief Runs normalization passes on function that was loaded with partial conversion
/// \param function partially converted nGraph function
virtual void normalize(std::shared_ptr<ngraph::Function> function) const;

protected:
virtual InputModel::Ptr
load_impl(const std::vector<std::shared_ptr<Variant>>& variants) const;
};

} // namespace frontend
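The variadic supported()/load() pair above packs its arguments into Variants via make_variant and forwards them to the virtual supported_by_variants()/load_impl(), so derived frontends only override the variant-based entry points. A hypothetical caller-side sketch (file names are placeholders; which argument combinations are accepted is frontend-specific):

#include <frontend_manager/frontend.hpp>

void try_load(const ngraph::frontend::FrontEnd::Ptr& fe)
{
    // String literals are packed as std::string Variants and checked
    // by supported_by_variants():
    if (fe->supported("model.pdmodel", "model.pdiparams"))
    {
        // The same arguments, forwarded to load_impl():
        auto inputModel = fe->load("model.pdmodel", "model.pdiparams");
        auto function = fe->convert(inputModel);
    }
}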
@@ -8,36 +8,14 @@
#include <string>
#include "frontend.hpp"
#include "frontend_manager_defs.hpp"
#include "ngraph/variant.hpp"

namespace ngraph
{
namespace frontend
{
/// Capabilities for requested FrontEnd
/// In general, frontend implementation may be divided into several libraries by capability
/// level It will allow faster load of frontend when only limited usage is expected by
/// client application as well as binary size can be minimized by removing not needed parts
/// from application's package
namespace FrontEndCapabilities
{
/// \brief Just reading and conversion, w/o any modifications; intended to be used in
/// Reader
static const int FEC_DEFAULT = 0;

/// \brief Topology cutting capability
static const int FEC_CUT = 1;

/// \brief Query entities by names, renaming and adding new names for operations and
/// tensors
static const int FEC_NAMES = 2;

/// \brief Partial model conversion and decoding capability
static const int FEC_WILDCARDS = 4;
}; // namespace FrontEndCapabilities

// -------------- FrontEndManager -----------------
using FrontEndCapFlags = int;
using FrontEndFactory = std::function<FrontEnd::Ptr(FrontEndCapFlags fec)>;
using FrontEndFactory = std::function<FrontEnd::Ptr()>;

/// \brief Frontend management class, loads available frontend plugins on construction
/// Allows load of frontends for particular framework, register new and list available
@@ -62,26 +40,22 @@
/// \param framework Framework name. Throws exception if name is not in list of
/// available frontends
///
/// \param fec Frontend capabilities. It is recommended to use only
/// those capabilities which are needed to minimize load time
///
/// \return Frontend interface for further loading of models
FrontEnd::Ptr
load_by_framework(const std::string& framework,
FrontEndCapFlags fec = FrontEndCapabilities::FEC_DEFAULT);
FrontEnd::Ptr load_by_framework(const std::string& framework);

/// \brief Loads frontend by model file path. Selects and loads appropriate frontend
/// depending on model file extension and other file info (header)
/// \brief Loads frontend by model fragments described by each FrontEnd documentation.
/// Selects and loads appropriate frontend depending on model file extension and other
/// file info (header)
///
/// \param framework
/// Framework name. Throws exception if name is not in list of available frontends
///
/// \param fec Frontend capabilities. It is recommended to use only those capabilities
/// which are needed to minimize load time
///
/// \return Frontend interface for further loading of model
FrontEnd::Ptr load_by_model(const std::string& path,
FrontEndCapFlags fec = FrontEndCapabilities::FEC_DEFAULT);
template <typename... Types>
FrontEnd::Ptr load_by_model(const Types&... vars)
{
return load_by_variants({make_variant(vars)...});
}

/// \brief Gets list of registered frontends
std::vector<std::string> get_available_front_ends() const;
@@ -97,6 +71,8 @@
private:
class Impl;

FrontEnd::Ptr load_by_variants(const std::vector<std::shared_ptr<Variant>>& variants);

std::unique_ptr<Impl> m_impl;
};

@@ -119,4 +95,31 @@

} // namespace frontend

template <>
class NGRAPH_API VariantWrapper<std::shared_ptr<std::istream>>
: public VariantImpl<std::shared_ptr<std::istream>>
{
public:
static constexpr VariantTypeInfo type_info{"Variant::std::shared_ptr<std::istream>", 0};
const VariantTypeInfo& get_type_info() const override { return type_info; }
VariantWrapper(const value_type& value)
: VariantImpl<value_type>(value)
{
}
};

#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
template <>
class NGRAPH_API VariantWrapper<std::wstring> : public VariantImpl<std::wstring>
{
public:
static constexpr VariantTypeInfo type_info{"Variant::std::wstring", 0};
const VariantTypeInfo& get_type_info() const override { return type_info; }
VariantWrapper(const value_type& value)
: VariantImpl<value_type>(value)
{
}
};
#endif

} // namespace ngraph
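The two VariantWrapper specializations above extend what load_by_model() can accept: a shared_ptr<std::istream> for in-memory models and, on Windows with unicode path support, a std::wstring path. A hypothetical sketch of the stream case (whether a given frontend actually accepts streams is frontend-specific):

#include <frontend_manager/frontend_manager.hpp>
#include <memory>
#include <sstream>
#include <string>

void select_from_stream(const std::string& serialized_model)
{
    ngraph::frontend::FrontEndManager manager;
    auto stream = std::make_shared<std::stringstream>(serialized_model);
    // The cast matters: only VariantWrapper<std::shared_ptr<std::istream>> is
    // registered, so the argument must deduce as shared_ptr<std::istream>:
    auto FE = manager.load_by_model(std::static_pointer_cast<std::istream>(stream));
    if (FE)
    {
        auto inputModel = FE->load(std::static_pointer_cast<std::istream>(stream));
    }
}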