Port some changes from proxy branch (openvinotoolkit#17961)
* Port some changes from proxy branch

* Port test changes

* Rewrite approach for compile model and tensor

* Fixed review
ilyachur authored and mryzhov committed Jun 29, 2023
1 parent 6742f6e commit bd93876
Showing 11 changed files with 77 additions and 32 deletions.
8 changes: 8 additions & 0 deletions src/core/include/openvino/runtime/tensor.hpp
@@ -76,6 +76,14 @@ class OPENVINO_API Tensor {
/// @brief Default constructor
Tensor() = default;

+ /**
+  * @brief Copy constructor that additionally stores a shared object
+  *
+  * @param other Original tensor
+  * @param so Shared object to keep alive while the tensor is in use
+  */
+ Tensor(const Tensor& other, const std::shared_ptr<void>& so);
+
/// @brief Default copy constructor
/// @param other other Tensor object
Tensor(const Tensor& other) = default;
5 changes: 5 additions & 0 deletions src/core/src/runtime/ov_tensor.cpp
@@ -32,6 +32,11 @@ Tensor::~Tensor() {
_impl = {};
}

+ Tensor::Tensor(const Tensor& tensor, const std::shared_ptr<void>& so) : _impl{tensor._impl}, _so{tensor._so} {
+     OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
+     _so.emplace_back(so);
+ }
+
Tensor::Tensor(const std::shared_ptr<ITensor>& impl, const std::vector<std::shared_ptr<void>>& so)
: _impl{impl},
_so{so} {
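The new constructor re-wraps an existing tensor while appending one more entry to its list of shared objects, so the tensor can keep a dynamically loaded plugin library mapped for as long as its data is referenced. A minimal sketch of the intent; `load_plugin_library` and `make_plugin_tensor` are hypothetical stand-ins, not part of this commit:

```cpp
#include <memory>

#include "openvino/runtime/tensor.hpp"

// Hypothetical helpers standing in for real plugin machinery.
std::shared_ptr<void> load_plugin_library();  // keeps the plugin .so mapped while alive
ov::Tensor make_plugin_tensor();              // tensor whose memory lives inside the plugin

ov::Tensor wrap_for_caller() {
    std::shared_ptr<void> so = load_plugin_library();
    ov::Tensor inner = make_plugin_tensor();
    // The copy also holds `so`, so the library backing the tensor's memory
    // cannot be unloaded while this tensor is still in use.
    return ov::Tensor(inner, so);
}
```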
6 changes: 4 additions & 2 deletions src/inference/dev_api/openvino/runtime/icompiled_model.hpp
@@ -83,14 +83,14 @@ class OPENVINO_RUNTIME_API ICompiledModel : public std::enable_shared_from_this<
*
* @return model outputs
*/
-     const std::vector<ov::Output<const ov::Node>>& outputs() const;
+     virtual const std::vector<ov::Output<const ov::Node>>& outputs() const;

/**
* @brief Gets all inputs from compiled model
*
* @return model inputs
*/
-     const std::vector<ov::Output<const ov::Node>>& inputs() const;
+     virtual const std::vector<ov::Output<const ov::Node>>& inputs() const;

/**
* @brief Create infer request
@@ -136,6 +136,8 @@ class OPENVINO_RUNTIME_API ICompiledModel : public std::enable_shared_from_this<
*/
std::shared_ptr<ov::IRemoteContext> get_context() const;

+     virtual ~ICompiledModel() = default;
+
private:
std::shared_ptr<const ov::IPlugin> m_plugin;
std::vector<ov::Output<const ov::Node>> m_inputs;
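Making `inputs()` and `outputs()` virtual, together with the new virtual destructor, lets an intermediate implementation such as the proxy this commit is ported from forward them to a wrapped compiled model. A rough sketch of that pattern, not code from this commit; the constructor and the remaining pure-virtual overrides are omitted for brevity:

```cpp
#include <memory>
#include <vector>

#include "openvino/runtime/icompiled_model.hpp"

// Sketch of a forwarding compiled model (hypothetical class).
class ProxyCompiledModel : public ov::ICompiledModel {
public:
    const std::vector<ov::Output<const ov::Node>>& inputs() const override {
        return m_hidden->inputs();  // delegate to the real device model
    }
    const std::vector<ov::Output<const ov::Node>>& outputs() const override {
        return m_hidden->outputs();
    }
    // Constructor and the other pure-virtual overrides
    // (create_sync_infer_request, export_model, ...) omitted.

private:
    std::shared_ptr<ov::ICompiledModel> m_hidden;  // compiled model of the hidden device
};
```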
48 changes: 30 additions & 18 deletions src/inference/dev_api/openvino/runtime/icore.hpp
@@ -46,83 +46,95 @@ class OPENVINO_RUNTIME_API ICore {
virtual std::shared_ptr<ov::Model> read_model(const std::string& model_path, const std::string& bin_path) const = 0;

/**
- * @brief Creates an executable network from a network object.
+ * @brief Creates a compiled model from a model object.
*
- * Users can create as many networks as they need and use
+ * Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model OpenVINO Model
- * @param device_name Name of device to load network to
+ * @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
- * @return An executable network reference
+ * @return A pointer to compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config = {}) const = 0;

/**
- * @brief Creates an executable network from a network object.
+ * @brief Creates a compiled model from a model object.
*
- * Users can create as many networks as they need and use
+ * Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model OpenVINO Model
* @param context "Remote" (non-CPU) accelerator device-specific execution context to use
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
- * @return An executable network reference
+ * @return A pointer to compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::RemoteContext& context,
const ov::AnyMap& config = {}) const = 0;

/**
- * @brief Creates an executable network from a model file.
+ * @brief Creates a compiled model from a model file.
*
- * Users can create as many networks as they need and use
+ * Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model_path Path to model
- * @param device_name Name of device to load network to
+ * @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
- * @return An executable network reference
+ * @return A pointer to compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::string& model_path,
const std::string& device_name,
const ov::AnyMap& config) const = 0;

/**
- * @brief Creates an executable network from a model memory.
+ * @brief Creates a compiled model from a model in memory.
*
- * Users can create as many networks as they need and use
+ * Users can create as many models as they need and use
* them simultaneously (up to the limitation of the hardware resources)
*
* @param model_str String data of model
* @param weights Model's weights
- * @param device_name Name of device to load network to
+ * @param device_name Name of device to load model to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
- * @return An executable network reference
+ * @return A pointer to compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> compile_model(const std::string& model_str,
const ov::Tensor& weights,
const std::string& device_name,
const ov::AnyMap& config) const = 0;

/**
- * @brief Creates an executable network from a previously exported network
+ * @brief Creates a compiled model from a previously exported model
* @param model Model stream
- * @param device_name Name of device load executable network on
+ * @param device_name Name of device to load the compiled model on
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
- * @return An executable network reference
+ * @return A pointer to compiled model
*/
virtual ov::SoPtr<ov::ICompiledModel> import_model(std::istream& model,
const std::string& device_name,
const ov::AnyMap& config = {}) const = 0;

+ /**
+ * @brief Creates a compiled model from a previously exported model
+ * @param modelStream Model stream
+ * @param context Remote context
+ * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
+ * operation
+ * @return A pointer to compiled model
+ */
+ virtual ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
+                                                    const ov::RemoteContext& context,
+                                                    const ov::AnyMap& config = {}) const = 0;
+
/**
* @brief Query device if it supports specified network with specified configuration
*
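For orientation, here is how a caller holding the dev-API `ICore` might use these entry points, including the context-based `import_model` overload added above. The device name and the surrounding plumbing are assumptions for illustration, not part of the commit:

```cpp
#include <istream>
#include <memory>

#include "openvino/runtime/icore.hpp"

// Compile a model on a concrete child device through the core interface.
ov::SoPtr<ov::ICompiledModel> compile_on_child(const ov::ICore& core,
                                               const std::shared_ptr<const ov::Model>& model) {
    return core.compile_model(model, "CPU", {});  // "CPU" is an assumed target
}

// Restore a previously exported model inside a remote context,
// using the overload introduced by this commit.
ov::SoPtr<ov::ICompiledModel> restore(const ov::ICore& core,
                                      std::istream& blob,
                                      const ov::RemoteContext& context) {
    return core.import_model(blob, context);
}
```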
4 changes: 1 addition & 3 deletions src/inference/dev_api/openvino/runtime/iplugin.hpp
@@ -234,7 +234,7 @@ class OPENVINO_RUNTIME_API IPlugin : public std::enable_shared_from_this<IPlugin
*/
const std::shared_ptr<ov::threading::ExecutorManager>& get_executor_manager() const;

-     ~IPlugin() = default;
+     virtual ~IPlugin() = default;

protected:
IPlugin();
@@ -294,8 +294,6 @@ constexpr static const auto create_plugin_function = OV_PP_TOSTRING(OV_CREATE_PL
try { \
plugin = ::std::make_shared<PluginType>(__VA_ARGS__); \
plugin->set_version(version); \
-     } catch (const InferenceEngine::Exception& ex) { \
-         OPENVINO_THROW(ex.what()); \
} catch (const std::exception& ex) { \
OPENVINO_THROW(ex.what()); \
} \
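Two separate fixes land here: `~IPlugin()` becomes virtual so a derived plugin destroyed through a base pointer is cleaned up correctly, and the dedicated `InferenceEngine::Exception` catch block is dropped as redundant, since that exception derives from `std::exception` and the remaining handler already covers it. A tiny sketch of why the virtual destructor matters (general C++ rule, not commit code; `MyPlugin` is hypothetical):

```cpp
#include "openvino/runtime/iplugin.hpp"

class MyPlugin : public ov::IPlugin { /* overrides omitted */ };  // hypothetical plugin

void destroy(ov::IPlugin* p) {
    delete p;  // well-defined for a MyPlugin instance only because ~IPlugin() is virtual
}
```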
2 changes: 2 additions & 0 deletions src/inference/dev_api/openvino/runtime/iremote_context.hpp
@@ -21,6 +21,8 @@ namespace ov {

class OPENVINO_RUNTIME_API IRemoteContext : public std::enable_shared_from_this<IRemoteContext> {
public:
+     virtual ~IRemoteContext() = default;
+
/**
* @brief Returns name of a device on which underlying object is allocated.
* Abstract method.
3 changes: 1 addition & 2 deletions src/inference/src/core.cpp
@@ -221,9 +221,8 @@ CompiledModel Core::import_model(std::istream& modelStream, const std::string& d
CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& context, const AnyMap& config) {
OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");

-     auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config);
      OV_CORE_CALL_STATEMENT({
-         auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, context, parsed._config);
+         auto exec = _impl->import_model(modelStream, context, config);
return {exec._ptr, exec._so};
});
}
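`Core::import_model` with a `RemoteContext` no longer parses the device name at the API layer; it now delegates to the new `CoreImpl::import_model` shown below. A hedged usage sketch; the device name and blob path are assumptions:

```cpp
#include <fstream>

#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    // Assumes a device that supports remote contexts, e.g. "GPU".
    ov::RemoteContext context = core.get_default_context("GPU");

    std::ifstream blob("model.blob", std::ios::binary);  // previously exported model
    // The device name is recovered from the context inside CoreImpl::import_model.
    ov::CompiledModel compiled = core.import_model(blob, context, {});
    return 0;
}
```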
16 changes: 15 additions & 1 deletion src/inference/src/dev/core_impl.cpp
@@ -597,7 +597,8 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model_with_preprocess(ov::Pl
const ov::AnyMap& config) const {
std::shared_ptr<const ov::Model> preprocessed_model = model;

-     if (!is_new_api() && !std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin.m_ptr)) {
+     if (!is_new_api() && !std::dynamic_pointer_cast<InferenceEngine::IPluginWrapper>(plugin.m_ptr) &&
+         !is_virtual_device(plugin.get_name())) {
ov::pass::Manager manager;
manager.register_pass<ov::pass::AddPreprocessing>();

@@ -680,6 +681,19 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& model,
return compiled_model;
}

+ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::import_model(std::istream& modelStream,
+                                                          const ov::RemoteContext& context,
+                                                          const ov::AnyMap& config) const {
+     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
+     auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config);
+     auto compiled_model = get_plugin(parsed._deviceName).import_model(modelStream, parsed._config);
+     if (auto wrapper = std::dynamic_pointer_cast<InferenceEngine::ICompiledModelWrapper>(compiled_model._ptr)) {
+         wrapper->get_executable_network()->loadedFromCache();
+     }
+
+     return compiled_model;
+ }
+
ov::SupportedOpsMap ov::CoreImpl::query_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config) const {
8 changes: 6 additions & 2 deletions src/inference/src/dev/core_impl.hpp
@@ -219,8 +219,8 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t
void register_plugins_in_registry(const std::string& xml_config_file, const bool& by_abs_path = false);

std::shared_ptr<const ov::Model> apply_auto_batching(const std::shared_ptr<const ov::Model>& model,
-                                                       std::string& deviceName,
-                                                       ov::AnyMap& config) const;
+                                                           std::string& deviceName,
+                                                           ov::AnyMap& config) const;

/*
* @brief Register plugins according to the build configuration
@@ -383,6 +383,10 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t
const std::string& device_name = {},
const ov::AnyMap& config = {}) const override;

+     ov::SoPtr<ov::ICompiledModel> import_model(std::istream& modelStream,
+                                                const ov::RemoteContext& context,
+                                                const ov::AnyMap& config) const override;
+
ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
const std::string& device_name,
const ov::AnyMap& config) const override;
@@ -62,8 +62,7 @@ class MockICore : public InferenceEngine::ICore {
MOCK_CONST_METHOD1(DeviceSupportsModelCaching, bool(const std::string&)); // NOLINT not a cast to bool
MOCK_METHOD2(GetSupportedConfig,
std::map<std::string, std::string>(const std::string&, const std::map<std::string, std::string>&));
-     MOCK_CONST_METHOD2(get_supported_property,
-                        ov::AnyMap(const std::string&, const ov::AnyMap&));
+     MOCK_CONST_METHOD2(get_supported_property, ov::AnyMap(const std::string&, const ov::AnyMap&));
MOCK_CONST_METHOD0(isNewAPI, bool());
MOCK_METHOD1(GetDefaultContext, InferenceEngine::RemoteContext::Ptr(const std::string&));

@@ -92,6 +91,8 @@ class MockICore : public InferenceEngine::ICore {
MOCK_CONST_METHOD3(read_model, std::shared_ptr<ov::Model>(const std::string&, const ov::Tensor&, bool));
MOCK_CONST_METHOD2(read_model, std::shared_ptr<ov::Model>(const std::string&, const std::string&));
MOCK_CONST_METHOD1(get_default_context, ov::RemoteContext(const std::string&));
+     MOCK_CONST_METHOD3(import_model,
+                        ov::SoPtr<ov::ICompiledModel>(std::istream&, const ov::RemoteContext&, const ov::AnyMap&));

~MockICore() = default;
};
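With the new mock method in place, a test can stub the context-based import path roughly as below. `mock_core` and `fake_compiled` are assumed fixture members, not part of this commit; if `import_model` is mocked with several overloads, the wildcard matchers may need explicitly typed alternatives such as `testing::An<std::istream&>()`:

```cpp
#include <gmock/gmock.h>

using ::testing::_;
using ::testing::Return;

// Inside a test body: mock_core is a MockICore, fake_compiled is a
// prepared ov::SoPtr<ov::ICompiledModel>.
EXPECT_CALL(mock_core, import_model(_, _, _))
    .WillOnce(Return(fake_compiled));
```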
@@ -21,9 +21,9 @@
#include "openvino/runtime/iplugin.hpp"

class MockInternalPlugin : public ov::IPlugin {
-     ov::IPlugin* m_plugin;
+     ov::IPlugin* m_plugin = nullptr;
      std::shared_ptr<ov::IPlugin> m_converted_plugin;
-     InferenceEngine::IInferencePlugin* m_old_plugin;
+     InferenceEngine::IInferencePlugin* m_old_plugin = nullptr;
ov::AnyMap config;

public: