Fixed some leftovers after plugin API merge (#15932)
ilyachur authored Feb 27, 2023
1 parent 2d91c36 commit 957ff6e
Showing 6 changed files with 16 additions and 70 deletions.
2 changes: 1 addition & 1 deletion src/inference/dev_api/openvino/runtime/icore.hpp
@@ -21,7 +21,7 @@ namespace ov {
 /**
  * @interface ICore
  * @brief Minimal ICore interface to allow plugin to get information from Core Inference Engine class.
- * @ingroup ie_dev_api_plugin_api
+ * @ingroup ov_dev_api_plugin_api
  */
 class ICore {
 public:
2 changes: 1 addition & 1 deletion src/inference/dev_api/openvino/runtime/iplugin.hpp
@@ -209,7 +209,7 @@ class OPENVINO_RUNTIME_API IPlugin : public std::enable_shared_from_this<IPlugin
 /**
  * @def OV_CREATE_PLUGIN
  * @brief Defines a name of a function creating plugin instance
- * @ingroup ie_dev_api_plugin_api
+ * @ingroup ov_dev_api_plugin_api
  */
 #ifndef OV_CREATE_PLUGIN
 #    define OV_CREATE_PLUGIN CreatePluginEngine
2 changes: 1 addition & 1 deletion (file path not captured in this view)
@@ -10,9 +10,9 @@
 #pragma once
 
 #include <memory>
-#include <openvino/runtime/common.hpp>
 #include <string>
 
+#include "openvino/runtime/common.hpp"
 #include "openvino/runtime/threading/istreams_executor.hpp"
 
 namespace ov {
72 changes: 8 additions & 64 deletions src/plugins/template/src/compiled_model.cpp
@@ -9,73 +9,14 @@
 #include "async_infer_request.hpp"
 #include "ie_ngraph_utils.hpp"
 #include "ie_plugin_config.hpp"
+#include "openvino/runtime/properties.hpp"
 #include "plugin.hpp"
 #include "template/config.hpp"
 #include "template_itt.hpp"
 #include "transformations/utils/utils.hpp"
 
 using namespace TemplatePlugin;
 
-namespace {
-
-InferenceEngine::SizeVector get_dims(const ov::Output<ov::Node>& port) {
-    InferenceEngine::SizeVector dims = {};
-    const auto& p_shape = port.get_partial_shape();
-    if (p_shape.is_static())
-        dims = p_shape.get_shape();
-    return dims;
-}
-
-}  // namespace
-
-namespace ov {
-namespace legacy_convert {
-
-void fill_input_info(const ov::Output<ov::Node>& input, InferenceEngine::InputInfo::Ptr& input_info) {
-    if (!input_info) {
-        // Create input info
-        auto param_name = input.get_node()->get_friendly_name();
-        auto dims = get_dims(input);
-        InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(input.get_element_type()),
-                                         dims,
-                                         InferenceEngine::TensorDesc::getLayoutByDims(dims));
-        auto data = std::make_shared<InferenceEngine::Data>(param_name, desc);
-        input_info = std::make_shared<InferenceEngine::InputInfo>();
-        input_info->setInputData(data);
-    }
-    auto& rt_info = input.get_rt_info();
-    auto it = rt_info.find("ie_legacy_preproc");
-    if (it != rt_info.end()) {
-        input_info->getPreProcess() = it->second.as<InferenceEngine::PreProcessInfo>();
-    }
-    it = rt_info.find("ie_legacy_td");
-    if (it != rt_info.end()) {
-        auto td = it->second.as<InferenceEngine::TensorDesc>();
-        input_info->getInputData()->reshape(td.getDims(), td.getLayout());
-        input_info->setPrecision(td.getPrecision());
-    }
-}
-void fill_output_info(const ov::Output<ov::Node>& output, InferenceEngine::DataPtr& output_info) {
-    if (!output_info) {
-        // Create input info
-        const auto& res_name = ov::op::util::create_ie_output_name(output);
-        auto dims = get_dims(output);
-        InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(output.get_element_type()),
-                                         dims,
-                                         InferenceEngine::TensorDesc::getLayoutByDims(dims));
-        output_info = std::make_shared<InferenceEngine::Data>(res_name, desc);
-    }
-    auto& rt_info = output.get_rt_info();
-    auto it = rt_info.find("ie_legacy_td");
-    if (it != rt_info.end()) {
-        auto td = it->second.as<InferenceEngine::TensorDesc>();
-        output_info->reshape(td.getDims(), td.getLayout());
-        output_info->setPrecision(td.getPrecision());
-    }
-}
-}  // namespace legacy_convert
-}  // namespace ov
-
 // ! [executable_network:ctor_cnnnetwork]
 TemplatePlugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
                                              const std::shared_ptr<const ov::IPlugin>& plugin,
@@ -89,8 +30,9 @@ TemplatePlugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& m
     // In this case, _waitExecutor should also be created per device.
     try {
         compile_model(m_model);
-    } catch (const InferenceEngine::Exception&) {
-        throw;
+    } catch (const InferenceEngine::Exception& e) {
+        // Some transformations can throw legacy exception
+        throw ov::Exception(e.what());
     } catch (const std::exception& e) {
         OPENVINO_ASSERT(false, "Standard exception from compilation library: ", e.what());
     } catch (...) {
@@ -148,7 +90,7 @@ std::shared_ptr<const Plugin> TemplatePlugin::CompiledModel::get_template_plugin
 }
 
 // ! [executable_network:get_config]
-InferenceEngine::Parameter TemplatePlugin::CompiledModel::get_property(const std::string& name) const {
+ov::Any TemplatePlugin::CompiledModel::get_property(const std::string& name) const {
     const auto& add_ro_properties = [](const std::string& name, std::vector<ov::PropertyName>& properties) {
         properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO});
     };
@@ -179,7 +121,9 @@ InferenceEngine::Parameter TemplatePlugin::CompiledModel::get_property(const std
         return to_string_vector(metrics);
     } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
         auto configs = default_rw_properties();
-        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
+        auto streamExecutorConfigKeys = ov::threading::IStreamsExecutor::Config{}
+                                            .get_property(ov::supported_properties.name())
+                                            .as<std::vector<std::string>>();
         for (auto&& configKey : streamExecutorConfigKeys) {
             configs.emplace_back(configKey);
         }
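Note on the new catch block in this file: instead of rethrowing the legacy InferenceEngine::Exception unchanged, the constructor now rewraps it into ov::Exception. Below is a minimal standalone sketch of that pattern, assuming InferenceEngine::Exception comes from ie_common.h and that ov::Exception is publicly constructible from a message string, as the hunk suggests; run_and_rewrap is a hypothetical helper name, not part of the plugin.

#include <ie_common.h>

#include <utility>

#include "openvino/core/except.hpp"

// Run a compilation step and translate any legacy Inference Engine exception
// into the 2.0 API exception type, preserving the original message.
template <typename Callable>
void run_and_rewrap(Callable&& compile_step) {
    try {
        std::forward<Callable>(compile_step)();
    } catch (const InferenceEngine::Exception& e) {
        // Some transformations can still throw the legacy exception type.
        throw ov::Exception(e.what());
    }
}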
6 changes: 4 additions & 2 deletions src/plugins/template/src/plugin.cpp
@@ -131,7 +131,7 @@ std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::import_model(std::is
 
     auto ov_model = get_core()->read_model(xmlString, weights);
     auto streamsExecutorConfig =
-        InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(fullConfig._streamsExecutorConfig);
+        ov::threading::IStreamsExecutor::Config::make_default_multi_threaded(fullConfig._streamsExecutorConfig);
     streamsExecutorConfig._name = stream_executor_name;
     auto compiled_model =
         std::make_shared<CompiledModel>(ov_model,
@@ -236,7 +236,9 @@ ov::Any TemplatePlugin::Plugin::get_property(const std::string& name, const ov::
         return to_string_vector(metrics);
     } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
         auto configs = default_rw_properties();
-        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
+        auto streamExecutorConfigKeys = ov::threading::IStreamsExecutor::Config{}
+                                            .get_property(ov::supported_properties.name())
+                                            .as<std::vector<std::string>>();
         for (auto&& configKey : streamExecutorConfigKeys) {
             if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
                 configs.emplace_back(configKey);
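For context on the import_model hunk above, here is a rough sketch of how the renamed make_default_multi_threaded() helper might be used on its own, based only on the call shown in the diff; the executor name below is a placeholder, since the real stream_executor_name value is defined elsewhere in the plugin.

#include "openvino/runtime/threading/istreams_executor.hpp"

// Derive a multi-threaded executor configuration from a base config,
// mirroring the call that replaced MakeDefaultMultiThreaded() above.
ov::threading::IStreamsExecutor::Config make_executor_config(
    const ov::threading::IStreamsExecutor::Config& base) {
    auto config = ov::threading::IStreamsExecutor::Config::make_default_multi_threaded(base);
    config._name = "TemplateStreamsExecutor";  // placeholder name, not taken from the diff
    return config;
}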
2 changes: 1 addition & 1 deletion src/plugins/template/src/template_config.cpp
@@ -15,7 +15,7 @@ Configuration::Configuration() {}
 
 Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
     *this = defaultCfg;
-    // If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration
+    // If plugin needs to use ov::threading::StreamsExecutor it should be able to process its configuration
     auto streamExecutorConfigKeys =
         _streamsExecutorConfig.get_property(ov::supported_properties.name()).as<std::vector<std::string>>();
     for (auto&& c : config) {
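The constructor context above queries the streams-executor configuration for the property names it understands before walking the incoming config map. A minimal sketch of that lookup in isolation, using only what the hunks in this commit already show plus the standard library; is_stream_executor_key is a hypothetical helper, not part of the plugin.

#include <algorithm>
#include <string>
#include <vector>

#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/threading/istreams_executor.hpp"

// Return true if the given config key is one the streams executor can consume,
// using the same get_property(ov::supported_properties.name()) query as above.
bool is_stream_executor_key(const std::string& key) {
    const auto keys = ov::threading::IStreamsExecutor::Config{}
                          .get_property(ov::supported_properties.name())
                          .as<std::vector<std::string>>();
    return std::find(keys.begin(), keys.end(), key) != keys.end();
}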
