diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
index 0b85b6c42fead7..2954966ea56a4b 100644
--- a/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
+++ b/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
@@ -12,6 +12,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -30,7 +31,7 @@
 class FrontEnd final {
 public:
     using Ptr = std::shared_ptr<FrontEnd>;

-    explicit FrontEnd(StageBuilder::Ptr stageBuilder);
+    explicit FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core);

     ModelPtr buildInitialModel(ie::ICNNNetwork& network);
@@ -202,6 +203,7 @@ class FrontEnd final {
 private:
     StageBuilder::Ptr _stageBuilder;
+    const ie::ICore* _core;

     IeParsedNetwork _ieParsedNetwork;

     std::unordered_set<ie::DataPtr> _unbatchedOutputs;
diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp
index 11d1523f07eadd..ac6be232319be5 100644
--- a/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp
+++ b/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp
@@ -14,6 +14,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -163,11 +164,13 @@ CompiledGraph::Ptr compileNetwork(
     ie::ICNNNetwork& network,
     Platform platform,
     const CompilationConfig& config,
-    const Logger::Ptr& log);
+    const Logger::Ptr& log,
+    const ie::ICore* core);

 CompiledGraph::Ptr compileSubNetwork(
     ie::ICNNNetwork& network,
-    const CompilationConfig& subConfig);
+    const CompilationConfig& subConfig,
+    const ie::ICore* core);

 //
 // getSupportedLayers
@@ -177,7 +180,8 @@ std::set<std::string> getSupportedLayers(
     const ie::ICNNNetwork& network,
     Platform platform,
     const CompilationConfig& config,
-    const Logger::Ptr& log);
+    const Logger::Ptr& log,
+    const ie::ICore* core);

 //
 // Blob version and checks
diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
index aee12f41d84381..13176227c35555 100644
--- a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
@@ -32,9 +32,10 @@ namespace vpu {
     [this](const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) \
     { functor_name(model, layer, inputs, outputs); }

-FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
-    : _stageBuilder(std::move(stageBuilder))
-    , parsers{{
+FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core)
+    : _stageBuilder(std::move(stageBuilder)),
+      _core(core),
+      parsers{{
         {"Convolution", LAYER_PARSER(parseConvolution)},
         {"Pooling", LAYER_PARSER(parsePooling)},
         {"ReLU", LAYER_PARSER(parseReLU)},
@@ -117,7 +118,9 @@ FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
         {"StaticShapeBroadcast", LAYER_PARSER(parseBroadcast)},
         {"StaticShapeNonMaxSuppression", LAYER_PARSER(parseStaticShapeNMS)},
         {"StaticShapeReshape", LAYER_PARSER(parseReshape)},
-    }} {}
+    }} {
+        VPU_THROW_UNLESS(_core != nullptr, "Argument core is null");
+    }

 ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
     VPU_PROFILE(buildInitialModel);
diff --git a/inference-engine/src/vpu/graph_transformer/src/graph_transformer.cpp b/inference-engine/src/vpu/graph_transformer/src/graph_transformer.cpp
index b75255c2e41d72..85c845b9816824 100644
--- a/inference-engine/src/vpu/graph_transformer/src/graph_transformer.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/graph_transformer.cpp
@@ -138,14 +138,15 @@ void CompileEnv::free() {

 namespace {

-CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network) {
+CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network,
+                               const ie::ICore* core) {
     const auto& env = CompileEnv::get();

     env.log->debug("Compile network [%s]", network.getName());
     VPU_LOGGER_SECTION(env.log);

     auto stageBuilder = std::make_shared<StageBuilder>();
-    auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+    auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
     auto backEnd = std::make_shared<BackEnd>();
     auto passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
@@ -190,7 +191,8 @@ CompiledGraph::Ptr compileNetwork(
     ie::ICNNNetwork& network,
     Platform platform,
     const CompilationConfig& config,
-    const Logger::Ptr& log) {
+    const Logger::Ptr& log,
+    const ie::ICore* core) {
     CompileEnv::init(platform, config, log);
     AutoScope autoDeinit([] {
         CompileEnv::free();
@@ -198,7 +200,7 @@ CompiledGraph::Ptr compileNetwork(

     VPU_PROFILE(compileNetwork);

-    return compileImpl(network);
+    return compileImpl(network, core);
 }

 CompiledGraph::Ptr compileModel(
@@ -218,7 +220,8 @@ CompiledGraph::Ptr compileModel(

 CompiledGraph::Ptr compileSubNetwork(
     ie::ICNNNetwork& network,
-    const CompilationConfig& subConfig) {
+    const CompilationConfig& subConfig,
+    const ie::ICore* core) {
     VPU_PROFILE(compileSubNetwork);

     const auto& env = CompileEnv::get();
@@ -230,7 +233,7 @@ CompiledGraph::Ptr compileSubNetwork(

     CompileEnv::updateConfig(subConfig);

-    return compileImpl(network);
+    return compileImpl(network, core);
 }

 //
@@ -241,7 +244,8 @@ std::set<std::string> getSupportedLayers(
     const ie::ICNNNetwork& network,
     Platform platform,
     const CompilationConfig& config,
-    const Logger::Ptr& log) {
+    const Logger::Ptr& log,
+    const ie::ICore* core) {
     CompileEnv::init(platform, config, log);
     AutoScope autoDeinit([] {
         CompileEnv::free();
@@ -250,7 +254,7 @@ std::set<std::string> getSupportedLayers(

     VPU_PROFILE(getSupportedLayers);

     auto stageBuilder = std::make_shared<StageBuilder>();
-    auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+    auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);

     auto clonedNetworkImpl = ie::cloneNet(network);
diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp
index f4be0abae13147..8d7d90a28a7082 100644
--- a/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp
@@ -107,14 +107,14 @@ std::pair<int, int> getResolution(const std::string& str) {

 ie::CNNNetwork loadSubNetwork(
     const std::string& fileName,
-    const std::pair<int, int>& imgSize, int* zdir_batchsize = nullptr) {
+    const std::pair<int, int>& imgSize,
+    const ie::ICore* core,
+    int* zdir_batchsize = nullptr) {
     //
     // Load network
     //

-    // ticket 30632 : replace with ICore interface
-    InferenceEngine::Core reader;
-    auto network = reader.ReadNetwork(fileName);
+    auto network = core->ReadNetwork(fileName, std::string());

     //
     // Set precision of input/output
@@ -206,8 +206,8 @@ void FrontEnd::parseMTCNN(const Model& model, const ie::CNNLayerPtr& layer, cons

     // Convert p-nets
     for (const auto& p_net_input : pyramid) {
-        auto pNet = loadSubNetwork(pnet_ir_name, p_net_input);
-        auto res = compileSubNetwork(pNet, env.config);
+        auto pNet = loadSubNetwork(pnet_ir_name, p_net_input, _core);
+        auto res = compileSubNetwork(pNet, env.config, _core);
         mergedBlobSize += res->blob.size();
         compiledSubNetworks.emplace_back(std::move(res));
     }
@@ -215,16 +215,16 @@ void FrontEnd::parseMTCNN(const Model& model, const ie::CNNLayerPtr& layer, cons
     int stage2_zdir_batchsize = 1;
     // Convert r-net
     {
-        auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, &stage2_zdir_batchsize);
-        auto res = compileSubNetwork(rNet, env.config);
+        auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, _core, &stage2_zdir_batchsize);
+        auto res = compileSubNetwork(rNet, env.config, _core);
         mergedBlobSize += res->blob.size();
         compiledSubNetworks.emplace_back(std::move(res));
     }

     // Convert o-net
     {
-        auto oNet = loadSubNetwork(onet_ir_name, o_net_input);
-        auto res = compileSubNetwork(oNet, env.config);
+        auto oNet = loadSubNetwork(onet_ir_name, o_net_input, _core);
+        auto res = compileSubNetwork(oNet, env.config, _core);
         mergedBlobSize += res->blob.size();
         compiledSubNetworks.emplace_back(std::move(res));
     }
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.cpp
index ea29b5b43cd63b..803fd3927d4218 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.cpp
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.cpp
@@ -24,8 +24,10 @@ namespace MyriadPlugin {
 ExecutableNetwork::ExecutableNetwork(
     std::shared_ptr<IMvnc> mvnc,
     std::vector<DevicePtr>& devicePool,
-    const MyriadConfig& config) :
-    _config(config) {
+    const MyriadConfig& config,
+    const ie::ICore* core) :
+    _config(config),
+    _core(core) {
     VPU_PROFILE(ExecutableNetwork);

     _log = std::make_shared<Logger>(
@@ -53,8 +55,9 @@ ExecutableNetwork::ExecutableNetwork(
     ICNNNetwork& network,
     std::shared_ptr<IMvnc> mvnc,
     std::vector<DevicePtr>& devicePool,
-    const MyriadConfig& config) :
-    ExecutableNetwork(std::move(mvnc), devicePool, config) {
+    const MyriadConfig& config,
+    const ie::ICore* core) :
+    ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
     VPU_PROFILE(ExecutableNetwork);

     const auto compilerLog = std::make_shared<Logger>(
@@ -68,7 +71,8 @@ ExecutableNetwork::ExecutableNetwork(
         network,
         static_cast<Platform>(_device->_platform),
         _config.compileConfig(),
-        compilerLog);
+        compilerLog,
+        core);

     _actualNumExecutors = compiledGraph->numExecutors;
     _graphBlob = std::move(compiledGraph->blob);
@@ -146,8 +150,9 @@ void ExecutableNetwork::Import(std::istream& strm,
 ExecutableNetwork::ExecutableNetwork(std::istream& strm,
                                      std::shared_ptr<IMvnc> mvnc,
                                      std::vector<DevicePtr> &devicePool,
-                                     const MyriadConfig& config) :
-    ExecutableNetwork(std::move(mvnc), devicePool, config) {
+                                     const MyriadConfig& config,
+                                     const ie::ICore* core) :
+    ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
     VPU_PROFILE(ExecutableNetwork);
     Import(strm, devicePool, config);
 }
@@ -156,8 +161,9 @@ ExecutableNetwork::ExecutableNetwork(
     const std::string& blobFilename,
     std::shared_ptr<IMvnc> mvnc,
     std::vector<DevicePtr>& devicePool,
-    const MyriadConfig& config) :
-    ExecutableNetwork(std::move(mvnc), devicePool, config) {
+    const MyriadConfig& config,
+    const ie::ICore* core) :
+    ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
     VPU_PROFILE(ExecutableNetwork);
     std::ifstream blobFile{blobFilename, std::ios::binary};
     Import(blobFile, devicePool, config);
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.h b/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.h
index 1e106c06cbc6ab..85381b586c04ce 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.h
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_executable_network.h
@@ -35,17 +35,20 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef
     explicit ExecutableNetwork(InferenceEngine::ICNNNetwork &network,
                                std::shared_ptr<IMvnc> mvnc,
                                std::vector<DevicePtr> &devicePool,
-                               const MyriadConfig& config);
+                               const MyriadConfig& config,
+                               const ie::ICore* core);

     explicit ExecutableNetwork(std::istream& strm,
                                std::shared_ptr<IMvnc> mvnc,
                                std::vector<DevicePtr> &devicePool,
-                               const MyriadConfig& config);
+                               const MyriadConfig& config,
+                               const ie::ICore* core);

     explicit ExecutableNetwork(const std::string &blobFilename,
                                std::shared_ptr<IMvnc> mvnc,
                                std::vector<DevicePtr> &devicePool,
-                               const MyriadConfig& config);
+                               const MyriadConfig& config,
+                               const ie::ICore* core);

     virtual ~ExecutableNetwork() {
@@ -120,6 +123,7 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef
     DevicePtr _device;
     GraphMetaInfo _graphMetaData;
     MyriadConfig _config;
+    const ie::ICore* _core;
     int _actualNumExecutors = 0;
     std::vector<std::string> _supportedMetrics;
@@ -131,7 +135,8 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef

     ExecutableNetwork(std::shared_ptr<IMvnc> mvnc,
                       std::vector<DevicePtr> &devicePool,
-                      const MyriadConfig& config);
+                      const MyriadConfig& config,
+                      const ie::ICore* core);

     InferenceEngine::ITaskExecutor::Ptr getNextTaskExecutor() {
         std::string id = _taskExecutorGetResultIds.front();
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
index bc756d7a62085a..be3a3f18046fcc 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
@@ -46,7 +46,7 @@ ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(
         vpu::EliminateShapeOfAfterDSR().run_on_function(function);
     }

-    return std::make_shared<ExecutableNetwork>(*clonedNetwork, _mvnc, _devicePool, parsedConfigCopy);
+    return std::make_shared<ExecutableNetwork>(*clonedNetwork, _mvnc, _devicePool, parsedConfigCopy, GetCore());
 }

 void Engine::SetConfig(const std::map<std::string, std::string> &config) {
@@ -96,7 +96,8 @@ void Engine::QueryNetwork(
         network,
         static_cast<Platform>(parsedConfigCopy.platform()),
         parsedConfigCopy.compileConfig(),
-        log);
+        log,
+        GetCore());

     for (const auto& layerName : layerNames) {
         res.supportedLayersMap.insert({ layerName, GetName() });
@@ -134,7 +135,7 @@ InferenceEngine::ExecutableNetwork Engine::ImportNetwork(

     const auto executableNetwork =
             std::make_shared<ExecutableNetwork>(
-                model, _mvnc, _devicePool, parsedConfigCopy);
+                model, _mvnc, _devicePool, parsedConfigCopy, GetCore());

     return InferenceEngine::ExecutableNetwork{IExecutableNetwork::Ptr(
             new ExecutableNetworkBase(executableNetwork),
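
Reviewer note on usage (not part of the patch): the change threads the plugin's ie::ICore through every compilation entry point (compileNetwork, compileSubNetwork, getSupportedLayers, and FrontEnd itself), so the MTCNN sub-networks in mtcnn.cpp are now read via the injected core->ReadNetwork() instead of a locally constructed InferenceEngine::Core, resolving the "ticket 30632" TODO. A minimal sketch of the resulting call pattern on the plugin side follows; compileForDevice is a hypothetical helper and the ie_icore.hpp include path is an assumption, while the compileNetwork signature and the MyriadConfig accessors match the hunks above.

    #include <vpu/graph_transformer.hpp>
    #include <ie_icore.hpp>  // assumed header providing InferenceEngine::ICore

    // Hypothetical helper mirroring ExecutableNetwork's constructor above:
    // the ICore pointer obtained from Engine::GetCore() is forwarded into the
    // compiler, which hands it to FrontEnd. FrontEnd's new VPU_THROW_UNLESS
    // check rejects a null core before any layer parsing starts, so callers
    // must not pass nullptr.
    vpu::CompiledGraph::Ptr compileForDevice(
            InferenceEngine::ICNNNetwork& network,
            const vpu::MyriadPlugin::MyriadConfig& parsedConfig,
            const vpu::Logger::Ptr& log,
            const InferenceEngine::ICore* core) {
        return vpu::compileNetwork(
                network,
                static_cast<vpu::Platform>(parsedConfig.platform()),
                parsedConfig.compileConfig(),
                log,
                core);
    }

Injecting ICore rather than constructing a fresh Core per sub-network keeps IR reading inside the caller's Core context (registered extensions and readers) and avoids re-initializing the engine for each MTCNN stage.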