Skip to content

Commit

Permalink
[IE Myriad] Use instance of InferenceEngine::Core via ie::ICore interface in Myriad plugin (#1316)
Browse files Browse the repository at this point in the history

* [ci-skip][IE Myriad] ie::ICore pointer passed into FrontEnd from plugin

* [ci-skip][IE Myriad] Added MockICore to fix graph transformer tests

* [ci-skip][IE Myriad] IN renamed to I_N to avoid compile error in Windows build: C2513: 'int': no variable declared before '='
  • Loading branch information
nikita-kud authored Jul 29, 2020
1 parent 3a87653 commit a644cb8
Show file tree
Hide file tree
Showing 19 changed files with 130 additions and 72 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <tuple>
#include <set>

#include <ie_icore.hpp>
#include <cpp/ie_cnn_network.h>
#include <details/caseless.hpp>

Expand All @@ -30,7 +31,7 @@ class FrontEnd final {
public:
using Ptr = std::shared_ptr<FrontEnd>;

explicit FrontEnd(StageBuilder::Ptr stageBuilder);
explicit FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core);

ModelPtr buildInitialModel(ie::ICNNNetwork& network);

Expand Down Expand Up @@ -204,6 +205,7 @@ class FrontEnd final {

private:
StageBuilder::Ptr _stageBuilder;
const ie::ICore* _core = nullptr;

IeParsedNetwork _ieParsedNetwork;
std::unordered_set<ie::DataPtr> _unbatchedOutputs;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <set>
#include <utility>

#include <ie_icore.hpp>
#include <ie_icnn_network.hpp>
#include <details/caseless.hpp>

Expand Down Expand Up @@ -164,11 +165,13 @@ CompiledGraph::Ptr compileNetwork(
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log);
const Logger::Ptr& log,
const ie::ICore* core);

CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
const CompilationConfig& subConfig);
const CompilationConfig& subConfig,
const ie::ICore* core);

//
// getSupportedLayers
Expand All @@ -178,7 +181,8 @@ std::set<std::string> getSupportedLayers(
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log);
const Logger::Ptr& log,
const ie::ICore* core);

//
// Blob version and checks
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,10 @@ namespace vpu {
[this](const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) \
{ functor_name(model, layer, inputs, outputs); }

FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
: _stageBuilder(std::move(stageBuilder))
, parsers{{
FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core)
: _stageBuilder(std::move(stageBuilder)),
_core(core),
parsers{{
{"Convolution", LAYER_PARSER(parseConvolution)},
{"Pooling", LAYER_PARSER(parsePooling)},
{"ReLU", LAYER_PARSER(parseReLU)},
Expand Down Expand Up @@ -120,7 +121,9 @@ FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
{"StaticShapeReshape", LAYER_PARSER(parseReshape)},
{"Mish", LAYER_PARSER(parseMish)},
{"Gelu", LAYER_PARSER(parseGelu)},
}} {}
}} {
VPU_THROW_UNLESS(_core != nullptr, "Argument core is null");
}

ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
VPU_PROFILE(buildInitialModel);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -145,14 +145,15 @@ void CompileEnv::free() {

namespace {

CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network) {
CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network,
const ie::ICore* core) {
const auto& env = CompileEnv::get();

env.log->debug("Compile network [%s]", network.getName());
VPU_LOGGER_SECTION(env.log);

auto stageBuilder = std::make_shared<StageBuilder>();
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
auto backEnd = std::make_shared<BackEnd>();
auto passManager = std::make_shared<PassManager>(stageBuilder, backEnd);

Expand Down Expand Up @@ -197,15 +198,16 @@ CompiledGraph::Ptr compileNetwork(
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log) {
const Logger::Ptr& log,
const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
});

VPU_PROFILE(compileNetwork);

return compileImpl(network);
return compileImpl(network, core);
}

CompiledGraph::Ptr compileModel(
Expand All @@ -225,7 +227,8 @@ CompiledGraph::Ptr compileModel(

CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
const CompilationConfig& subConfig) {
const CompilationConfig& subConfig,
const ie::ICore* core) {
VPU_PROFILE(compileSubNetwork);

const auto& env = CompileEnv::get();
Expand All @@ -237,7 +240,7 @@ CompiledGraph::Ptr compileSubNetwork(

CompileEnv::updateConfig(subConfig);

return compileImpl(network);
return compileImpl(network, core);
}

//
Expand All @@ -248,7 +251,8 @@ std::set<std::string> getSupportedLayers(
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
const Logger::Ptr& log) {
const Logger::Ptr& log,
const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
Expand All @@ -257,7 +261,7 @@ std::set<std::string> getSupportedLayers(
VPU_PROFILE(getSupportedLayers);

auto stageBuilder = std::make_shared<StageBuilder>();
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);

auto clonedNetworkImpl = ie::cloneNet(network);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ void PassImpl::run(const Model& model) {
continue;
}

int IN = inputDesc.dim(Dim::N);
int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
Expand Down Expand Up @@ -144,7 +144,7 @@ void PassImpl::run(const Model& model) {
"but: KO=%d, OC=%d", KO, OC);

// check spatial dims of output
int inputShape[] = {IW, IH, ID, IC, IN};
int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
int weightsShape[] = {KW, KH, KD, KI, KO};
for (int i = 0; i < 3; i++) {
Expand Down Expand Up @@ -311,7 +311,7 @@ void PassImpl::run(const Model& model) {
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}

Expand Down Expand Up @@ -378,7 +378,7 @@ void PassImpl::run(const Model& model) {
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ void PassImpl::run(const Model& model) {
VPU_THROW_UNLESS(inputDesc.type() == outputDesc.type(), "incompatible data types");
VPU_THROW_UNLESS(inputDesc.dimsOrder() == outputDesc.dimsOrder(), "incompatible dim orders");

int IN = inputDesc.dim(Dim::N);
int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
Expand All @@ -128,11 +128,11 @@ void PassImpl::run(const Model& model) {
int OH = outputDesc.dim(Dim::H);
int OW = outputDesc.dim(Dim::W);

VPU_THROW_UNLESS(IN == ON, "incompatible: input batch=%d, output batch=%d", IN, ON);
VPU_THROW_UNLESS(I_N == ON, "incompatible: input batch=%d, output batch=%d", I_N, ON);
VPU_THROW_UNLESS(IC == OC, "incompatible: input channels=%d, output channels=%d", IC, OC);

// check spatial dims of output
int inputShape[] = {IW, IH, ID, IC, IN};
int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
for (int i = 0; i < 3; i++) {
int expectedOutputSize = (inputShape[i]
Expand Down Expand Up @@ -318,7 +318,7 @@ void PassImpl::run(const Model& model) {
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}

Expand Down Expand Up @@ -358,7 +358,7 @@ void PassImpl::run(const Model& model) {
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",
Expand Down
20 changes: 10 additions & 10 deletions inference-engine/src/vpu/graph_transformer/src/stages/mtcnn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -107,14 +107,14 @@ std::pair<int, int> getResolution(const std::string& str) {

ie::CNNNetwork loadSubNetwork(
const std::string& fileName,
const std::pair<int, int>& imgSize, int* zdir_batchsize = nullptr) {
const std::pair<int, int>& imgSize,
const ie::ICore* core,
int* zdir_batchsize = nullptr) {
//
// Load network
//

// ticket 30632 : replace with ICore interface
InferenceEngine::Core reader;
auto network = reader.ReadNetwork(fileName);
auto network = core->ReadNetwork(fileName, std::string());

//
// Set precision of input/output
Expand Down Expand Up @@ -206,25 +206,25 @@ void FrontEnd::parseMTCNN(const Model& model, const ie::CNNLayerPtr& layer, cons

// Convert p-nets
for (const auto& p_net_input : pyramid) {
auto pNet = loadSubNetwork(pnet_ir_name, p_net_input);
auto res = compileSubNetwork(pNet, env.config);
auto pNet = loadSubNetwork(pnet_ir_name, p_net_input, _core);
auto res = compileSubNetwork(pNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}

int stage2_zdir_batchsize = 1;
// Convert r-net
{
auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, &stage2_zdir_batchsize);
auto res = compileSubNetwork(rNet, env.config);
auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, _core, &stage2_zdir_batchsize);
auto res = compileSubNetwork(rNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}

// Convert o-net
{
auto oNet = loadSubNetwork(onet_ir_name, o_net_input);
auto res = compileSubNetwork(oNet, env.config);
auto oNet = loadSubNetwork(onet_ir_name, o_net_input, _core);
auto res = compileSubNetwork(oNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,10 @@ namespace MyriadPlugin {
ExecutableNetwork::ExecutableNetwork(
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
_config(config) {
const MyriadConfig& config,
const ie::ICore* core) :
_config(config),
_core(core) {
VPU_PROFILE(ExecutableNetwork);

_log = std::make_shared<Logger>(
Expand Down Expand Up @@ -52,8 +54,9 @@ ExecutableNetwork::ExecutableNetwork(
ICNNNetwork& network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
ExecutableNetwork(std::move(mvnc), devicePool, config) {
const MyriadConfig& config,
const ie::ICore* core) :
ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);

const auto compilerLog = std::make_shared<Logger>(
Expand All @@ -67,7 +70,8 @@ ExecutableNetwork::ExecutableNetwork(
network,
static_cast<Platform>(_device->_platform),
_config.compileConfig(),
compilerLog);
compilerLog,
_core);

_actualNumExecutors = compiledGraph->numExecutors;
_graphBlob = std::move(compiledGraph->blob);
Expand Down Expand Up @@ -145,8 +149,9 @@ void ExecutableNetwork::Import(std::istream& strm,
// Constructs an ExecutableNetwork by importing a previously exported graph
// blob from the given stream.
//
// @param strm       stream containing an exported graph blob
// @param mvnc       MVNC handle used to communicate with Myriad devices
// @param devicePool pool of available devices
// @param config     plugin configuration
// @param core       non-owning pointer to the InferenceEngine core interface,
//                   stored by the delegated base constructor
ExecutableNetwork::ExecutableNetwork(std::istream& strm,
                                     std::shared_ptr<IMvnc> mvnc,
                                     std::vector<DevicePtr>& devicePool,
                                     const MyriadConfig& config,
                                     const ie::ICore* core) :
        ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
    VPU_PROFILE(ExecutableNetwork);
    Import(strm, devicePool, config);
}
Expand All @@ -155,8 +160,9 @@ ExecutableNetwork::ExecutableNetwork(
const std::string& blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
const MyriadConfig& config) :
ExecutableNetwork(std::move(mvnc), devicePool, config) {
const MyriadConfig& config,
const ie::ICore* core) :
ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
std::ifstream blobFile{blobFilename, std::ios::binary};
Import(blobFile, devicePool, config);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,17 +35,20 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef
explicit ExecutableNetwork(InferenceEngine::ICNNNetwork &network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);

explicit ExecutableNetwork(std::istream& strm,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);

explicit ExecutableNetwork(const std::string &blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);


virtual ~ExecutableNetwork() {
Expand Down Expand Up @@ -120,6 +123,7 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef
DevicePtr _device;
GraphMetaInfo _graphMetaData;
MyriadConfig _config;
const ie::ICore* _core = nullptr;
int _actualNumExecutors = 0;
std::vector<std::string> _supportedMetrics;

Expand All @@ -131,7 +135,8 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef

ExecutableNetwork(std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
const MyriadConfig& config);
const MyriadConfig& config,
const ie::ICore* core);

InferenceEngine::ITaskExecutor::Ptr getNextTaskExecutor() {
std::string id = _taskExecutorGetResultIds.front();
Expand Down
Loading

0 comments on commit a644cb8

Please sign in to comment.