Skip to content

Commit

Permalink
Merge changes from 2020.4.0.1 (openvinotoolkit#1809)
Browse files Browse the repository at this point in the history
* [GNA] Propagate QoS timeout to the calling app (openvinotoolkit#1188)

* [GNA] Support timeout value set in Wait (openvinotoolkit#1499)

* [GNA] stored request id for completed sync infer request in order to get status later using wait() (openvinotoolkit#1458)

* stored request id for completed async infer request in order to get its status later

* preserved the "not started" status across multiple sequential calls to wait()

Co-authored-by: Denis Orlov <[email protected]>

* [GNA] Fix callbacks (openvinotoolkit#1607)

Co-authored-by: Eugene Smirnov <[email protected]>
  • Loading branch information
2 people authored and Rom committed Aug 28, 2020
1 parent 8304cac commit 3772036
Show file tree
Hide file tree
Showing 5 changed files with 55 additions and 21 deletions.
12 changes: 8 additions & 4 deletions inference-engine/src/gna_plugin/gna_device.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -269,19 +269,23 @@ const std::map <const std::pair<Gna2OperationType, int32_t>, const std::string>
};
#endif

void GNADeviceHelper::wait(uint32_t reqId) {
bool GNADeviceHelper::wait(uint32_t reqId, int64_t millisTimeout) {
#if GNA_LIB_VER == 2
const auto status = Gna2RequestWait(reqId, GNA_TIMEOUT);
const auto status = Gna2RequestWait(reqId, millisTimeout);
if (status == Gna2StatusDriverQoSTimeoutExceeded) {
return false;
}
checkGna2Status(status);
#else
if (isPerformanceMeasuring) {
nGNAStatus = GNAWaitPerfRes(nGNAHandle, GNA_TIMEOUT, reqId, &nGNAPerfResults);
nGNAStatus = GNAWaitPerfRes(nGNAHandle, millisTimeout, reqId, &nGNAPerfResults);
} else {
nGNAStatus = GNAWait(nGNAHandle, GNA_TIMEOUT, reqId);
nGNAStatus = GNAWait(nGNAHandle, millisTimeout, reqId);
}
checkStatus();
#endif
updateGnaPerfCounters();
return true;
}

#if GNA_LIB_VER == 1
Expand Down
3 changes: 1 addition & 2 deletions inference-engine/src/gna_plugin/gna_device.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ class GNADeviceHelper {

#define MAX_TIMEOUT 500000
#endif
const uint32_t GNA_TIMEOUT = MAX_TIMEOUT;
bool isPerformanceMeasuring = false;
bool deviceOpened = false;
public:
Expand Down Expand Up @@ -115,7 +114,7 @@ class GNADeviceHelper {
static void checkGna2Status(Gna2Status status);
static void checkGna2Status(Gna2Status status, const Gna2Model& gnaModel);
#endif
void wait(uint32_t id);
bool wait(uint32_t id, int64_t millisTimeout = MAX_TIMEOUT);

struct DumpResult {
#if GNA_LIB_VER == 2
Expand Down
30 changes: 26 additions & 4 deletions inference-engine/src/gna_plugin/gna_infer_request.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,16 @@ class GNAInferRequest : public InferenceEngine::AsyncInferRequestInternal {
void InferImpl() override {
// execute input pre-processing.
execDataPreprocessing(_inputs);
plg->Infer(_inputs, _outputs);
// result returned from sync infer wait method
auto result = plg->Infer(_inputs, _outputs);

// if result is false we are dealing with QoS feature
// if result is ok, next call to wait() will return Ok, if request not in gna_queue
if (!result) {
inferRequestIdx = -1;
} else {
inferRequestIdx = -2;
}
}

/**
Expand All @@ -73,20 +82,33 @@ class GNAInferRequest : public InferenceEngine::AsyncInferRequestInternal {
if (_callback) {
auto infer_request = _publicInterface.lock();
IE_ASSERT(infer_request != nullptr);
auto res = Wait(0);
auto res = Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
_callback(infer_request, res);
}
}


InferenceEngine::StatusCode Wait(int64_t millis_timeout) override {
if (inferRequestIdx == -1) {
return InferenceEngine::INFER_NOT_STARTED;
} else if (millis_timeout < -1) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str;
}

plg->Wait(inferRequestIdx);
return InferenceEngine::OK;
bool qosOK;
if (millis_timeout == InferenceEngine::IInferRequest::WaitMode::RESULT_READY) {
qosOK = plg->Wait(inferRequestIdx);
} else {
qosOK = plg->WaitFor(inferRequestIdx, millis_timeout);
}

if (qosOK) {
return InferenceEngine::OK;
} else {
// need to preserve invalid state here to avoid next Wait() from clearing it
inferRequestIdx = -1;
return InferenceEngine::INFER_NOT_STARTED;
}
}
};
} // namespace GNAPluginNS
24 changes: 16 additions & 8 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -967,16 +967,23 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer
return idx;
}

void GNAPlugin::Wait(uint32_t request_idx) {
bool GNAPlugin::Wait(uint32_t request_idx) {
return WaitFor(request_idx, MAX_TIMEOUT);
}

bool GNAPlugin::WaitFor(uint32_t request_idx, int64_t millisTimeout) {
#if GNA_LIB_VER == 2
auto& nnets = gnaRequestConfigToRequestIdMap;
#endif
if (nnets.size() <= request_idx) return; // TODO: GNA2: check whether necessary
if (nnets.size() <= request_idx) return true; // TODO: GNA2: check whether necessary
// already synced TODO: might be copy required ???
if (std::get<1>(nnets[request_idx]) == -1) return;
if (std::get<1>(nnets[request_idx]) == -1) return true;

if (gnadevice) {
gnadevice->wait(std::get<1>(nnets[request_idx]));
if (!gnadevice->wait(std::get<1>(nnets[request_idx]), millisTimeout)) {
std::get<1>(nnets[request_idx]) = -1;
return false;
}
}

std::get<1>(nnets[request_idx]) = -1;
Expand Down Expand Up @@ -1063,13 +1070,14 @@ void GNAPlugin::Wait(uint32_t request_idx) {
}
output_idx++;
}
return true;
}

void GNAPlugin::Reset() {
graphCompiler.Reset();
}

void GNAPlugin::Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &output) {
bool GNAPlugin::Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &output) {
BlobMap bmInput;
BlobMap bmOutput;
if (inputsDataMap.size() != 1) {
Expand All @@ -1080,11 +1088,11 @@ void GNAPlugin::Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob
bmInput[inputsDataMap.begin()->first] = std::shared_ptr<Blob>(const_cast<Blob*>(&input), [](Blob*){});
IE_ASSERT(!outputsDataMap.empty());
bmOutput[outputsDataMap.begin()->first] = std::shared_ptr<Blob>(&output, [](Blob*){});
Infer(bmInput, bmOutput);
return Infer(bmInput, bmOutput);
}

void GNAPlugin::Infer(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result) {
Wait(QueueInference(input, result));
bool GNAPlugin::Infer(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result) {
return Wait(QueueInference(input, result));
}

Blob::Ptr GNAPlugin::GetOutputBlob(const std::string& name, InferenceEngine::Precision precision) {
Expand Down
7 changes: 4 additions & 3 deletions inference-engine/src/gna_plugin/gna_plugin.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {

void LoadNetwork(InferenceEngine::ICNNNetwork &network);

void Infer(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result);
bool Infer(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result);
void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap);
void AddExtension(InferenceEngine::IExtensionPtr extension) override;

Expand All @@ -109,15 +109,16 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string> &config_map,
InferenceEngine::RemoteContext::Ptr context) override { THROW_GNA_EXCEPTION << "Not implemented"; }
void Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &result);
bool Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &result);
void SetCore(InferenceEngine::ICore*) noexcept override {}
InferenceEngine::ICore* GetCore() const noexcept override {return nullptr;}
void Reset();
void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string>& config,
InferenceEngine::QueryNetworkResult &res) const override;
uint32_t QueueInference(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result);
void Wait(uint32_t idx = 0);
bool Wait(uint32_t idx);
bool WaitFor(uint32_t idx, int64_t millisTimeout);

InferenceEngine::Parameter GetConfig(const std::string& name,
const std::map<std::string, InferenceEngine::Parameter> & options) const override;
Expand Down

0 comments on commit 3772036

Please sign in to comment.