[PreProcessing] Added ability to preprocess inputs into plugin-desired format

Changed InferRequestInternal:
 - added a _deviceInputs member to store blobs in the plugin's desired
   preprocessing target format
 - added a default argument to preProcessingRequired that describes the
   plugin-specific preprocessing target
 - updated SetBlob and GetBlob to handle the plugin's preprocessing
   targets (_deviceInputs)
 - added an addInputPreProcessingFor helper method to avoid code
   duplication

Changed the TEMPLATE plugin to use the new functionality (see the sketch
below):
 - removed the explicit precision conversion (the built-in one of
   InferRequestInternal is used instead)
 - replaced _networkInputBlobs with InferRequestInternal::_deviceInputs
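
For illustration, a condensed sketch of the resulting flow in the TEMPLATE plugin (assembled from the diffs below; the AllocateImpl call and the backend-tensor binding are abbreviated, and error handling is omitted):

// Sketch only -- how a plugin consumes _deviceInputs after this change.
// _inputs keeps blobs in the user's precision/layout; _deviceInputs keeps
// blobs in whatever precision/layout the device actually wants.
void TemplateInferRequest::allocateBlobs() {
    // Base-class preprocessing fills _deviceInputs, so the device-format
    // blobs are allocated here instead of in a plugin-private map.
    AllocateImpl(_networkInputs, _inputs, _deviceInputs, /* element-type query */);
}

void TemplateInferRequest::inferPreprocess() {
    // Resize, color conversion, layout reorder, and precision conversion
    // now all happen inside the base class, targeting _deviceInputs.
    InferRequestInternal::execDataPreprocessing(_deviceInputs);
    for (auto&& input : _deviceInputs) {
        // Wrap the already-converted memory for the backend; the explicit
        // blobCopy() precision-conversion loop is gone.
        bindBackendTensor(input.first, input.second);  // hypothetical helper
    }
}
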
anton-potapov committed Dec 1, 2020
1 parent 0671916 commit b63d411
Showing 9 changed files with 94 additions and 65 deletions.
17 changes: 5 additions & 12 deletions docs/template_plugin/src/template_infer_request.cpp
@@ -112,7 +112,7 @@ static void AllocateImpl(const BlobDataMap& blobDataMap,

void TemplateInferRequest::allocateBlobs() {
auto&& parameters = _executableNetwork->_function->get_parameters();
AllocateImpl(_networkInputs, _inputs, _networkInputBlobs, [&] (const std::string& blobName) {
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) {
return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type();
});
auto&& results = _executableNetwork->_function->get_results();
@@ -176,21 +176,14 @@ void TemplateInferRequest::inferPreprocess() {
auto start = Time::now();
// NOTE: After the InferRequestInternal::execDataPreprocessing call,
// an input can point to a memory region other than the one allocated in the constructor.
InferRequestInternal::execDataPreprocessing(_inputs);
for (auto&& input : _inputs) {
auto inputBlob = input.second;
auto networkInput = _networkInputBlobs[input.first];
if (inputBlob->getTensorDesc().getPrecision() == networkInput->getTensorDesc().getPrecision()) {
networkInput = inputBlob;
} else {
blobCopy(inputBlob, networkInput);
}
auto index = _executableNetwork->_inputIndex[input.first];
InferRequestInternal::execDataPreprocessing(_deviceInputs);
for (auto&& networkInput : _deviceInputs) {
auto index = _executableNetwork->_inputIndex[networkInput.first];
const auto& parameter = _parameters[index];
const auto& parameterShape = parameter->get_shape();
const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput)->rmap().as<void*>());
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
}
for (auto&& output : _outputs) {
auto outputBlob = output.second;
1 change: 0 additions & 1 deletion docs/template_plugin/src/template_infer_request.hpp
@@ -63,7 +63,6 @@ class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
// for performance counters
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;

InferenceEngine::BlobMap _networkInputBlobs;
InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters;
ngraph::ResultVector _results;
@@ -22,7 +22,7 @@ const std::vector<std::map<std::string, std::string>> configs = {
INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(1, 2, 3, 4, 5), // Number of input tensor channels
::testing::Values(4), // Number of input tensor channels
::testing::Values(true), // Use SetInput
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
@@ -31,7 +31,7 @@ INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaSetInput, Preproces
INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(4, 5), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(false), // use GetBlob
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
@@ -91,27 +91,23 @@ class InferRequestInternal : virtual public IInferRequestInternal {
<< "Failed to set Blob with precision not corresponding to user input precision";
}

const bool preProcRequired = preProcessingRequired(foundInput, data);
auto& devBlob = _deviceInputs[name];
const bool preProcRequired = preProcessingRequired(foundInput, data, devBlob);
if (compoundBlobPassed && !preProcRequired) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str
<< "cannot set compound blob: supported only for input pre-processing";
}

if (preProcRequired) {
if (_preProcData.find(name) == _preProcData.end()) {
_preProcData.emplace(name, CreatePreprocDataHelper());
}
_preProcData[name]->isApplicable(data, _inputs[name]);
// Stores the given blob as ROI blob. It will be used to fill in network input
// during pre-processing
_preProcData[name]->setRoiBlob(data);
addInputPreProcessingFor(name, data, devBlob ? devBlob : _inputs[name]);
} else {
size_t inputSize = details::product(foundInput->getTensorDesc().getDims());
if (dataSize != inputSize) {
THROW_IE_EXCEPTION << "Input blob size is not equal network input size (" << dataSize
<< "!=" << inputSize << ").";
}
_inputs[name] = data;
devBlob = data;
}
} else {
if (compoundBlobPassed) {
@@ -154,6 +150,10 @@ class InferRequestInternal : virtual public IInferRequestInternal {
foundInput->getTensorDesc().getLayout() != SCALAR
? foundInput->getTensorDesc().getDims()
: oneVector);

if (preProcessingRequired(foundInput, data, _deviceInputs[name])) {
addInputPreProcessingFor(name, data, _deviceInputs[name]);
}
}
} else {
data = _outputs[name];
@@ -233,16 +233,16 @@ class InferRequestInternal : virtual public IInferRequestInternal {
InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info
InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data
InferenceEngine::BlobMap _inputs; //!< A map of network input blobs
InferenceEngine::BlobMap _deviceInputs; //!< A map of network input blobs in the plugin's desired format
InferenceEngine::BlobMap _outputs; //!< A map of network output blobs
std::map<std::string, PreProcessDataPtr> _preProcData; //!< A map of pre-process data per input
int m_curBatch; //!< Current batch value used in dynamic batching

/**
* @brief A shared pointer to ExecutableNetworkInternal interface
* @note Needed to correctly handle ownership between objects.
*/
std::shared_ptr<ExecutableNetworkInternal> _exeNetwork;

/**
* @brief Checks and executes input data pre-processing if needed.
* @param inputs Inputs blobs to perform preprocessing on
@@ -259,7 +259,6 @@ class InferRequestInternal : virtual public IInferRequestInternal {
}
}
}

/**
* @brief Helper function to find input or output blob by name
* @param name A name of input or output blob.
@@ -355,24 +354,51 @@ class InferRequestInternal : virtual public IInferRequestInternal {
* @brief Checks whether pre-processing step is required for a given input
* @param info InputInfo corresponding to input blob
* @param blob Input Blob object corresponding to input info
* @param outBlob Blob object in plugin's desired format
* @return `True` if pre-processing is required, `false` otherwise
*/
bool preProcessingRequired(const InputInfo::Ptr& info, const Blob::Ptr& blob) {
bool preProcessingRequired(const InputInfo::Ptr& info, const Blob::Ptr& blob, const Blob::Ptr& outBlob = nullptr) {
// pre-processing is required if:
// 1. resize algorithm is specified (resize required)
// 2. color format specified:
// 2.a. color format is not equal to network's expected (color conversion required)
// 2.b. network's layout != blob's layout (reorder required)
// 3. precision conversion is required

const auto& preProcessInfo = info->getPreProcess();
const auto inputColorFormat = preProcessInfo.getColorFormat();
// FIXME: support other network's input formats once the API is ready. Assuming input is in
// the BGR format by default
const auto networkColorFormat = ColorFormat::BGR;

const bool colorFormatSpecified = inputColorFormat != ColorFormat::RAW;

auto blob_layout = [](const Blob::Ptr& b) { return b->getTensorDesc().getLayout(); };
auto blob_prec = [](const Blob::Ptr& b) { return b->getTensorDesc().getPrecision();};

auto dst_layout = outBlob ? blob_layout(outBlob) : info->getLayout();
auto dst_prec = outBlob ? blob_prec(outBlob) : info->getPrecision();

//FIXME: remove the first part to allow any needed conversion?
const bool need_layout_conv = (colorFormatSpecified || outBlob) &&
(blob_layout(blob) != dst_layout);

return preProcessInfo.getResizeAlgorithm() != ResizeAlgorithm::NO_RESIZE ||
(colorFormatSpecified && inputColorFormat != networkColorFormat) ||
(colorFormatSpecified && info->getLayout() != blob->getTensorDesc().getLayout());
need_layout_conv ||
(blob_prec(blob) != dst_prec);
}

void addInputPreProcessingFor(const std::string& name, Blob::Ptr const& inBlob, const Blob::Ptr& dstBlob) {
auto ppDataIt = _preProcData.find(name);
if (ppDataIt == _preProcData.end()) {
ppDataIt = (_preProcData.emplace(name, CreatePreprocDataHelper())).first;
}

auto& preproc_ptr = ppDataIt->second;
preproc_ptr->isApplicable(inBlob, dstBlob);
// Stores the given blob as ROI blob. It will be used to fill in network input
// during pre-processing
preproc_ptr->setRoiBlob(inBlob);
}
};
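
As a usage illustration (a sketch, not code from this commit): the default argument keeps legacy call sites compiling unchanged, while SetBlob and GetBlob pass the plugin's device blob so differences are detected against the device target rather than the user-facing input info.

// Hypothetical call sites; 'info' is the InputInfo::Ptr, 'userBlob' the blob
// supplied by the application, 'devBlob' the entry from _deviceInputs
// (possibly still null before the plugin allocates it).
bool legacy = preProcessingRequired(info, userBlob);          // checks against info's layout/precision
bool device = preProcessingRequired(info, userBlob, devBlob); // checks against devBlob's layout/precision
if (device) {
    // Same routing SetBlob uses above: fall back to the user-facing blob
    // when no device blob exists yet.
    addInputPreProcessingFor(name, userBlob, devBlob ? devBlob : _inputs[name]);
}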

57 changes: 29 additions & 28 deletions inference-engine/src/preprocessing/ie_preprocess_data.cpp
@@ -758,7 +758,7 @@ class PreProcessData : public IPreProcessData {
/**
* @brief ROI blob.
*/
Blob::Ptr _roiBlob = nullptr;
Blob::Ptr _userBlob = nullptr;
Blob::Ptr _tmp1 = nullptr;
Blob::Ptr _tmp2 = nullptr;

@@ -773,7 +773,7 @@

Blob::Ptr getRoiBlob() const override;

void execute(Blob::Ptr &outBlob, const PreProcessInfo& info, bool serial, int batchSize = -1) override;
void execute(Blob::Ptr &preprocessedBlob, const PreProcessInfo &info, bool serial, int batchSize = -1) override;

void Release() noexcept override;

@@ -790,38 +790,39 @@ void PreProcessData::Release() noexcept {
}

void PreProcessData::setRoiBlob(const Blob::Ptr &blob) {
_roiBlob = blob;
_userBlob = blob;
}

Blob::Ptr PreProcessData::getRoiBlob() const {
return _roiBlob;
return _userBlob;
}

void PreProcessData::execute(Blob::Ptr &outBlob, const PreProcessInfo& info, bool serial,
void PreProcessData::execute(Blob::Ptr &preprocessedBlob, const PreProcessInfo &info, bool serial,
int batchSize) {
OV_ITT_SCOPED_TASK(itt::domains::IEPreproc, "Preprocessing");

auto algorithm = info.getResizeAlgorithm();
auto fmt = info.getColorFormat();

if (algorithm == NO_RESIZE && fmt == ColorFormat::RAW) {
THROW_IE_EXCEPTION << "Input pre-processing is called without the pre-processing info set: "
"there's nothing to be done";
if (_userBlob == nullptr || preprocessedBlob == nullptr) {
THROW_IE_EXCEPTION << "Input pre-processing is called with null " << (_userBlob == nullptr ? "_userBlob" : "preprocessedBlob");
}

if (_roiBlob == nullptr) {
THROW_IE_EXCEPTION << "Input pre-processing is called without ROI blob set";
}

batchSize = PreprocEngine::getCorrectBatchSize(batchSize, _roiBlob);
batchSize = PreprocEngine::getCorrectBatchSize(batchSize, _userBlob);

if (!_preproc) {
_preproc.reset(new PreprocEngine);
}
if (_preproc->preprocessWithGAPI(_roiBlob, outBlob, algorithm, fmt, serial, batchSize)) {

if (_preproc->preprocessWithGAPI(_userBlob, preprocessedBlob, algorithm, fmt, serial, batchSize)) {
return;
}

if (algorithm == NO_RESIZE) {
THROW_IE_EXCEPTION << "Input pre-processing is called without the pre-processing info set: "
"there's nothing to be done";
}

if (batchSize > 1) {
THROW_IE_EXCEPTION << "Batch pre-processing is unsupported in this mode. "
"Use default pre-processing instead to process batches.";
@@ -834,37 +835,37 @@ void PreProcessData::execute(Blob::Ptr &outBlob, const PreProcessInfo& info, boo
}

Blob::Ptr res_in, res_out;
if (_roiBlob->getTensorDesc().getLayout() == NHWC) {
if (!_tmp1 || _tmp1->size() != _roiBlob->size()) {
if (_roiBlob->getTensorDesc().getPrecision() == Precision::FP32) {
_tmp1 = make_shared_blob<float>({Precision::FP32, _roiBlob->getTensorDesc().getDims(), Layout::NCHW});
if (_userBlob->getTensorDesc().getLayout() == NHWC) {
if (!_tmp1 || _tmp1->size() != _userBlob->size()) {
if (_userBlob->getTensorDesc().getPrecision() == Precision::FP32) {
_tmp1 = make_shared_blob<float>({Precision::FP32, _userBlob->getTensorDesc().getDims(), Layout::NCHW});
} else {
_tmp1 = make_shared_blob<uint8_t>({Precision::U8, _roiBlob->getTensorDesc().getDims(), Layout::NCHW});
_tmp1 = make_shared_blob<uint8_t>({Precision::U8, _userBlob->getTensorDesc().getDims(), Layout::NCHW});
}
_tmp1->allocate();
}

{
OV_ITT_SCOPED_TASK(itt::domains::IEPreproc, "Reorder before");
blob_copy(_roiBlob, _tmp1);
blob_copy(_userBlob, _tmp1);
}
res_in = _tmp1;
} else {
res_in = _roiBlob;
res_in = _userBlob;
}

if (outBlob->getTensorDesc().getLayout() == NHWC) {
if (!_tmp2 || _tmp2->size() != outBlob->size()) {
if (outBlob->getTensorDesc().getPrecision() == Precision::FP32) {
_tmp2 = make_shared_blob<float>({Precision::FP32, outBlob->getTensorDesc().getDims(), Layout::NCHW});
if (preprocessedBlob->getTensorDesc().getLayout() == NHWC) {
if (!_tmp2 || _tmp2->size() != preprocessedBlob->size()) {
if (preprocessedBlob->getTensorDesc().getPrecision() == Precision::FP32) {
_tmp2 = make_shared_blob<float>({Precision::FP32, preprocessedBlob->getTensorDesc().getDims(), Layout::NCHW});
} else {
_tmp2 = make_shared_blob<uint8_t>({Precision::U8, outBlob->getTensorDesc().getDims(), Layout::NCHW});
_tmp2 = make_shared_blob<uint8_t>({Precision::U8, preprocessedBlob->getTensorDesc().getDims(), Layout::NCHW});
}
_tmp2->allocate();
}
res_out = _tmp2;
} else {
res_out = outBlob;
res_out = preprocessedBlob;
}

{
Expand All @@ -874,7 +875,7 @@ void PreProcessData::execute(Blob::Ptr &outBlob, const PreProcessInfo& info, boo

if (res_out == _tmp2) {
OV_ITT_SCOPED_TASK(itt::domains::IEPreproc, "Reorder after");
blob_copy(_tmp2, outBlob);
blob_copy(_tmp2, preprocessedBlob);
}
}

5 changes: 4 additions & 1 deletion inference-engine/src/preprocessing/ie_preprocess_data.hpp
@@ -38,12 +38,14 @@ class IPreProcessData : public details::IRelease {
* @brief Sets ROI blob to be resized and placed to the default input blob during pre-processing.
* @param blob ROI blob.
*/
//FIXME: rename to setUserBlob
virtual void setRoiBlob(const Blob::Ptr &blob) = 0;

/**
* @brief Gets pointer to the ROI blob used for a given input.
* @return Blob pointer.
*/
//FIXME: rename to getUserBlob
virtual Blob::Ptr getRoiBlob() const = 0;

/**
@@ -53,8 +55,9 @@
* @param serial disable OpenMP threading if the value set to true.
* @param batchSize batch size for pre-processing.
*/
virtual void execute(Blob::Ptr &outBlob, const PreProcessInfo& info, bool serial, int batchSize = -1) = 0;
virtual void execute(Blob::Ptr &preprocessedBlob, const PreProcessInfo& info, bool serial, int batchSize = -1) = 0;

//FIXME: rename to verifyApplicable
virtual void isApplicable(const Blob::Ptr &src, const Blob::Ptr &dst) = 0;
};
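
A sketch of the call sequence InferRequestInternal drives through this interface (assembled from the diffs above; 'userBlob', 'deviceBlob', and 'info' are hypothetical placeholders, not names from the header):

// Sketch: how the base class exercises IPreProcessData for one input.
PreProcessDataPtr pp = CreatePreprocDataHelper();
pp->isApplicable(userBlob, deviceBlob);   // throws if this blob pair cannot be preprocessed
pp->setRoiBlob(userBlob);                 // remembers the user's ("ROI") blob
// ...later, inside execDataPreprocessing():
pp->execute(deviceBlob, info->getPreProcess(), /* serial = */ false, /* batchSize = */ -1);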

15 changes: 11 additions & 4 deletions inference-engine/src/preprocessing/ie_preprocess_gapi.cpp
@@ -276,8 +276,8 @@ void validateColorFormats(const G::Desc &in_desc,
};

// verify inputs/outputs and throw on error

if (output_color_format == ColorFormat::RAW) {
const bool color_conv_required = !((output_color_format == input_color_format) || (input_color_format == ColorFormat::RAW));
if (color_conv_required && (output_color_format == ColorFormat::RAW)) {
THROW_IE_EXCEPTION << "Network's expected color format is unspecified";
}

@@ -288,7 +288,7 @@
verify_layout(in_layout, "Input blob");
verify_layout(out_layout, "Network's blob");

if (input_color_format == ColorFormat::RAW) {
if (!color_conv_required) {
// verify input and output have the same number of channels
if (in_desc.d.C != out_desc.d.C) {
THROW_IE_EXCEPTION << "Input and network expected blobs have different number of "
@@ -949,6 +949,13 @@ bool PreprocEngine::preprocessBlob(const BlobTypePtr &inBlob, MemoryBlob::Ptr &o
out_desc_ie.getDims(),
out_fmt },
algorithm };

if (algorithm == NO_RESIZE && std::get<0>(thisCall) == std::get<1>(thisCall)) {
// if the requested output parameters match the input blob, there is nothing to do
THROW_IE_EXCEPTION << "No job to do in the PreProcessing ?";
return true;
}

const Update update = needUpdate(thisCall);

Opt<cv::GComputation> _lastComputation;
@@ -986,7 +993,7 @@ bool PreprocEngine::preprocessWithGAPI(const Blob::Ptr &inBlob, Blob::Ptr &outBl
return false;
}

const auto out_fmt = ColorFormat::BGR; // FIXME: get expected color format from network
const auto out_fmt = (in_fmt == ColorFormat::RAW) ? ColorFormat::RAW : ColorFormat::BGR; // FIXME: get expected color format from network

// output is always a memory blob
auto outMemoryBlob = as<MemoryBlob>(outBlob);
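
Read together with the validateColorFormats() change above, the intended effect (my reading of the change, hedged) is:

// With in_fmt == ColorFormat::RAW the output format now also stays RAW, so
//   color_conv_required = !((out_fmt == in_fmt) || (in_fmt == ColorFormat::RAW))
// evaluates to false, the "expected color format is unspecified" exception is
// no longer thrown for RAW output, and preprocessing reduces to the resize,
// layout, and precision steps needed to fill the plugin's _deviceInputs blob.
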
@@ -1177,7 +1177,7 @@ TEST_P(PreprocTest, Performance)
{
case Precision::U8: Blob2Img<Precision::U8> (out_blob, out_mat, out_layout); break;
case Precision::FP32: Blob2Img<Precision::FP32>(out_blob, out_mat, out_layout); break;
case Precision::U16: Blob2Img<Precision::FP32>(out_blob, out_mat, out_layout); break;
case Precision::U16: Blob2Img<Precision::U16>(out_blob, out_mat, out_layout); break;
default: FAIL() << "Unsupported configuration";
}
