Removed legacy methods SetBatch and SetBlob (openvinotoolkit#17984)
* Removed legacy methods SetBatch and SetBlob

* Fixed GPU plugin build

* Remove DYN_BATCH_LIMIT from tests

* Revert some changes in GPU plugin
ilyachur authored and alvoron committed Jun 21, 2023
1 parent d057e77 commit cacf540
Showing 47 changed files with 18 additions and 708 deletions.
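
For context: in the OpenVINO 2.x API there is no per-request SetBatch(), and no SetBlob overload that carries PreProcessInfo; the batch is fixed by reshaping the model (or made dynamic, as sketched later in the ie_plugin_config.hpp section), and inputs are bound as plain ov::Tensor objects. A minimal sketch of the replacement flow, not taken from this commit; the input name "data" and all shapes are illustrative assumptions:

// Sketch of the 2.x replacement flow ("data" and the shapes are assumptions).
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");

    // Instead of SetBatch(n) at request time: fix the batch up front by reshaping.
    model->reshape({{"data", ov::PartialShape{8, 3, 224, 224}}});

    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();

    // Instead of the removed SetBlob(name, blob, preProcessInfo): bind a plain tensor.
    ov::Tensor input(ov::element::f32, ov::Shape{8, 3, 224, 224});
    request.set_input_tensor(input);
    request.infer();
}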
samples/cpp/hello_query_device/README.md (2 changes: 0 additions & 2 deletions)
@@ -67,8 +67,6 @@ The application prints all available devices with their supported metrics and de
 [ INFO ] CPU_THREADS_NUM : 0
 [ INFO ] CPU_THROUGHPUT_STREAMS : 1
 [ INFO ] DUMP_EXEC_GRAPH_AS_DOT : ""
-[ INFO ] DYN_BATCH_ENABLED : NO
-[ INFO ] DYN_BATCH_LIMIT : 0
 [ INFO ] ENFORCE_BF16 : NO
 [ INFO ] EXCLUSIVE_ASYNC_REQUESTS : NO
 [ INFO ] PERFORMANCE_HINT : ""
samples/python/hello_query_device/README.md (2 changes: 0 additions & 2 deletions)
@@ -62,8 +62,6 @@ For example:
 [ INFO ] CPU_THREADS_NUM: 0
 [ INFO ] CPU_THROUGHPUT_STREAMS: 1
 [ INFO ] DUMP_EXEC_GRAPH_AS_DOT:
-[ INFO ] DYN_BATCH_ENABLED: NO
-[ INFO ] DYN_BATCH_LIMIT: 0
 [ INFO ] ENFORCE_BF16: NO
 [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO
 [ INFO ] PERFORMANCE_HINT:
@@ -240,11 +240,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED AsyncInferRequestThreadSafeDefault : publi
         _syncRequest->SetBlob(name, data);
     }
 
-    void SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info) override {
-        CheckState();
-        _syncRequest->SetBlob(name, data, info);
-    }
-
     void SetBlobs(const std::string& name, const std::vector<Blob::Ptr>& blobs) override {
         CheckState();
         _syncRequest->SetBlobs(name, blobs);
@@ -264,13 +259,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED AsyncInferRequestThreadSafeDefault : publi
         return _syncRequest->GetPreProcess(name);
     }
 
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    void SetBatch(int batch) override {
-        CheckState();
-        _syncRequest->SetBatch(batch);
-    };
-    OPENVINO_SUPPRESS_DEPRECATED_END
-
     void SetCallback(Callback callback) override {
         CheckState();
         _callback = std::move(callback);
@@ -131,30 +131,13 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn
      */
     virtual BatchedBlob::Ptr GetBlobs(const std::string& name);
 
-    /**
-     * @brief Sets pre-process for input data
-     * @param name Name of input blob.
-     * @param data - a reference to input or output blob. The type of Blob must correspond to the network input
-     * precision and size.
-     * @param info Preprocess info for blob.
-     */
-    virtual void SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info);
-
     /**
      * @brief Gets pre-process for input data
      * @param name Name of input blob.
      * @param info pointer to a pointer to PreProcessInfo structure
      */
     virtual const PreProcessInfo& GetPreProcess(const std::string& name) const;
 
-    /**
-     * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
-     * @deprecated
-     * @param batch - new batch size to be used by all the following inference calls for this request.
-     */
-    INFERENCE_ENGINE_DEPRECATED("This method is deprecated and will be removed in 2023.1 release")
-    virtual void SetBatch(int batch);
-
     /**
      * @brief Queries memory states.
      * @return Returns memory states
@@ -347,7 +330,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferRequestIn
     std::vector<std::shared_ptr<const ov::Node>> _results;    //!< A vector of function outputs
     std::map<std::string, PreProcessDataPtr> _preProcData;    //!< A map of pre-process data per input
     std::map<std::string, BatchedBlob::Ptr> _batched_inputs;  //!< A map of user passed blobs for network inputs
-    int m_curBatch = -1;                                       //!< Current batch value used in dynamic batching
 
     /**
      * @brief A shared pointer to IInferRequestInternal
src/inference/include/ie/cpp/ie_infer_request.hpp (20 changes: 0 additions & 20 deletions)
@@ -118,17 +118,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) {
      */
     Blob::Ptr GetBlob(const std::string& name);
 
-    /**
-     * @deprecated This method will be removed in 2023.1 release
-     * @brief Sets blob with a pre-process information
-     * @note Returns an error in case if data blob is output
-     * @param name Name of input blob.
-     * @param data A reference to input. The type of Blob must correspond to the network input precision and size.
-     * @param info Preprocess info for blob.
-     */
-    INFERENCE_ENGINE_DEPRECATED("This method is deprecated and will be removed in 2023.1 release")
-    void SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info);
-
     /**
      * @brief Gets pre-process for input data
      * @param name Name of input blob.
@@ -175,15 +164,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) {
      */
     void SetOutput(const BlobMap& results);
 
-    /**
-     * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
-     * @deprecated
-     *
-     * @param batch new batch size to be used by all the following inference calls for this request.
-     */
-    INFERENCE_ENGINE_DEPRECATED("This method is deprecated and will be removed in 2023.1 release")
-    void SetBatch(const int batch);
-
     /**
      * @brief Start inference of specified input(s) in asynchronous mode
      *
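
The SetBlob overload deleted above was the 1.x way to attach PreProcessInfo to an input at request time. In the 2.x API the equivalent is declared once on the model via ov::preprocess::PrePostProcessor. A minimal sketch, assuming a single u8 NHWC image input feeding an NCHW model (the layouts and element type are assumptions for illustration):

#include <openvino/core/preprocess/pre_post_process.hpp>

// 'model' is a std::shared_ptr<ov::Model> obtained from ov::Core::read_model().
ov::preprocess::PrePostProcessor ppp(model);
ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC");
ppp.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
ppp.input().model().set_layout("NCHW");
model = ppp.build();  // resize and conversion now run as part of the compiled model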
src/inference/include/ie/ie_iinfer_request.hpp (27 changes: 0 additions & 27 deletions)
@@ -85,21 +85,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_
      */
     virtual StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept = 0;
 
-    /**
-     * @deprecated This method will be removed in 2023.1 release
-     * @brief Sets pre-process for input data
-     * @param name Name of input blob.
-     * @param data Reference to input or output blob. The type of Blob must match the network input precision and size.
-     * @param info Preprocess info for blob.
-     * @param resp Optional: pointer to an already allocated object to contain information in case of failure
-     * @return Status code of the operation: OK (0) for success
-     */
-    INFERENCE_ENGINE_DEPRECATED("This method is deprecated and will be removed in 2023.1 release")
-    virtual StatusCode SetBlob(const char* name,
-                               const Blob::Ptr& data,
-                               const PreProcessInfo& info,
-                               ResponseDesc* resp) noexcept = 0;
-
     /**
      * @brief Gets pre-process for input data
      * @param name Name of input blob.
@@ -200,18 +185,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_
      */
     virtual StatusCode SetUserData(void* data, ResponseDesc* resp) noexcept = 0;
 
-    /**
-     * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
-     *
-     * @deprecated
-     * @param batch_size new batch size to be used by all the following inference calls for this request.
-     * @param resp Optional: a pointer to an already allocated object to contain extra information of a failure (if
-     * occurred)
-     * @return Enumeration of the resulted action: InferenceEngine::OK (0) for success
-     */
-    INFERENCE_ENGINE_DEPRECATED("This method is deprecated and will be removed in 2023.1 release")
-    virtual InferenceEngine::StatusCode SetBatch(int batch_size, ResponseDesc* resp) noexcept = 0;
-
 protected:
     virtual ~IInferRequest() = default;
 };
src/inference/include/ie/ie_plugin_config.hpp (24 changes: 0 additions & 24 deletions)
@@ -405,30 +405,6 @@ DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_AUTO);
 INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(PERF_COUNT);
 
-/**
- * @brief The key defines dynamic limit of batch processing.
- * @deprecated
- *
- * Specified value is applied to all following Infer() calls. Inference Engine processes
- * min(batch_limit, original_batch_size) first pictures from input blob. For example, if input
- * blob has sizes 32x3x224x224 after applying plugin.SetConfig({KEY_DYN_BATCH_LIMIT, 10})
- * Inference Engine primitives processes only beginner subblobs with size 10x3x224x224.
- * This value can be changed before any Infer() call to specify a new batch limit.
- *
- * The paired parameter value should be convertible to integer number. Acceptable values:
- * -1 - Do not limit batch processing
- * >0 - Direct value of limit. Batch size to process is min(new batch_limit, original_batch)
- */
-INFERENCE_ENGINE_DEPRECATED("This config is deprecated and will be removed in 2023.1 release")
-DECLARE_CONFIG_KEY(DYN_BATCH_LIMIT);
-
-/**
- * @brief The key checks whether dynamic batch is enabled.
- * @deprecated
- */
-INFERENCE_ENGINE_DEPRECATED("This config is deprecated and will be removed in 2023.1 release")
-DECLARE_CONFIG_KEY(DYN_BATCH_ENABLED);
-
 /**
  * @brief This key directs the plugin to load a configuration file.
  *
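
With KEY_DYN_BATCH_LIMIT and KEY_DYN_BATCH_ENABLED gone, the closest 2.x equivalent of "process only the first N pictures" is a dynamic batch dimension sized per request through the input tensor. A sketch reusing the 32-vs-10 example from the removed comment and continuing the earlier sketch ('core' and 'model' as before; the input name "data" is an assumption, and the target plugin must support dynamic shapes):

// Make only the batch dimension dynamic; spatial dims stay static.
model->reshape({{"data", ov::PartialShape{ov::Dimension::dynamic(), 3, 224, 224}}});
auto compiled = core.compile_model(model, "CPU");
auto request = compiled.create_infer_request();

// Old behaviour min(batch_limit, original_batch) = min(10, 32): submit a batch-10 tensor.
ov::Tensor first_ten(ov::element::f32, ov::Shape{10, 3, 224, 224});
request.set_input_tensor(first_ten);
request.infer();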
src/inference/src/cpp/ie_infer_async_request_base.hpp (8 changes: 0 additions & 8 deletions)
@@ -134,10 +134,6 @@ class InferRequestBase : public IInferRequest {
         TO_STATUS(_impl->SetBlob(name, data));
     }
 
-    StatusCode SetBlob(const char* name, const Blob::Ptr& data, const PreProcessInfo& info, ResponseDesc* resp) noexcept override {
-        TO_STATUS(_impl->SetBlob(name, data, info));
-    }
-
     StatusCode GetBlob(const char* name, Blob::Ptr& data, ResponseDesc* resp) noexcept override {
         TO_STATUS(data = _impl->GetBlob(name));
     }
@@ -181,10 +177,6 @@ class InferRequestBase : public IInferRequest {
     StatusCode SetUserData(void* data, ResponseDesc* resp) noexcept override {
         TO_STATUS(_impl->SetUserData(data));
     }
-
-    StatusCode SetBatch(int batch_size, ResponseDesc* resp) noexcept override {
-        TO_STATUS(_impl->SetBatch(batch_size));
-    }
 };
 
 IE_SUPPRESS_DEPRECATED_END
src/inference/src/cpp/ie_infer_request.cpp (8 changes: 0 additions & 8 deletions)
@@ -58,10 +58,6 @@ Blob::Ptr InferRequest::GetBlob(const std::string& name) {
     return blobPtr;
 }
 
-void InferRequest::SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info) {
-    INFER_REQ_CALL_STATEMENT(_impl->SetBlob(name, data, info);)
-}
-
 const PreProcessInfo& InferRequest::GetPreProcess(const std::string& name) const {
     INFER_REQ_CALL_STATEMENT(return _impl->GetPreProcess(name);)
 }
@@ -86,10 +82,6 @@ void InferRequest::SetOutput(const BlobMap& results) {
     INFER_REQ_CALL_STATEMENT(for (auto&& result : results) { _impl->SetBlob(result.first, result.second); })
 }
 
-void InferRequest::SetBatch(const int batch) {
-    INFER_REQ_CALL_STATEMENT(_impl->SetBatch(batch);)
-}
-
 void InferRequest::StartAsync() {
     INFER_REQ_CALL_STATEMENT(_impl->StartAsync();)
 }
@@ -407,18 +407,6 @@ BatchedBlob::Ptr IInferRequestInternal::GetBlobs(const std::string& name) {
     return nullptr;
 }
 
-void IInferRequestInternal::SetBlob(const std::string& name, const Blob::Ptr& data, const PreProcessInfo& info) {
-    InputInfo::Ptr foundInput;
-    DataPtr foundOutput;
-    if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) {
-        foundInput->getPreProcess() = copyPreProcess(info);
-    } else {
-        IE_THROW() << "Pre-process can't be set to output blob";
-    }
-
-    SetBlob(name, data);
-}
-
 const PreProcessInfo& IInferRequestInternal::GetPreProcess(const std::string& name) const {
     InputInfo::Ptr foundInput;
     DataPtr foundOutput;
@@ -429,10 +417,6 @@ const PreProcessInfo& IInferRequestInternal::GetPreProcess(const std::string& na
     }
 }
 
-void IInferRequestInternal::SetBatch(int batch) {
-    IE_THROW(NotImplemented);
-}
-
 std::vector<std::shared_ptr<IVariableStateInternal>> IInferRequestInternal::QueryState() {
     IE_THROW(NotImplemented);
 }
@@ -460,7 +444,7 @@ void IInferRequestInternal::execDataPreprocessing(InferenceEngine::BlobMap& prep
         // using preconfigured resize algorithm.
         auto it = _preProcData.find(input.first);
         if (it != _preProcData.end()) {
-            it->second->execute(input.second, _networkInputs[input.first]->getPreProcess(), serial, m_curBatch);
+            it->second->execute(input.second, _networkInputs[input.first]->getPreProcess(), serial, -1);
         }
     }
 }
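
The -1 now passed to execute() above is the preprocessing code's "no batch cap" sentinel: with the m_curBatch member gone, hard-coding -1 preserves the default of processing the whole blob. Roughly the declaration involved, reconstructed from memory of ie_preprocess_data.hpp rather than quoted from this commit, so treat it as an assumption:

// Sketch of the execute() signature (reconstructed, not verbatim):
virtual void execute(Blob::Ptr& preprocessedBlob,
                     const PreProcessInfo& info,
                     bool serial,
                     int batchSize = -1) = 0;  // batchSize < 0: process the full blob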
src/inference/src/dev/converter_utils.cpp (10 changes: 0 additions & 10 deletions)
@@ -541,20 +541,10 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern
         return std::make_shared<InferenceEngine::BatchedBlob>(blobs);
     }
 
-    void SetBlob(const std::string& name,
-                 const InferenceEngine::Blob::Ptr& data,
-                 const InferenceEngine::PreProcessInfo& info) override {
-        OPENVINO_NOT_IMPLEMENTED;
-    }
-
     const InferenceEngine::PreProcessInfo& GetPreProcess(const std::string& name) const override {
         OPENVINO_NOT_IMPLEMENTED;
     }
 
-    void SetBatch(int batch) override {
-        OPENVINO_NOT_IMPLEMENTED;
-    }
-
     std::vector<std::shared_ptr<InferenceEngine::IVariableStateInternal>> QueryState() override {
         auto res = m_request->query_state();
         std::vector<std::shared_ptr<InferenceEngine::IVariableStateInternal>> ret;
src/inference/tests/functional/async_infer_request_test.cpp (14 changes: 0 additions & 14 deletions)
@@ -23,13 +23,6 @@ TEST(InferRequestCPPTests, throwsOnUninitializedGetBlob) {
     ASSERT_THROW(req.GetBlob({}), InferenceEngine::NotAllocated);
 }
 
-TEST(InferRequestCPPTests, throwsOnUninitializedSetBlobPreproc) {
-    InferRequest req;
-    IE_SUPPRESS_DEPRECATED_START
-    ASSERT_THROW(req.SetBlob({}, {}, {}), InferenceEngine::NotAllocated);
-    IE_SUPPRESS_DEPRECATED_END
-}
-
 TEST(InferRequestCPPTests, throwsOnUninitializedGetPreProcess) {
     InferRequest req;
     ASSERT_THROW(req.GetPreProcess({}), InferenceEngine::NotAllocated);
@@ -55,13 +48,6 @@ TEST(InferRequestCPPTests, throwsOnUninitializedSetOutput) {
     ASSERT_THROW(req.SetOutput({{}}), InferenceEngine::NotAllocated);
 }
 
-TEST(InferRequestCPPTests, throwsOnUninitializedSetBatch) {
-    InferRequest req;
-    IE_SUPPRESS_DEPRECATED_START
-    ASSERT_THROW(req.SetBatch({}), InferenceEngine::NotAllocated);
-    IE_SUPPRESS_DEPRECATED_END
-}
-
 TEST(InferRequestCPPTests, throwsOnUninitializedStartAsync) {
     InferRequest req;
     ASSERT_THROW(req.StartAsync(), InferenceEngine::NotAllocated);
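
The two deleted tests covered the removed methods on an uninitialized request; the analogous check against the 2.x API verifies that a default-constructed ov::InferRequest rejects calls. A sketch (the test name is made up, and it assumes gtest plus that an uninitialized ov::InferRequest throws ov::Exception):

TEST(InferRequestOVTests, throwsOnUninitializedSetTensor) {
    ov::InferRequest req;  // default-constructed, no backing implementation
    ASSERT_THROW(req.set_tensor("data", ov::Tensor()), ov::Exception);
}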
src/plugins/auto/src/infer_request.cpp (9 changes: 0 additions & 9 deletions)
@@ -110,15 +110,6 @@ void MultiDeviceInferRequest::SetBlob(const std::string& name, const InferenceEn
     IInferRequestInternal::SetBlob(name, blob);
 }
 
-IE_SUPPRESS_DEPRECATED_START
-void MultiDeviceInferRequest::SetBlob(const std::string& name, const Blob::Ptr& blob, const PreProcessInfo& info) {
-    if (_sharedRequest)
-        _sharedRequest->SetBlob(name, blob, info);
-    else
-        IInferRequestInternal::SetBlob(name, blob, info);
-}
-IE_SUPPRESS_DEPRECATED_END
-
 InferenceEngine::Blob::Ptr MultiDeviceInferRequest::GetBlob(const std::string& name) {
     if (_sharedRequest)
         return _sharedRequest->GetBlob(name);
src/plugins/auto/src/infer_request.hpp (7 changes: 0 additions & 7 deletions)
@@ -39,13 +39,6 @@ class MultiDeviceInferRequest : public InferenceEngine::IInferRequestInternal {
                             InferenceEngine::RemoteContext::Ptr ctx = nullptr);
     std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> GetPerformanceCounts() const override;
     void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& blob) override;
-    /**
-     * @deprecated This method will be removed in 2024.1 release
-     * @brief Sets blob with a pre-process information
-     */
-    void SetBlob(const std::string& name,
-                 const InferenceEngine::Blob::Ptr& blob,
-                 const InferenceEngine::PreProcessInfo& info) override;
     InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override;
     std::vector<std::shared_ptr<InferenceEngine::IVariableStateInternal>> QueryState() override;
     // Multi-Device impl specific: sets the data (blobs from the device-less requests to the specific device request)
src/plugins/hetero/infer_request.cpp (8 changes: 0 additions & 8 deletions)
@@ -102,14 +102,6 @@ InferenceEngine::Blob::Ptr HeteroInferRequest::GetBlob(const std::string& name)
     return itRequest->second->GetBlob(name);
 }
 
-void HeteroInferRequest::SetBlob(const std::string& name, const Blob::Ptr& blob, const PreProcessInfo& info) {
-    auto itRequest = _subRequestFromBlobName.find(name);
-    if (itRequest == _subRequestFromBlobName.end()) {
-        IE_THROW() << "There is no infer requests binded to blob with name: " << name;
-    }
-    itRequest->second->SetBlob(name, blob, info);
-}
-
 const InferenceEngine::PreProcessInfo& HeteroInferRequest::GetPreProcess(const std::string& name) const {
     auto itRequest = _subRequestFromBlobName.find(name);
     if (itRequest == _subRequestFromBlobName.end()) {
src/plugins/hetero/infer_request.hpp (4 changes: 0 additions & 4 deletions)
@@ -44,10 +44,6 @@ class HeteroInferRequest : public InferenceEngine::IInferRequestInternal {
 
     InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override;
 
-    void SetBlob(const std::string& name,
-                 const InferenceEngine::Blob::Ptr& blob,
-                 const InferenceEngine::PreProcessInfo& info) override;
-
     const InferenceEngine::PreProcessInfo& GetPreProcess(const std::string& name) const override;
 
     std::vector<std::shared_ptr<InferenceEngine::IVariableStateInternal>> QueryState() override;