Commit
Merge remote-tracking branch 'upstream/master' into move_node_to_ov
ilyachur committed Aug 29, 2021
2 parents b7c7803 + 4a07a0b commit 20e8fc9
Showing 27 changed files with 234 additions and 154 deletions.
19 changes: 10 additions & 9 deletions docs/ops/logical/LogicalNot_1.md
@@ -6,31 +6,32 @@

**Short description**: *LogicalNot* performs an element-wise logical negation operation on a given tensor.

**Attributes**:
**Detailed description**: *LogicalNot* performs an element-wise logical negation operation on a given tensor, based on the following mathematical formula:

No attributes available.
\f[
a_{i} = \lnot a_{i}
\f]

**Attributes**: *LogicalNot* operation has no attributes.

**Inputs**

* **1**: An tensor of type *T*. **Required.**
* **1**: A tensor of type *T_BOOL* and arbitrary shape. **Required.**

**Outputs**

* **1**: The result of element-wise logical negation operation. A tensor of type *T*.
* **1**: The result of the element-wise logical negation operation. A tensor of type *T_BOOL* and the same shape as the input tensor.

**Types**

* *T*: boolean type.

*LogicalNot* does the following with the input tensor *a*:
* *T_BOOL*: `boolean`.

\f[
a_{i} = \lnot a_{i}
\f]

**Examples**

*Example 1*
**Example**

```xml
<layer ... type="LogicalNot">
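The negation semantics are easy to state in code as well. Below is a minimal sketch (a hypothetical helper, not part of the specification) that mirrors the `std::vector<char>` boolean representation used by the reference tests later in this commit:

```cpp
#include <vector>

// Reference semantics of LogicalNot: out[i] = NOT a[i] for every element.
std::vector<char> logical_not_reference(const std::vector<char>& a) {
    std::vector<char> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        out[i] = !a[i];
    }
    return out;
}
```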
2 changes: 1 addition & 1 deletion docs/optimization_guide/dldt_optimization_guide.md
@@ -338,7 +338,7 @@ For inference on the CPU there are multiple threads binding options, see
If you are building an app-level pipeline with third-party components like GStreamer*, the general guidance for NUMA machines is as follows:
- Whenever possible, use at least one instance of the pipeline per NUMA node:
- Pin the _entire_ pipeline instance to the specific NUMA node at the outer-most level (for example, use Kubernetes* and/or `numactl` command with proper settings before actual GStreamer commands).
- Disable any individual pinning by the pipeline components (e.g. set [CPU_BIND_THREADS to 'NO'](../IE_DG/supported_plugins/CPU.md)).
- Disable any individual pinning by the pipeline components (for example, set [CPU_BIND_THREAD to 'NO'](../IE_DG/supported_plugins/CPU.md)).
- Limit each instance with respect to the number of inference threads. Use [CPU_THREADS_NUM](../IE_DG/supported_plugins/CPU.md) or other means (e.g. virtualization, Kubernetes*) to avoid oversubscription; a configuration sketch follows this list.
- If instancing/pinning of the entire pipeline is not possible or desirable, relax the inference threads pinning to just 'NUMA'.
- This is less restrictive compared to the default pinning of threads to cores, yet avoids NUMA penalties.
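A minimal sketch of these per-instance settings through the Inference Engine C++ API is shown below (the key names are the standard `CONFIG_KEY` constants from `ie_plugin_config.hpp`; the thread count is illustrative and should be tuned per NUMA node):

```cpp
#include <ie_core.hpp>
#include <ie_plugin_config.hpp>

void configure_cpu_instance() {
    InferenceEngine::Core ie;
    // Relax thread pinning from per-core to per-NUMA-node and cap the
    // number of inference threads to avoid oversubscription.
    ie.SetConfig({{CONFIG_KEY(CPU_BIND_THREAD), "NUMA"},
                  {CONFIG_KEY(CPU_THREADS_NUM), "8"}},
                 "CPU");
}
```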
34 changes: 19 additions & 15 deletions docs/template_plugin/tests/functional/op_reference/logical.hpp
@@ -18,44 +18,48 @@ namespace LogicalOpsRefTestDefinitions {

struct RefLogicalParams {
ngraph::helpers::LogicalTypes opType;
Tensor input1;
Tensor input2;
std::vector<Tensor> inputs;
Tensor expected;
};

struct Builder : ParamsBuilder<RefLogicalParams> {
REFERENCE_TESTS_ADD_SET_PARAM(Builder, opType);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input1);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input2);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, inputs);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expected);
};

class ReferenceLogicalLayerTest : public testing::TestWithParam<RefLogicalParams>, public CommonReferenceTest {
public:
void SetUp() override {
const auto& params = GetParam();
function = CreateFunction(params.opType, params.input1.shape, params.input2.shape, params.input1.type);
inputData = {params.input1.data, params.input2.data};
function = CreateFunction(params.opType, params.inputs);
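// Collect input blobs in declaration order; the op under test may take one input (NOT) or two (AND/OR/XOR).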
for (auto& input : params.inputs) {
inputData.push_back(input.data);
}
refOutData = {params.expected.data};
}
static std::string getTestCaseName(const testing::TestParamInfo<RefLogicalParams>& obj) {
const auto& param = obj.param;
std::ostringstream result;
result << "LogicalType=" << param.opType << "_";
result << "inpt_shape1=" << param.input1.shape << "_";
result << "inpt_shape2=" << param.input2.shape << "_";
result << "iType=" << param.input1.type << "_";
for (size_t i = 0; i < param.inputs.size(); i++) {
const auto& input = param.inputs[i];
result << "inpt_shape" << i << "=" << input.shape << "_";
result << "inpt_type" << i << "=" << input.type << "_";
}
result << "oType=" << param.expected.type;
return result.str();
}

private:
static std::shared_ptr<ngraph::Function> CreateFunction(ngraph::helpers::LogicalTypes op_type, const ngraph::PartialShape& input_shape1,
const ngraph::PartialShape& input_shape2, const ngraph::element::Type& elem_type) {
const auto in1 = std::make_shared<ngraph::op::Parameter>(elem_type, input_shape1);
const auto in2 = std::make_shared<ngraph::op::Parameter>(elem_type, input_shape2);
const auto logical_op = ngraph::builder::makeLogical(in1, in2, op_type);
return std::make_shared<ngraph::Function>(ngraph::NodeVector {logical_op}, ngraph::ParameterVector {in1, in2});
static std::shared_ptr<ngraph::Function> CreateFunction(ngraph::helpers::LogicalTypes op_type, const std::vector<Tensor>& inputs) {
ngraph::ParameterVector params_vec;
for (auto& input : inputs) {
params_vec.push_back(std::make_shared<ngraph::op::Parameter>(input.type, input.shape));
}
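// makeLogical builds the matching logical op (unary NOT or binary AND/OR/XOR) from the collected parameters.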

const auto logical_op = ngraph::builder::makeLogical(params_vec, op_type);
return std::make_shared<ngraph::Function>(ngraph::NodeVector {logical_op}, ngraph::ParameterVector {params_vec});
}
};
} // namespace LogicalOpsRefTestDefinitions
@@ -24,18 +24,18 @@ std::vector<RefLogicalParams> generateLogicalParams() {
std::vector<RefLogicalParams> logicalParams {
Builder {}
.opType(LogicalTypes::LOGICAL_AND)
.input1({{2, 2}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{2, 2}, element::boolean, std::vector<char> {false, true, true, false}})
.inputs({{{2, 2}, element::boolean, std::vector<char> {true, false, true, false}},
{{2, 2}, element::boolean, std::vector<char> {false, true, true, false}}})
.expected({{2, 2}, element::boolean, std::vector<char> {false, false, true, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_AND)
.input1({{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}})
.inputs({{{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}},
{{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}}})
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_AND)
.input1({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}})
.input2({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false}})
.inputs({{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}},
{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false}}})
.expected({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false}})};
return logicalParams;
}
37 changes: 37 additions & 0 deletions docs/template_plugin/tests/functional/op_reference/logical_not.cpp
@@ -0,0 +1,37 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>

#include "logical.hpp"

using namespace ngraph;
using namespace InferenceEngine;
using LogicalTypes = ngraph::helpers::LogicalTypes;

namespace reference_tests {
namespace LogicalOpsRefTestDefinitions {
namespace {

std::vector<RefLogicalParams> generateLogicalParams() {
std::vector<RefLogicalParams> logicalParams {
Builder {}
.opType(LogicalTypes::LOGICAL_NOT)
.inputs({{{2, 2}, element::boolean, std::vector<char> {true, false, true, false}}})
.expected({{2, 2}, element::boolean, std::vector<char> {false, true, false, true}})};
return logicalParams;
}

INSTANTIATE_TEST_SUITE_P(smoke_LogicalNot_With_Hardcoded_Refs, ReferenceLogicalLayerTest, ::testing::ValuesIn(generateLogicalParams()),
ReferenceLogicalLayerTest::getTestCaseName);

} // namespace
} // namespace LogicalOpsRefTestDefinitions
} // namespace reference_tests
@@ -24,18 +24,18 @@ std::vector<RefLogicalParams> generateLogicalParams() {
std::vector<RefLogicalParams> logicalParams {
Builder {}
.opType(LogicalTypes::LOGICAL_OR)
.input1({{2, 2}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{2, 2}, element::boolean, std::vector<char> {false, true, true, false}})
.inputs({{{2, 2}, element::boolean, std::vector<char> {true, false, true, false}},
{{2, 2}, element::boolean, std::vector<char> {false, true, true, false}}})
.expected({{2, 2}, element::boolean, std::vector<char> {true, true, true, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_OR)
.input1({{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}})
.inputs({{{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}},
{{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}}})
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_OR)
.input1({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}})
.input2({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, true, true, false, false, true, true, false}})
.inputs({{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}},
{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, true, true, false, false, true, true, false}}})
.expected({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, true, true, false, false, true, true, true}})};
return logicalParams;
}
@@ -24,18 +24,18 @@ std::vector<RefLogicalParams> generateLogicalParams() {
std::vector<RefLogicalParams> logicalParams {
Builder {}
.opType(LogicalTypes::LOGICAL_XOR)
.input1({{2, 2}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{2, 2}, element::boolean, std::vector<char> {false, true, true, false}})
.inputs({{{2, 2}, element::boolean, std::vector<char> {true, false, true, false}},
{{2, 2}, element::boolean, std::vector<char> {false, true, true, false}}})
.expected({{2, 2}, element::boolean, std::vector<char> {true, true, false, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_XOR)
.input1({{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}})
.input2({{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}})
.inputs({{{2, 1, 2, 1}, element::boolean, std::vector<char> {true, false, true, false}},
{{1, 1, 2, 1}, element::boolean, std::vector<char> {true, false}}})
.expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {false, false, false, false}}),
Builder {}
.opType(LogicalTypes::LOGICAL_XOR)
.input1({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}})
.input2({{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, true, true, false, false, true, true, false}})
.inputs({{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true}},
{{3, 4}, element::boolean, std::vector<char> {true, true, true, true, true, true, true, false, false, true, true, false}}})
.expected({{3, 4}, element::boolean, std::vector<char> {false, false, false, false, false, true, false, false, false, false, false, true}})};
return logicalParams;
}
@@ -18,6 +18,7 @@ struct InputDesc {
std::unordered_map<std::string, intel_dnn_orientation_t> orientation_in;
/// order of scale factors matches inputs order in original topology
std::vector<float> inputScaleFactors;
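/// precisions of network inputs, in the same order; captured from the original network and used when exporting the model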
std::vector<uint8_t> inputPrecisions;
std::map<std::string, int> bytes_allocated_for_input;
size_t minBytesRequiredForStoreInput(InferenceEngine::CNNLayerPtr);

@@ -10,6 +10,7 @@

namespace GNAPluginNS {
struct OutputDesc {
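/// precision of the network output, captured from the original network and used when exporting the model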
uint8_t precision;
double scale_factor = 1.0;
uint32_t num_bytes_per_element = 0;
uint32_t num_elements = 0;
6 changes: 2 additions & 4 deletions inference-engine/src/gna_plugin/gna_model_serial.cpp
@@ -831,14 +831,13 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeOutputs(cons
}
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(outputDims.begin(), outputDims.end()));
InferenceEngine::Layout outputLayout = output.second->getLayout();
InferenceEngine::Precision::ePrecision outputPrecision = InferenceEngine::Precision::FP32;
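// Per-output precision now comes from outputsDesc (captured from the original network) instead of a hard-coded FP32.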
HeaderLatest::RuntimeEndPoint endPoint(outputsDesc[outputIndex].scale_factor,
outputsDesc[outputIndex].ptrs[0],
outputsDesc[outputIndex].num_bytes_per_element,
elementsCount,
outputShape,
outputLayout,
outputPrecision,
outputsDesc[outputIndex].precision,
outputsDesc[outputIndex].orientation);
endPoints.push_back(endPoint);
outputIndex++;
@@ -866,7 +865,7 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeInputs(const
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(inputDims.begin(), inputDims.end()));
intel_dnn_orientation_t orientation = inputDesc->getOrientation(inputName);
InferenceEngine::Layout inputLayout = input.second->getLayout();
InferenceEngine::Precision::ePrecision inputPrecision = InferenceEngine::Precision::FP32;
uint8_t inputPrecision = inputDesc->inputPrecisions.at(inputIndex);
HeaderLatest::RuntimeEndPoint endPoint(scaleFactor,
descriptor_ptr[0],
element_size,
@@ -886,7 +885,6 @@ void GNAModelSerial::ImportInputs(std::istream &is,
std::shared_ptr<GNAPluginNS::InputDesc> inputsDesc,
InferenceEngine::InputsDataMap& dataMap) {
dataMap.clear();

for (uint32_t inputIndex = 0; inputIndex < modelHeader.nInputs; inputIndex++) {
const std::string& name = (modelHeader.version.major == 2 && modelHeader.version.minor >= 3)
? inputNames.at(inputIndex) : std::string("input" + std::to_string(inputIndex));
34 changes: 31 additions & 3 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -518,6 +518,33 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork & networ
}
}

void GNAPlugin::UpdateInputsAndOutputsInfoFromNetwork(InferenceEngine::CNNNetwork & network) {
OV_ITT_SCOPED_TASK(itt::domains::GNA_LT, "UpdateInputsAndOutputsInfoFromNetwork");

// update inputs
{
InputsDataMap inputs = network.getInputsInfo();
inputsDesc->inputPrecisions.clear();
for (const auto& input : inputs) {
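// getPrecVal() returns the raw ePrecision value; it is narrowed to uint8_t for model serialization.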
inputsDesc->inputPrecisions.push_back(input.second->getPrecision().getPrecVal());
}
}

// update outputs
{
OutputsDataMap outputs = network.getOutputsInfo();
outputsDesc.resize(outputs.size());
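// outputsDesc is sized here (it was previously resized later in LoadNetwork) so each output's precision can be recorded before compilation.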

size_t outputIdx = 0;
for (const auto& output : outputs) {
outputsDesc[outputIdx].precision = output.second->getPrecision().getPrecVal();
++outputIdx;
}
}
}

bool GNAPlugin::TryToInitOutput(int portId, InferenceEngine::CNNLayerPtr layer) {
auto initOutput = [this, portId, layer]
(intel_dnn_orientation_t orientation, size_t numBytesPerElem, size_t numElem, void* outputPtr) {
@@ -759,6 +786,9 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
UpdateGnaQuantModeFromNetwork(network);
UpdateInputScaleFromNetwork(network);

// Set input and output information from original network
UpdateInputsAndOutputsInfoFromNetwork(network);

if (MustBeConvertedFromNCHWToNHWC(details::CNNNetSortTopologically(network))) {
FillInputsAndOutputsTranspositionInfo(network);
}
@@ -922,7 +952,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
inputsDesc->getPtrInputsGlobal(input.first).resize(gnaFlags->gna_lib_async_threads_num);
}

// CreatingLayer primitives
// Creating Layer primitives
for (auto & layer : sortedNoMem) {
graphCompiler.CreateLayerPrimitive(layer);
}
@@ -940,8 +970,6 @@
}

/// setting-up output layers information
outputsDesc.resize(outputsDataMap.size());

int portId = 0;
for (auto && outPort : outputsDataMap) {
// gets output layer pointer in original topology not in cloned
1 change: 1 addition & 0 deletions inference-engine/src/gna_plugin/gna_plugin.hpp
@@ -213,6 +213,7 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
void UpdateFieldsFromConfig();
void UpdateGnaQuantModeFromNetwork(InferenceEngine::CNNNetwork &);
void UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork &);
void UpdateInputsAndOutputsInfoFromNetwork(InferenceEngine::CNNNetwork &);
/**
* @brief Tries to init an output on the base of a layer data
* @param portId output port identifier
