Skip to content

Commit

Permalink
Develop Bucketize Reference Implementation (#3693)
Browse files Browse the repository at this point in the history
* Bucketize: Revise op class and add type_prop unit tests

* Bucketize: Develop reference implementation

* Bucketize: Add unit tests

* Bucketize: Add single layer test and cpu instantiation

* Bucketize: Add unit test with empty buckets for INTERPRETER

* Bucketize: Typo in buckets element type check

* Bucketize: Add custom generated inputs in single layer test class

* Bucketize: Use random_device to generate seed for data blob

* Bucketize: Remove unsupported f64 precision

* Bucketize: Add function description

* Bucketize: Remove randomness of inputs generation by using static seed

* Bucketize: Support different precisions for data and bucket inputs

* Bucketize: Refactor type_prop tests and improve backend unit test coverage
  • Loading branch information
ggalieroc authored Jan 15, 2021
1 parent a280a3a commit a555908
Show file tree
Hide file tree
Showing 14 changed files with 625 additions and 21 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/bucketize.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

const std::vector<std::vector<size_t>> dataShapes = {
{1, 20, 20},
{2, 3, 50, 50}
};

const std::vector<std::vector<size_t>> bucketsShapes = {
{5},
{20},
{100}
};

const std::vector<InferenceEngine::Precision> inPrc = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::I32
};

const std::vector<InferenceEngine::Precision> netPrc = {
InferenceEngine::Precision::I64,
InferenceEngine::Precision::I32
};

const auto test_Bucketize_right_edge = ::testing::Combine(
::testing::ValuesIn(dataShapes),
::testing::ValuesIn(bucketsShapes),
::testing::Values(true),
::testing::ValuesIn(inPrc),
::testing::ValuesIn(inPrc),
::testing::ValuesIn(netPrc),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);

const auto test_Bucketize_left_edge = ::testing::Combine(
::testing::ValuesIn(dataShapes),
::testing::ValuesIn(bucketsShapes),
::testing::Values(false),
::testing::ValuesIn(inPrc),
::testing::ValuesIn(inPrc),
::testing::ValuesIn(netPrc),
::testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(smoke_TestsBucketize_right, BucketizeLayerTest, test_Bucketize_right_edge, BucketizeLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_TestsBucketize_left, BucketizeLayerTest, test_Bucketize_left_edge, BucketizeLayerTest::getTestCaseName);
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_layer/bucketize.hpp"

namespace LayerTestsDefinitions {

// Runs the shared single-layer test flow (build the ngraph function from the
// test parameters, infer on the target device, compare against the reference).
TEST_P(BucketizeLayerTest, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>

#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"

#include "shared_test_classes/base/layer_test_utils.hpp"

namespace LayerTestsDefinitions {

// Parameter tuple consumed by BucketizeLayerTest; field order must match
// the std::tie unpacking in getTestCaseName and SetUp.
using bucketizeParamsTuple = std::tuple<
InferenceEngine::SizeVector, // Data shape
InferenceEngine::SizeVector, // Buckets shape
bool, // Right edge of interval
InferenceEngine::Precision, // Data input precision
InferenceEngine::Precision, // Buckets input precision
InferenceEngine::Precision, // Output precision
std::string>; // Device name

// Shared single-layer test class for the Bucketize operation.
class BucketizeLayerTest : public testing::WithParamInterface<bucketizeParamsTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
// Builds a human-readable test name from the parameter tuple.
static std::string getTestCaseName(testing::TestParamInfo<bucketizeParamsTuple> obj);
// Supplies deterministic inputs: random data for the "a_data" parameter and a
// sorted unique sequence for "b_buckets" (Bucketize expects sorted buckets).
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
protected:
void SetUp() override;
};

} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/bucketize.hpp"

namespace LayerTestsDefinitions {

// Builds a human-readable, filter-friendly test name from the parameter tuple.
std::string BucketizeLayerTest::getTestCaseName(testing::TestParamInfo<bucketizeParamsTuple> obj) {
    InferenceEngine::SizeVector inputShape;
    InferenceEngine::SizeVector boundariesShape;
    bool rightBound;
    InferenceEngine::Precision dataPrecision;
    InferenceEngine::Precision bucketsPrecision;
    InferenceEngine::Precision outPrecision;
    std::string device;
    std::tie(inputShape, boundariesShape, rightBound, dataPrecision, bucketsPrecision, outPrecision, device) = obj.param;

    std::ostringstream name;
    name << "DS=" << CommonTestUtils::vec2str(inputShape) << "_"
         << "BS=" << CommonTestUtils::vec2str(boundariesShape) << "_"
         << (rightBound ? "rightIntervalEdge_" : "leftIntervalEdge_")
         << "inDataPrc=" << dataPrecision.name() << "_"
         << "inBucketsPrc=" << bucketsPrecision.name() << "_"
         << "netPrc=" << outPrecision.name() << "_"
         << "trgDev=" << device;
    return name.str();
}

// Supplies deterministic inputs, dispatched on the parameter's friendly name:
// "a_data" gets random values in a range proportional to the element count,
// "b_buckets" gets a sorted sequence of unique values (Bucketize expects the
// bucket boundaries to be sorted). Seeds are fixed for reproducibility.
InferenceEngine::Blob::Ptr BucketizeLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
    InferenceEngine::Blob::Ptr blobPtr;
    const std::string name = info.name();
    if (name == "a_data") {
        auto data_shape = info.getTensorDesc().getDims();
        // Accumulate with a uint64_t init value: a plain int literal makes
        // std::accumulate deduce int and truncate the running product for
        // large shapes, despite the uint64_t multiplies functor.
        auto data_size = std::accumulate(begin(data_shape), end(data_shape), uint64_t{1}, std::multiplies<uint64_t>());
        blobPtr = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_size * 5, 0, 10, 7235346);
    } else if (name == "b_buckets") {
        blobPtr = FuncTestUtils::createAndFillBlobUniqueSequence(info.getTensorDesc(), 0, 10, 8234231);
    }
    // NOTE(review): any other friendly name returns a null blob — relies on
    // SetUp() naming the parameters exactly "a_data" / "b_buckets".
    return blobPtr;
}

void BucketizeLayerTest::SetUp() {
InferenceEngine::SizeVector dataShape;
InferenceEngine::SizeVector bucketsShape;
bool with_right_bound;
InferenceEngine::Precision inDataPrc;
InferenceEngine::Precision inBucketsPrc;
InferenceEngine::Precision netPrc;

std::tie(dataShape, bucketsShape, with_right_bound, inDataPrc, inBucketsPrc, netPrc, targetDevice) = this->GetParam();

auto ngInDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inDataPrc);
auto ngInBucketsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inBucketsPrc);
auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc);
auto data = std::make_shared<ngraph::op::Parameter>(ngInDataPrc, ngraph::Shape(dataShape));
data->set_friendly_name("a_data");
auto buckets = std::make_shared<ngraph::op::Parameter>(ngInBucketsPrc, ngraph::Shape(bucketsShape));
buckets->set_friendly_name("b_buckets");
auto bucketize = std::make_shared<ngraph::op::v3::Bucketize>(data, buckets, ngNetPrc, with_right_bound);
function = std::make_shared<ngraph::Function>(std::make_shared<ngraph::opset1::Result>(bucketize), ngraph::ParameterVector{data, buckets}, "Bucketize");
}
} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,52 @@ void inline fill_data_random(InferenceEngine::Blob::Ptr &blob, const uint32_t r
fill_data_random(rawBlobDataPtr, blob->size(), range, start_from, k, seed);
}

/** @brief Fill blob with a sorted sequence of unique elements randomly generated.
 *
 * This function generates and fills a blob of a certain precision, with a
 * sorted sequence of unique elements.
 *
 * @param blob Target blob
 * @param range Values range
 * @param start_from Value from which range should start
 * @param k Resolution of floating point numbers.
 * - With k = 1 every random number will be basically integer number.
 * - With k = 2 numbers resolution will be 1/2 so outputs only .0 or .50
 * - With k = 4 numbers resolution will be 1/4 so outputs only .0 .25 .50 .75 and etc.
 * @param seed Fixed seed of the pseudo-random generator, making the fill reproducible
 */
template<InferenceEngine::Precision::ePrecision PRC>
void inline fill_random_unique_sequence(InferenceEngine::Blob::Ptr& blob,
                                        uint32_t range,
                                        int32_t start_from = 0,
                                        const int32_t k = 1,
                                        const int32_t seed = 1) {
    using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
    auto *rawBlobDataPtr = blob->buffer().as<dataType *>();

    // Unsigned precisions cannot represent negative values.
    if (start_from < 0 && !std::is_signed<dataType>::value) {
        start_from = 0;
    }

    // The range must contain at least blob->size() distinct integers,
    // otherwise the uniqueness loop below could never terminate.
    if (range < blob->size()) {
        range = static_cast<uint32_t>(blob->size() * 2);
    }

    std::mt19937 generator(seed);
    // Draw in 64-bit space: k * (start_from + range) can overflow int32_t
    // for large ranges or high resolutions.
    std::uniform_int_distribution<int64_t> dist(
        static_cast<int64_t>(k) * start_from,
        static_cast<int64_t>(k) * (static_cast<int64_t>(start_from) + range));

    // std::set both deduplicates and yields the values in sorted order.
    std::set<dataType> elems;
    while (elems.size() != blob->size()) {
        auto value = static_cast<float>(dist(generator));
        value /= static_cast<float>(k);
        if (PRC == InferenceEngine::Precision::FP16) {
            // Store fp16 bit patterns; for non-negative values the bit order
            // matches the numeric order.
            // NOTE(review): with a negative start_from, fp16 bit patterns do
            // not sort in numeric order (sign bit set sorts last) — confirm
            // callers only use non-negative ranges for FP16.
            elems.insert(ngraph::float16(value).to_bits());
        } else {
            elems.insert(static_cast<dataType>(value));
        }
    }
    std::copy(elems.begin(), elems.end(), rawBlobDataPtr);
}

template<InferenceEngine::Precision::ePrecision PRC>
void inline fill_data_consistently(InferenceEngine::Blob::Ptr &blob, const uint32_t range = 10, int32_t start_from = 0, const int32_t k = 1) {
using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -517,6 +517,32 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobConsistently(
return blob;
}

/** @brief Creates a blob of the given precision and fills it with a sorted
 * sequence of unique pseudo-random values (see fill_random_unique_sequence).
 *
 * @param td Tensor descriptor (precision, dims, layout) of the blob to create
 * @param start_from Value from which the generated range should start
 * @param resolution Resolution of floating point numbers (k in the filler)
 * @param seed Fixed seed of the pseudo-random generator
 * @return Allocated and filled blob
 */
InferenceEngine::Blob::Ptr inline createAndFillBlobUniqueSequence(
        const InferenceEngine::TensorDesc &td,
        const int32_t start_from = 0,
        const int32_t resolution = 1,
        const int32_t seed = 1) {
    InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
    blob->allocate();
    auto shape = td.getDims();
    // Accumulate with a uint64_t init value: a plain int literal makes
    // std::accumulate deduce int and truncate the running product for large
    // shapes, despite the uint64_t multiplies functor. Range is doubled so the
    // filler always has enough distinct values available.
    auto range = std::accumulate(begin(shape), end(shape), uint64_t{1}, std::multiplies<uint64_t>()) * 2;
    switch (td.getPrecision()) {
#define CASE(X) case X: CommonTestUtils::fill_random_unique_sequence<X>(blob, range, start_from, resolution, seed); break;
        CASE(InferenceEngine::Precision::FP32)
        CASE(InferenceEngine::Precision::FP16)
        CASE(InferenceEngine::Precision::U8)
        CASE(InferenceEngine::Precision::U16)
        CASE(InferenceEngine::Precision::I8)
        CASE(InferenceEngine::Precision::I16)
        CASE(InferenceEngine::Precision::I64)
        CASE(InferenceEngine::Precision::I32)
#undef CASE
        default:
            THROW_IE_EXCEPTION << "Wrong precision specified: " << td.getPrecision().name();
    }
    return blob;
}

InferenceEngine::Blob::Ptr inline convertBlobLayout(const InferenceEngine::Blob::Ptr& in,
InferenceEngine::Layout layout) {
IE_ASSERT(in != nullptr) << "Got NULL pointer";
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <algorithm>

#include "ngraph/shape.hpp"

namespace ngraph
{
namespace runtime
{
namespace reference
{
/// \brief Reference implementation of Bucketize: for each data element,
/// writes the index of the bucket (interval between sorted boundaries)
/// the element belongs to.
///
/// \param data Input values
/// \param buckets Sorted 1D bucket boundaries
/// \param out Output bucket indices, one per data element
/// \param data_shape Shape of the data input
/// \param buckets_shape Shape of the buckets input
/// \param with_right_bound If true, a value equal to a boundary falls into
///        the left interval (right edge included), otherwise into the right.
template <typename T, typename B, typename P>
void bucketize(const T* data,
               const B* buckets,
               P* out,
               const Shape& data_shape,
               const Shape& buckets_shape,
               bool with_right_bound)
{
    const size_t num_values = shape_size(data_shape);
    const size_t num_buckets = shape_size(buckets_shape);

    // With no boundaries, every element falls into bucket index 0.
    if (num_buckets == 0)
    {
        std::fill_n(out, num_values, static_cast<P>(0));
        return;
    }

    // The output index is the number of boundaries strictly below (or
    // below-or-equal, depending on with_right_bound) the value, i.e. the
    // binary-search insertion point within the sorted boundaries.
    const B* const buckets_end = buckets + num_buckets;
    for (size_t i = 0; i < num_values; ++i)
    {
        const B* pos = with_right_bound
                           ? std::lower_bound(buckets, buckets_end, data[i])
                           : std::upper_bound(buckets, buckets_end, data[i]);
        out[i] = static_cast<P>(pos - buckets);
    }
}

} // namespace reference

} // namespace runtime

} // namespace ngraph
25 changes: 19 additions & 6 deletions ngraph/core/src/op/bucketize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,28 @@ void op::v3::Bucketize::validate_and_infer_types()
const PartialShape& data_pshape = get_input_partial_shape(0);
const PartialShape& buckets_pshape = get_input_partial_shape(1);

const auto data_et = get_input_element_type(0);
const auto buckets_et = get_input_element_type(1);

NODE_VALIDATION_CHECK(this,
data_et.is_real() || data_et.is_integral_number(),
"Data input type must be numeric. Got: ",
data_et);

NODE_VALIDATION_CHECK(this,
buckets_et.is_real() || buckets_et.is_integral_number(),
"Buckets input type must be numeric. Got: ",
buckets_et);

NODE_VALIDATION_CHECK(this,
m_output_type == element::i64 || m_output_type == element::i32,
"Output type must be i32 or i64. Default is i64");
"Output type must be i32 or i64. Got: ",
m_output_type);

if (buckets_pshape.is_static())
{
NODE_VALIDATION_CHECK(
this, buckets_pshape.rank().compatible(1), "buckets input must be a 1D tensor");
}
NODE_VALIDATION_CHECK(this,
buckets_pshape.rank().compatible(1),
"Buckets input must be a 1D tensor. Got: ",
buckets_pshape);

if (data_pshape.is_dynamic())
{
Expand Down
2 changes: 2 additions & 0 deletions ngraph/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ set(SRC
ngraph_api.cpp
node_input_output.cpp
op.cpp
op_eval/bucketize.cpp
op_eval/floor_mod.cpp
op_eval/hsigmoid.cpp
op_eval/hswish.cpp
Expand Down Expand Up @@ -255,6 +256,7 @@ set(MULTI_TEST_SRC
backend/avg_pool.in.cpp
backend/batch_norm.in.cpp
backend/broadcast.in.cpp
backend/bucketize.in.cpp
backend/builder_reduce_ops_opset1.in.cpp
backend/ceiling.in.cpp
backend/comparison.in.cpp
Expand Down
Loading

0 comments on commit a555908

Please sign in to comment.