Skip to content

Commit

Permalink
RandomUniform reference implementation. (openvinotoolkit#7012)
Browse files Browse the repository at this point in the history
* Added RandomUniform reference implementation.

* Corrected comments.

* Small correction.

* Code style correction.

* Added has_evaluate() method.

* Added comments, added names to consts.

* Small fix.

* Replaced arrays with vectors.

* Apply suggestions from code review

Co-authored-by: Ilya Churaev <[email protected]>

* Code refactoring.

* Corrected tests, code style.

* Added comment.

* Added comments.

* Temporarily added debug output.

* Temporarily added debug output.

* Removed debug output.

* Added comment.

* Added comment.

* Enabled state saving for RandomUniform.

* Code style.

* Used a template to convert types.

* Added comments.

Co-authored-by: Ilya Churaev <[email protected]>
  • Loading branch information
2 people authored and akuporos committed Sep 6, 2021
1 parent 8a52a01 commit b7a8d8c
Show file tree
Hide file tree
Showing 8 changed files with 737 additions and 3 deletions.
205 changes: 205 additions & 0 deletions docs/template_plugin/tests/functional/op_reference/random_uniform.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <vector>

#include "base_reference_test.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "ngraph/util.hpp"

using namespace ngraph;

namespace reference_tests {
namespace {

struct RandomUniformParams {
RandomUniformParams(const std::vector<int64_t>& paramOutShape,
const Tensor& paramMinValue,
const Tensor& paramMaxValue,
ngraph::element::Type paramOutType,
int64_t paramGlobalSeed,
int64_t paramOpSeed,
const Tensor& paramExpected,
const std::string& test_name)
: out_shape(paramOutShape),
min_val(paramMinValue),
max_val(paramMaxValue),
out_type(paramOutType),
global_seed(paramGlobalSeed),
op_seed(paramOpSeed),
expected(paramExpected),
test_case_name(test_name) {}
std::vector<int64_t> out_shape;
Tensor min_val;
Tensor max_val;
ngraph::element::Type out_type;
int64_t global_seed;
int64_t op_seed;
Tensor expected;
std::string test_case_name;
};

// Parameterized reference test: builds a single-node Function containing
// RandomUniform and compares its output against hard-coded reference values.
class ReferenceRandomUniformLayerTest : public testing::TestWithParam<RandomUniformParams>, public CommonReferenceTest {
public:
void SetUp() override {
const auto& p = GetParam();
function = CreateFunction(p.out_shape, p.min_val, p.max_val, p.out_type, p.global_seed, p.op_seed);
inputData = {p.min_val.data, p.max_val.data};
refOutData = {p.expected.data};
}

static std::string getTestCaseName(const testing::TestParamInfo<RandomUniformParams>& obj) {
return obj.param.test_case_name;
}

private:
// Wraps a single RandomUniform node into an ngraph::Function.
// The min/max bounds are Parameters (fed at run time); the output
// shape is baked in as an i64 Constant.
static std::shared_ptr<Function> CreateFunction(const std::vector<int64_t>& out_shape,
const Tensor& min_val,
const Tensor& max_val,
const ngraph::element::Type& out_type,
int64_t global_seed,
int64_t op_seed) {
const auto min_param = std::make_shared<opset8::Parameter>(min_val.type, min_val.shape);
const auto max_param = std::make_shared<opset8::Parameter>(max_val.type, max_val.shape);
const auto shape_const = std::make_shared<opset8::Constant>(element::i64, Shape{out_shape.size()}, out_shape);
const auto random_uniform =
std::make_shared<opset8::RandomUniform>(shape_const, min_param, max_param, out_type, global_seed, op_seed);
return std::make_shared<Function>(NodeVector{random_uniform}, ParameterVector{min_param, max_param});
}
};

// Runs the shared reference-test flow: inference on the Function built in
// SetUp(), then element-wise comparison against the expected tensor.
TEST_P(ReferenceRandomUniformLayerTest, RandomUniformWithHardcodedRefs) {
Exec();
}

} // namespace

// Reference values for the following tests are obtained from single layer TensorFlow model with tf.random.uniform().
// Each case fixes the output shape, the min/max bounds, the output element
// type and both seeds, then checks the produced tensor element-wise.
INSTANTIATE_TEST_SUITE_P(
smoke_RandomUniform_With_Hardcoded_Refs,
ReferenceRandomUniformLayerTest,
::testing::Values(
// f32 output in the default [0, 1) range.
RandomUniformParams(std::vector<int64_t>{3, 2, 4},
Tensor{{1}, element::f32, std::vector<float>{0}},
Tensor{{1}, element::f32, std::vector<float>{1}},
element::Type_t::f32,
150,
10,
Tensor{{3, 2, 4},
element::f32,
std::vector<float>{0.70112360, 0.30539632, 0.93931055, 0.94560349, 0.11694777,
0.50770056, 0.51971972, 0.22727466, 0.99137402, 0.35519040,
0.82692313, 0.59864855, 0.31364107, 0.57481313, 0.41399086,
0.96308255, 0.37140799, 0.85253167, 0.09358585, 0.08200955,
0.23655081, 0.81056309, 0.74226606, 0.76106691}},
"float32_default_min_max"),
// f16 output in the default [0, 1) range.
RandomUniformParams(std::vector<int64_t>{3, 2, 4},
Tensor{{1}, element::f16, std::vector<float16>{0}},
Tensor{{1}, element::f16, std::vector<float16>{1}},
element::Type_t::f16,
150,
10,
Tensor{{3, 2, 4},
element::f16,
std::vector<float16>{0.60449219, 0.80664062, 0.83203125, 0.38378906, 0.03613281,
0.08300781, 0.54394531, 0.83398438, 0.33593750, 0.71972656,
0.15429688, 0.12890625, 0.34765625, 0.86914062, 0.41308594,
0.57226562, 0.57421875, 0.93945312, 0.65527344, 0.82226562,
0.82421875, 0.13281250, 0.64355469, 0.66015625}},
"float16_default_min_max"),
// f32 output rescaled to a custom [-650, 450) range.
RandomUniformParams(std::vector<int64_t>{3, 2, 4},
Tensor{{1}, element::f32, std::vector<float>{-650}},
Tensor{{1}, element::f32, std::vector<float>{450}},
element::Type_t::f32,
150,
10,
Tensor{{3, 2, 4},
element::f32,
std::vector<float>{121.23596191, -314.06405640, 383.24157715, 390.16381836,
-521.35742188, -91.52935791, -78.30828857, -399.99786377,
440.51147461, -259.29055786, 259.61541748, 8.51342773,
-304.99481201, -17.70556641, -194.61004639, 409.39074707,
-241.45120239, 287.78485107, -547.05554199, -559.78948975,
-389.79409790, 241.61938477, 166.49267578, 187.17358398}},
"float32_non_default_min_max"),
// f16 output rescaled to a custom [-1.5, -1.0) range.
RandomUniformParams(std::vector<int64_t>{3, 2, 4},
Tensor{{1}, element::f16, std::vector<float16>{-1.5}},
Tensor{{1}, element::f16, std::vector<float16>{-1.0}},
element::Type_t::f16,
150,
10,
Tensor{{3, 2, 4},
element::f16,
std::vector<float16>{-1.19726562, -1.09667969, -1.08398438, -1.30859375, -1.48242188,
-1.45898438, -1.22851562, -1.08300781, -1.33203125, -1.14062500,
-1.42285156, -1.43554688, -1.32617188, -1.06542969, -1.29296875,
-1.21386719, -1.21289062, -1.03027344, -1.17187500, -1.08886719,
-1.08789062, -1.43359375, -1.17773438, -1.16992188}},
"float16_non_default_min_max"),
// i32 output in [-100, 50) with different seeds.
RandomUniformParams(std::vector<int64_t>{2, 3, 4},
Tensor{{1}, element::i32, std::vector<int32_t>{-100}},
Tensor{{1}, element::i32, std::vector<int32_t>{50}},
element::Type_t::i32,
100,
350,
Tensor{{2, 3, 4},
element::i32,
std::vector<int32_t>{
22, -56, -33, -89, -98, -33, -3, -48, -82, 5, -66, 21,
29, -42, -73, -37, 3, 36, -35, 20, -11, -8, -78, 47,
}},
"int32"),
// i64 output in [-2600, 3700) with different seeds.
RandomUniformParams(std::vector<int64_t>{5, 4, 3},
Tensor{{1}, element::i64, std::vector<int64_t>{-2600}},
Tensor{{1}, element::i64, std::vector<int64_t>{3700}},
element::Type_t::i64,
755,
951,
Tensor{{5, 4, 3},
element::i64,
std::vector<int64_t>{
2116, -1581, 2559, -339, -1660, 519, 90, 2027, -210, 3330, 1831, -1737,
2683, 2661, 3473, 1220, 3534, -2384, 2199, 1935, 499, 2861, 2743, 3223,
-531, -836, -65, 3435, 632, 1765, 2613, 1891, 1698, 3069, 169, -792,
-32, 2976, -1552, -2588, 3327, -1756, 2637, -1084, 3567, -778, -1465, 2967,
1242, 2672, -1585, -2271, 3536, -1502, 400, 2241, 3126, 908, 1073, -2110}},
"int64"),
// bf16 output in the default [0, 1) range.
RandomUniformParams(std::vector<int64_t>{7, 3},
Tensor{{1}, element::bf16, std::vector<bfloat16>{0}},
Tensor{{1}, element::bf16, std::vector<bfloat16>{1}},
element::Type_t::bf16,
4978,
5164,
Tensor{{7, 3},
element::bf16,
std::vector<bfloat16>{0.8984375, 0.84375, 0.1640625, 0.1875, 0.46875, 0.6875,
0.5234375, 0.3046875, 0.9140625, 0.453125, 0.953125, 0.328125,
0.359375, 0.1875, 0.9453125, 0.390625, 0.21875, 0.9921875,
0.8203125, 0.453125, 0.875}},
"bfloat16_default_min_max"),
// bf16 output rescaled to a custom [-150, 200) range.
RandomUniformParams(std::vector<int64_t>{7, 3},
Tensor{{1}, element::bf16, std::vector<bfloat16>{-150}},
Tensor{{1}, element::bf16, std::vector<bfloat16>{200}},
element::Type_t::bf16,
4978,
5164,
Tensor{{7, 3},
element::bf16,
std::vector<bfloat16>{164, 146, -92.5, -84.5, 14, 90, 33, -43.5, 170, 8, 182,
-35, -24, -84.5, 180, -14, -73.5, 198, 138, 8, 156}},
"bfloat16_non_default_min_max")),
ReferenceRandomUniformLayerTest::getTestCaseName);
} // namespace reference_tests
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>

namespace ngraph {
namespace pass {

class DisableRandomUniformConstantFolding;

} // namespace pass
} // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief Disables ConstantFolding for RandomUniform operations. Required because
 * RandomUniform must generate a new random sequence on each inference run;
 * folding it into a Constant would freeze a single sequence forever.
 */
class ngraph::pass::DisableRandomUniformConstantFolding : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
DisableRandomUniformConstantFolding();
};
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
#include "transformations/common_optimizations/hswish_fusion.hpp"
#include "transformations/common_optimizations/convert_quantize_dequantize.hpp"
#include "transformations/common_optimizations/relu_fake_quantize_fusion.hpp"
#include "transformations/common_optimizations/disable_random_uniform_constant_folding.hpp"
#include "transformations/common_optimizations/add_fake_quantize_fusion.hpp"
#include "transformations/common_optimizations/mul_fake_quantize_fusion.hpp"
#include "transformations/common_optimizations/clamp_fusion.hpp"
Expand Down Expand Up @@ -88,6 +89,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::

// This pass must be called first in pipeline
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ngraph::pass::DisableRandomUniformConstantFolding>();
manager.register_pass<ngraph::pass::SimplifyShapeOfSubGraph>();
manager.register_pass<ngraph::pass::ConstantFolding>();
manager.register_pass<ngraph::pass::RemoveFilteringBoxesBySize>(); // Resolves dynamism (replaces NonZero), CF needed
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/common_optimizations/disable_random_uniform_constant_folding.hpp"

#include <memory>
#include <ngraph/opsets/opset8.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <transformations/rt_info/disable_constant_folding.hpp>

NGRAPH_RTTI_DEFINITION(ngraph::pass::DisableRandomUniformConstantFolding, "DisableRandomUniformConstantFolding", 0);

// Matches every RandomUniform node and tags it so the ConstantFolding pass
// skips it; folding would replace the op with one fixed sequence.
ngraph::pass::DisableRandomUniformConstantFolding::DisableRandomUniformConstantFolding() {
const auto pattern_root = pattern::wrap_type<opset8::RandomUniform>();

matcher_pass_callback callback = [](pattern::Matcher& m) {
disable_constant_folding(m.get_match_root());
return true;
};

auto matcher = std::make_shared<ngraph::pattern::Matcher>(pattern_root, "DisableRandomUniformConstantFolding");
register_matcher(matcher, callback);
}
16 changes: 14 additions & 2 deletions ngraph/core/include/ngraph/op/random_uniform.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,20 @@ class NGRAPH_API RandomUniform : public Op {
const Output<Node>& min_val,
const Output<Node>& max_val,
const ngraph::element::Type& out_type,
uint64_t global_seed,
uint64_t op_seed);
uint64_t global_seed = 0,
uint64_t op_seed = 0);

void validate_and_infer_types() override;

bool visit_attributes(AttributeVisitor& visitor) override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

/// \brief Turns off constant folding for RandomUniform: the op must produce a
/// fresh random sequence on every run, so its output is never folded into a
/// Constant.
/// \return Always false (folding is never performed).
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override {
return false;
}

/// \return The output tensor type.
const ngraph::element::Type& get_out_type() const {
return m_output_type;
Expand All @@ -63,10 +68,17 @@ class NGRAPH_API RandomUniform : public Op {
m_op_seed = seed2;
}

bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;

bool has_evaluate() const override;

protected:
ngraph::element::Type m_output_type;
uint64_t m_global_seed;
uint64_t m_op_seed;

mutable std::mutex m_state_mutex;
mutable std::pair<uint64_t, uint64_t> m_state;
};
} // namespace v8
} // namespace op
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ctime>
#include <ngraph/type/element_type.hpp>

#include "ngraph/shape.hpp"

namespace ngraph {
namespace runtime {
namespace reference {
/// \brief Reference implementation of the RandomUniform operation
///        (Philox counter-based RNG).
/// \param out_shape        Target output shape as a flat array of dimensions.
/// \param min_val          Pointer to the lower bound (raw bytes of elem_type).
/// \param max_val          Pointer to the upper bound (raw bytes of elem_type).
/// \param out              Destination buffer for the generated values.
/// \param out_shape_shape  Shape of the out_shape input (rank descriptor).
/// \param elem_type        Element type of the generated tensor.
/// \param seed             Global seed of the generator.
/// \param seed2            Per-operation seed of the generator.
/// \param prev_state       Generator state from the previous run.
/// \return Updated generator state to pass into the next run.
std::pair<uint64_t, uint64_t> random_uniform(const uint64_t* out_shape,
const char* min_val,
const char* max_val,
char* out,
const Shape& out_shape_shape,
const ngraph::element::Type& elem_type,
uint64_t seed,
uint64_t seed2,
std::pair<uint64_t, uint64_t> prev_state);

// Following const values are taken from the original paper:
// https://www.thesalmons.org/john/random123/papers/random123sc11.pdf
// constexpr (rather than plain const) guarantees compile-time constants and
// keeps the header ODR-safe across translation units.
constexpr uint32_t crush_resistance_const_lower_value = 0x9E3779B9;
constexpr uint32_t crush_resistance_const_upper_value = 0xBB67AE85;
constexpr uint64_t statistic_maximizing_multiplier_n = 0xD2511F53;
constexpr uint64_t statistic_maximizing_multiplier_counter = 0xCD9E8D57;
constexpr size_t rounds_number = 10;

// Determines how many sequence elements of RNG sequence are skipped between runs.
// Can be any positive value, 256 is chosen for parity with Tensorflow.
constexpr uint64_t skip_const = 256;

} // namespace reference
} // namespace runtime
} // namespace ngraph
Loading

0 comments on commit b7a8d8c

Please sign in to comment.