Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PT FE] [ONNX FE] Partially upcast random_normal to f32 #21400

Merged
merged 28 commits into from
Dec 7, 2023
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
57108a0
upcast randn to fp32
pavel-esir Nov 30, 2023
e306867
style fix
pavel-esir Nov 30, 2023
c604e68
corrected tests
pavel-esir Dec 1, 2023
66bc3e2
add layer tests with statistics
pavel-esir Dec 1, 2023
eee5558
style-fix
pavel-esir Dec 1, 2023
053873b
move make_random_normal to common
pavel-esir Dec 4, 2023
76951f7
style-fix
pavel-esir Dec 4, 2023
605112d
added randn layer tests; updated CMakeLists.txt
pavel-esir Dec 4, 2023
10993da
moved to inline
pavel-esir Dec 4, 2023
3808dda
fix problem with _USE_MATH_DEFINES on Win
pavel-esir Dec 4, 2023
85c925a
Merge branch 'master' into upcast_randn_to_f32
pavel-esir Dec 4, 2023
c788e20
pass NodeRegistry as reference; some other minor corrections
pavel-esir Dec 4, 2023
930d0ef
adjust thresholds to avoid sporadicity
pavel-esir Dec 4, 2023
51852fe
move random_normal_helper and hide from public api
pavel-esir Dec 5, 2023
cb6fbd4
fix install
pavel-esir Dec 5, 2023
1f64344
fix install: 2nd try
pavel-esir Dec 5, 2023
c3f4be9
Frontend common
ilya-lavrenov Dec 5, 2023
2fe4933
Merge pull request #123 from ilya-lavrenov/frontend-common
pavel-esir Dec 5, 2023
d09625d
remove last frontend_common::static
pavel-esir Dec 5, 2023
58e4af1
build fix
pavel-esir Dec 5, 2023
d25fdb5
try to fix mock1 build: 2nd attempt
pavel-esir Dec 6, 2023
24d7941
try to fix mock1 build: 3rd attempt
pavel-esir Dec 6, 2023
1bd8b24
Update src/core/tests/CMakeLists.txt
ilya-lavrenov Dec 6, 2023
02530c2
Fixed build: attempt 2
ilya-lavrenov Dec 6, 2023
e0aa4f8
Merge remote-tracking branch 'pavel-esir/upcast_randn_to_f32' into up…
ilya-lavrenov Dec 6, 2023
5fa00ce
Merge pull request #124 from ilya-lavrenov/upcast_randn_to_f32
pavel-esir Dec 6, 2023
d088ad3
Update src/plugins/intel_cpu/tests/unit/CMakeLists.txt
ilya-lavrenov Dec 6, 2023
4fee468
Update CMakeLists.txt
ilya-lavrenov Dec 7, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
pavel-esir marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "ngraph/op/reshape.hpp"
#include "ngraph/output_vector.hpp"
#include "openvino/frontend/visibility.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/opsets/opset12.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "transformations/rt_info/disable_fp16_compression.hpp"
#define _USE_MATH_DEFINES
#include <math.h>

namespace ov {
namespace frontend {

/// \brief Creates a random normal tensor with the given shape and type.
/// \details Uses the Box-Muller algorithm to generate random numbers from a Gaussian distribution.
/// \param registry Node registry that tracks every node created by this helper
/// \param sizes Shape of the output tensor
/// \param target_type Type of the output tensor
/// \param mean Mean of the distribution
/// \param scale Standard deviation of the distribution
/// \param seed Seed for the random number generator
inline OutputVector make_random_normal(pass::NodeRegistry& registry,
                                       const Output<Node>& sizes,
                                       element::Type target_type,
                                       const Output<Node>& mean,
                                       const Output<Node>& scale,
                                       float seed) {
    // We start by generating two random series from a uniform distribution
    const uint64_t global_seed = 0;

    // ONNX specifies the seed as a float, but OpenVINO uses uint64_t
    const auto op_seed = static_cast<uint64_t>(seed * 1000);

    // We need to use two op_seeds to make sure we get different results for two RandomUniform series
    // But we also have to keep original logic and pass "0" (auto-generated seed) to RandomUniform
    const uint64_t seed_1 = op_seed;
    const uint64_t seed_2 = (op_seed == 0 ? op_seed : op_seed + 10000);

    // The lower bound is the smallest positive normal float rather than 0, so that
    // log(uniform_1) below is always finite.
    auto min_val = registry.make<op::v0::Constant>(target_type, Shape{1}, std::numeric_limits<float>::min());
    auto max_val = registry.make<op::v0::Constant>(target_type, Shape{1}, 1);

    auto uniform_1 = registry.make<op::v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_1);
    auto uniform_2 = registry.make<op::v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_2);

    // Compute Box-Muller transform
    // random_normal = scale * sqrt(-2.0 * log(uniform_1)) * cos(2.0 * pi * uniform_2) + mean
    // NOTE: an explicit constant is used instead of M_PI because on MSVC the macro is only
    // available when _USE_MATH_DEFINES is defined before the very first <math.h>/<cmath>
    // inclusion, which a header included after other headers cannot reliably guarantee.
    // The value below is bit-identical to M_PI, so the generated graph is unchanged.
    constexpr double pi_value = 3.14159265358979323846;
    auto pi = registry.make<op::v0::Constant>(target_type, Shape{1}, pi_value);
    auto minus_two = registry.make<op::v0::Constant>(target_type, Shape{1}, -2.0);
    auto two = registry.make<op::v0::Constant>(target_type, Shape{1}, 2.0);

    auto log = registry.make<op::v0::Log>(uniform_1);
    auto multiply_minus_two_log = registry.make<op::v1::Multiply>(log, minus_two);
    auto sqrt = registry.make<op::v0::Sqrt>(multiply_minus_two_log);

    auto multiply_2pi = registry.make<op::v1::Multiply>(two, pi);
    auto multiply_2pi_uniform_2 = registry.make<op::v1::Multiply>(multiply_2pi, uniform_2);
    auto cos = registry.make<op::v0::Cos>(multiply_2pi_uniform_2);

    auto sqrt_x_cos = registry.make<op::v1::Multiply>(sqrt, cos);
    auto product = registry.make<op::v1::Multiply>(scale, sqrt_x_cos);
    auto sum = registry.make<op::v1::Add>(product, mean);

    // if we don't disable down-casting then log(float32_min) gives -inf
    disable_fp16_compression(uniform_1);
    disable_fp16_compression(log);

    return {sum};
}

/// \brief Creates a random normal tensor with the given shape and type.
/// \details Uses Box-Mueller algorithm to generate random numbers from a Gauassian distribution
/// \param sizes Shape of the output tensor
/// \param target_type Type of the output tensor
/// \param mean Mean of the distribution
/// \param scale Standard deviation of the distribution
/// \param seed Seed for the random number generator
inline std::pair<OutputVector, pass::NodeRegistry> make_random_normal(const Output<Node>& sizes,
element::Type target_type,
const Output<Node>& mean,
const Output<Node>& scale,
float seed) {
pass::NodeRegistry registry;
OutputVector res = make_random_normal(registry, sizes, target_type, mean, scale, seed);
return std::make_pair(res, registry);
}

} // namespace frontend
} // namespace ov
11 changes: 7 additions & 4 deletions src/frontends/onnx/frontend/src/op/random_normal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "utils/random_normal.hpp"
#include "openvino/frontend/utils/random_normal.hpp"

#include "exceptions.hpp"
#include "ngraph/shape.hpp"
#include "openvino/op/constant.hpp"
#include "utils/common.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
Expand All @@ -23,11 +24,13 @@ OutputVector random_normal(const Node& node) {

const auto mean = node.get_attribute_value<float>("mean", 0.0f);
const auto scale = node.get_attribute_value<float>("scale", 1.0f);
const auto seed = node.get_attribute_value<float>("seed", 0);
auto scale_node = ov::op::v0::Constant::create(target_type, Shape{1}, {scale});
auto mean_node = ov::op::v0::Constant::create(target_type, Shape{1}, {mean});

const auto seed = node.get_attribute_value<float>("seed", 0);
const auto shape = node.get_attribute_as_constant<std::vector<int64_t>>("shape");

return detail::make_random_normal(shape, target_type, mean, scale, seed);
auto res = ov::frontend::make_random_normal(shape, target_type, mean_node, scale_node, seed);
return res.first;
}

} // namespace set_1
Expand Down
10 changes: 7 additions & 3 deletions src/frontends/onnx/frontend/src/op/random_normal_like.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@

#include "ngraph/shape.hpp"
#include "op/random_uniform_like.hpp"
#include "openvino/frontend/utils/random_normal.hpp"
#include "utils/common.hpp"
#include "utils/random_normal.hpp"

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
Expand All @@ -25,11 +25,15 @@ OutputVector random_normal_like(const Node& node) {
}

const auto shape = std::make_shared<default_opset::ShapeOf>(input);
const auto seed = node.get_attribute_value<float>("seed", 0.0f);

const auto mean = node.get_attribute_value<float>("mean", 0.0f);
const auto scale = node.get_attribute_value<float>("scale", 1.0f);
const auto seed = node.get_attribute_value<float>("seed", 0.0f);
auto scale_node = ov::op::v0::Constant::create(target_type, Shape{1}, {scale});
auto mean_node = ov::op::v0::Constant::create(target_type, Shape{1}, {mean});

return detail::make_random_normal(shape, target_type, mean, scale, seed);
auto res = ov::frontend::make_random_normal(shape, target_type, mean_node, scale_node, seed);
return res.first;
}

} // namespace set_1
Expand Down
63 changes: 0 additions & 63 deletions src/frontends/onnx/frontend/src/utils/random_normal.cpp

This file was deleted.

29 changes: 0 additions & 29 deletions src/frontends/onnx/frontend/src/utils/random_normal.hpp

This file was deleted.

4 changes: 2 additions & 2 deletions src/frontends/onnx/tests/onnx_import.in.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5366,7 +5366,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/random_normal.onnx"));

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_expected_output<float>(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f});
test_case.add_expected_output<float>(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f});
test_case.run();
}

Expand All @@ -5377,7 +5377,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal_like) {

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input<float>(Shape{2, 2}, {0, 0, 0, 0});
test_case.add_expected_output<float>(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f});
test_case.add_expected_output<float>(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f});
test_case.run();
}

Expand Down
41 changes: 8 additions & 33 deletions src/frontends/pytorch/src/op/rand.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include <random>

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/frontend/utils/random_normal.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
Expand All @@ -15,6 +16,7 @@
#include "openvino/op/shape_of.hpp"
#include "openvino/op/sqrt.hpp"
#include "pt_framework_node.hpp"
#include "transformations/rt_info/disable_fp16_compression.hpp"
#include "utils.hpp"

namespace ov {
Expand All @@ -32,40 +34,13 @@ OutputVector make_random_normal(const NodeContext& context,
const Output<Node>& mean_const) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<uint64_t> distrib(0, 9999);
std::uniform_real_distribution<float> distrib(0.0f, 9999.0f);
float seed = distrib(gen);

const uint64_t global_seed = 0;

const uint64_t seed_1 = distrib(gen);
const uint64_t seed_2 = distrib(gen);

auto min_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {std::numeric_limits<float>::min()}));
auto max_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {1}));

auto uniform_1 = context.mark_node(
std::make_shared<v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_1));
auto uniform_2 = context.mark_node(
std::make_shared<v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_2));

// Compute Box–Muller transform
// random_normal = scale * ng.sqrt(-2.0 * ng.log(uniform_1)) * ng.cos(2.0 * np.pi * uniform_2) + mean
auto pi = context.mark_node(v0::Constant::create(target_type, Shape{1}, {3.141592653589793}));
auto minus_two = context.mark_node(v0::Constant::create(target_type, Shape{1}, {-2.0}));
auto two = context.mark_node(v0::Constant::create(target_type, Shape{1}, {2.0}));

auto log = context.mark_node(std::make_shared<v0::Log>(uniform_1));
auto multiply_minus_two_log = context.mark_node(std::make_shared<v1::Multiply>(log, minus_two));
auto sqrt = context.mark_node(std::make_shared<v0::Sqrt>(multiply_minus_two_log));

auto multiply_two_pi = context.mark_node(std::make_shared<v1::Multiply>(uniform_2, pi));
auto multiply_two_pi_uniform_2 = context.mark_node(std::make_shared<v1::Multiply>(multiply_two_pi, uniform_2));
auto cos = context.mark_node(std::make_shared<v0::Cos>(multiply_two_pi_uniform_2));

auto sqrt_x_cos = context.mark_node(std::make_shared<v1::Multiply>(sqrt, cos));
auto product = context.mark_node(std::make_shared<v1::Multiply>(scale_const, sqrt_x_cos));
auto sum = context.mark_node(std::make_shared<v1::Add>(product, mean_const));

return {sum};
pass::NodeRegistry registry;
auto res = ov::frontend::make_random_normal(registry, sizes, target_type, mean_const, scale_const, seed);
context.mark_nodes(registry.get());
return res;
}
}; // namespace

Expand Down
54 changes: 54 additions & 0 deletions tests/layer_tests/pytorch_tests/test_rand.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,3 +88,57 @@ def test_inplace_normal(self, model, inputs, ie_device, precision, ir_version):
self.inputs = inputs
self._test(model, None, "aten::normal",
ie_device, precision, ir_version, custom_eps=1e30)


class TestStatistics():
    # Statistical tests: compare histograms of framework vs. OpenVINO outputs for
    # normal-distribution ops instead of exact values (the RNG streams differ).

    class aten_normal(torch.nn.Module):
        def forward(self, mean, std):
            return torch.normal(mean, std)

    class aten_randn(torch.nn.Module):
        def forward(self, size):
            return torch.randn(*size)

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("fw_model,inputs", [
        (aten_normal(), (0, 1, (1000000,))),
        (aten_normal(), (0, 1, (10000, 100))),
        (aten_normal(), (0, 3, (100000, 100))),
        (aten_normal(), (1, 6, (100000, 100))),
        (aten_normal(), (-20, 2, (10000, 100))),
        (aten_normal(), (-20, 100, (10000, 100))),

        (aten_randn(), (0, 1, (1000000,))),
        (aten_randn(), (0, 1, (10000, 100))),
        (aten_randn(), (0, 1, (100000, 100))),
    ])
    def test_normal_statistics(self, fw_model, inputs, ie_device, precision):
        """Convert the model, run both frameworks and compare output histograms.

        The histogram window is centered on the distribution mean; a window
        centered on zero would miss the bulk of the samples for non-zero means
        (e.g. mean=-20, std=2), making the comparison vacuous.
        """
        import numpy.testing as npt
        import numpy as np
        import openvino as ov
        mean_scalar, std_scalar, size = inputs
        mean = torch.full(size, mean_scalar, dtype=torch.float32)
        std = torch.full(size, std_scalar, dtype=torch.float32)

        if isinstance(fw_model, self.aten_randn):
            example_input = (torch.tensor(size), )
            input_size = [len(size)]
        else:
            example_input = (mean, std)
            input_size = [size, size]

        ov_model = ov.convert_model(input_model=fw_model, example_input=example_input, input=input_size)
        if ie_device == 'GPU' and precision == 'FP32':
            config = {'INFERENCE_PRECISION_HINT': 'f32'}
        else:
            config = {}
        compiled_model = ov.Core().compile_model(ov_model, ie_device, config)

        fw_res = fw_model(*example_input)
        ov_res = compiled_model(example_input)[0]

        # Window of mean +/- 2*std captures ~95% of samples regardless of the mean.
        hist_range = (mean_scalar - 2 * std_scalar, mean_scalar + 2 * std_scalar)
        hist_fw, _ = np.histogram(fw_res.numpy(), bins=100, range=hist_range)
        hist_ov, _ = np.histogram(ov_res, bins=100, range=hist_range)

        npt.assert_allclose(hist_fw, hist_ov, atol=1e-1, rtol=1e-1)
Loading