[PT FE] [ONNX FE] Partially upcast random_normal to f32 #21400

Merged — 28 commits, Dec 7, 2023
Changes from 5 commits

Commits
57108a0
upcast randn to fp32
pavel-esir Nov 30, 2023
e306867
style fix
pavel-esir Nov 30, 2023
c604e68
corrected tests
pavel-esir Dec 1, 2023
66bc3e2
add layer tests with statistics
pavel-esir Dec 1, 2023
eee5558
style-fix
pavel-esir Dec 1, 2023
053873b
move make_random_normal to common
pavel-esir Dec 4, 2023
76951f7
style-fix
pavel-esir Dec 4, 2023
605112d
added randn layer tests; updated CMakeLists.txt
pavel-esir Dec 4, 2023
10993da
moved to inline
pavel-esir Dec 4, 2023
3808dda
fix problem with _USE_MATH_DEFINES on Win
pavel-esir Dec 4, 2023
85c925a
Merge branch 'master' into upcast_randn_to_f32
pavel-esir Dec 4, 2023
c788e20
pass NodeRegistry as reference; some other minor corrections
pavel-esir Dec 4, 2023
930d0ef
adjust thresholds to avoid sporadic failures
pavel-esir Dec 4, 2023
51852fe
move random_normal_helper and hide from public api
pavel-esir Dec 5, 2023
cb6fbd4
fix install
pavel-esir Dec 5, 2023
1f64344
fix install: 2nd try
pavel-esir Dec 5, 2023
c3f4be9
Frontend common
ilya-lavrenov Dec 5, 2023
2fe4933
Merge pull request #123 from ilya-lavrenov/frontend-common
pavel-esir Dec 5, 2023
d09625d
remove last frontend_common::static
pavel-esir Dec 5, 2023
58e4af1
build fix
pavel-esir Dec 5, 2023
d25fdb5
try to fix mock1 build: 2nd attempt
pavel-esir Dec 6, 2023
24d7941
try to fix mock1 build: 3rd attempt
pavel-esir Dec 6, 2023
1bd8b24
Update src/core/tests/CMakeLists.txt
ilya-lavrenov Dec 6, 2023
02530c2
Fixed build: attempt 2
ilya-lavrenov Dec 6, 2023
e0aa4f8
Merge remote-tracking branch 'pavel-esir/upcast_randn_to_f32' into up…
ilya-lavrenov Dec 6, 2023
5fa00ce
Merge pull request #124 from ilya-lavrenov/upcast_randn_to_f32
pavel-esir Dec 6, 2023
d088ad3
Update src/plugins/intel_cpu/tests/unit/CMakeLists.txt
ilya-lavrenov Dec 6, 2023
4fee468
Update CMakeLists.txt
ilya-lavrenov Dec 7, 2023
10 changes: 7 additions & 3 deletions src/frontends/onnx/frontend/src/utils/random_normal.cpp
@@ -6,6 +6,7 @@

#include "default_opset.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "transformations/rt_info/disable_fp16_compression.hpp"

namespace ngraph {
namespace onnx_import {
@@ -27,7 +28,7 @@ OutputVector make_random_normal(const Output<ngraph::Node>& shape,
const uint64_t seed_1 = op_seed;
const uint64_t seed_2 = (op_seed == 0 ? op_seed : op_seed + 10000);

const auto min_val = default_opset::Constant::create(target_type, Shape{1}, {0});
const auto min_val = default_opset::Constant::create(target_type, Shape{1}, {std::numeric_limits<float>::min()});
const auto max_val = default_opset::Constant::create(target_type, Shape{1}, {1});

const auto uniform_1 =
@@ -45,15 +46,18 @@
const auto multiply_minus_two_log = std::make_shared<default_opset::Multiply>(log, minus_two);
const auto sqrt = std::make_shared<default_opset::Sqrt>(multiply_minus_two_log);

const auto multiply_two_pi = std::make_shared<default_opset::Multiply>(uniform_2, pi);
const auto multiply_two_pi_uniform_2 = std::make_shared<default_opset::Multiply>(multiply_two_pi, uniform_2);
const auto multiply_pi_uniform2 = std::make_shared<default_opset::Multiply>(uniform_2, pi);
const auto multiply_two_pi_uniform_2 = std::make_shared<default_opset::Multiply>(multiply_pi_uniform2, two);
auto const cos = std::make_shared<default_opset::Cos>(multiply_two_pi_uniform_2);

auto const scale_const = default_opset::Constant::create(target_type, Shape{1}, {scale});
auto const mean_const = default_opset::Constant::create(target_type, Shape{1}, {mean});
auto const product =
std::make_shared<default_opset::Multiply>(scale_const, std::make_shared<default_opset::Multiply>(sqrt, cos));
auto const sum = std::make_shared<default_opset::Add>(product, mean_const);
// if fp16 compression is not disabled, float32_min underflows to 0 in f16 and log(0) gives -inf
disable_fp16_compression(uniform_1);
disable_fp16_compression(log);

return {sum};
}
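For reference, the function above implements the Box–Muller transform: given independent uniforms u1 ∈ (0, 1] and u2 ∈ [0, 1), z = sqrt(-2·ln(u1))·cos(2π·u2) is a standard normal sample, which is then scaled and shifted by the requested scale and mean. The hunk fixes two things: the lower bound of u1 moves from 0 to float32_min so that ln(u1) stays finite (this changes the generated stream, hence the updated reference outputs in the tests below), and the cosine argument is corrected from π·u2² to 2π·u2. A minimal NumPy sketch of the transform, illustrative only and not the frontend code:

import numpy as np

def box_muller(u1, u2, mean=0.0, scale=1.0):
    # Standard normal via Box-Muller, then shifted/scaled to N(mean, scale^2).
    z = np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2)
    return mean + scale * z

u2 = np.random.default_rng(0).random(4)
with np.errstate(divide='ignore'):
    print(box_muller(np.zeros(4), u2))  # u1 = 0: log(0) = -inf, outputs blow up
print(box_muller(np.full(4, np.finfo(np.float32).tiny), u2))  # finite, |z| <= ~13.2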
4 changes: 2 additions & 2 deletions src/frontends/onnx/tests/onnx_import.in.cpp
@@ -5366,7 +5366,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal) {
file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/random_normal.onnx"));

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_expected_output<float>(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f});
test_case.add_expected_output<float>(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f});
test_case.run();
}

@@ -5377,7 +5377,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal_like) {

auto test_case = ov::test::TestCase(function, s_device);
test_case.add_input<float>(Shape{2, 2}, {0, 0, 0, 0});
test_case.add_expected_output<float>(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f});
test_case.add_expected_output<float>(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f});
test_case.run();
}

9 changes: 6 additions & 3 deletions src/frontends/pytorch/src/op/rand.cpp
@@ -15,6 +15,7 @@
#include "openvino/op/shape_of.hpp"
#include "openvino/op/sqrt.hpp"
#include "pt_framework_node.hpp"
#include "transformations/rt_info/disable_fp16_compression.hpp"
#include "utils.hpp"

namespace ov {
@@ -57,14 +58,16 @@ OutputVector make_random_normal(const NodeContext& context,
auto multiply_minus_two_log = context.mark_node(std::make_shared<v1::Multiply>(log, minus_two));
auto sqrt = context.mark_node(std::make_shared<v0::Sqrt>(multiply_minus_two_log));

auto multiply_two_pi = context.mark_node(std::make_shared<v1::Multiply>(uniform_2, pi));
auto multiply_two_pi_uniform_2 = context.mark_node(std::make_shared<v1::Multiply>(multiply_two_pi, uniform_2));
auto multiply_pi_uniform2 = context.mark_node(std::make_shared<v1::Multiply>(uniform_2, pi));
auto multiply_two_pi_uniform_2 = context.mark_node(std::make_shared<v1::Multiply>(multiply_pi_uniform2, two));
auto cos = context.mark_node(std::make_shared<v0::Cos>(multiply_two_pi_uniform_2));

auto sqrt_x_cos = context.mark_node(std::make_shared<v1::Multiply>(sqrt, cos));
auto product = context.mark_node(std::make_shared<v1::Multiply>(scale_const, sqrt_x_cos));
auto sum = context.mark_node(std::make_shared<v1::Add>(product, mean_const));

// if fp16 compression is not disabled, float32_min underflows to 0 in f16 and log(0) gives -inf
disable_fp16_compression(uniform_1);
disable_fp16_compression(log);
return {sum};
}
}; // namespace
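The disable_fp16_compression calls are the PyTorch-frontend counterpart of the ONNX change above: if the graph is later compressed to f16, float32_min (~1.18e-38) underflows to zero, since even the smallest positive f16 subnormal is about 5.96e-8, and Log again yields -inf. A quick NumPy check of that underflow (standard IEEE half-precision behavior, not an OpenVINO API):

import numpy as np

f32_min = np.finfo(np.float32).tiny       # ~1.18e-38, smallest normal float32
print(np.float16(f32_min))                # 0.0 -- below the f16 subnormal range
with np.errstate(divide='ignore'):
    print(np.log(np.float32(np.float16(f32_min))))  # -inf, which sqrt/cos then propagate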
38 changes: 38 additions & 0 deletions tests/layer_tests/pytorch_tests/test_rand.py
@@ -88,3 +88,41 @@ def test_inplace_normal(self, model, inputs, ie_device, precision, ir_version):
self.inputs = inputs
self._test(model, None, "aten::normal",
ie_device, precision, ir_version, custom_eps=1e30)

class TestStatistics():
class aten_randn1(torch.nn.Module):
def forward(self, mean, std):
return torch.normal(mean, std)

@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.parametrize("fw_model,inputs", [
(aten_randn1(), (0, 1, (1000000,))),
(aten_randn1(), (0, 1, (10000, 100))),
(aten_randn1(), (0, 3, (100000, 100))),
(aten_randn1(), (1, 6, (100000, 100))),
(aten_randn1(), (-20, 2, (10000, 100))),
(aten_randn1(), (-20, 100, (10000, 100))),
])
def test_statistics(self, fw_model, inputs, ie_device, precision):
import numpy.testing as npt
import numpy as np
import openvino as ov
mean_scalar, std_scalar, input_size = inputs
mean = torch.full(input_size, mean_scalar, dtype=torch.float32)
std = torch.full(input_size, std_scalar, dtype=torch.float32)

ov_model = ov.convert_model(input_model=fw_model, example_input=(mean, std), input=[input_size, input_size])
if ie_device == 'GPU' and precision == 'FP32':
config = {'INFERENCE_PRECISION_HINT': 'f32'}
else:
config = {}
compiled_model = ov.Core().compile_model(ov_model, ie_device, config)

fw_res = fw_model(mean, std)
ov_res = compiled_model((mean, std))[0]

hist_fw, _ = np.histogram(fw_res.numpy(), bins=100, range=(-2*std_scalar, 2*std_scalar))
hist_ov, _ = np.histogram(ov_res, bins=100, range=(-2*std_scalar, 2*std_scalar))

npt.assert_allclose(hist_fw, hist_ov, atol=1e-1, rtol=1e-1)
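Because OpenVINO and PyTorch draw from different RNG streams, the test above compares distributions rather than individual samples: it histograms both outputs over a fixed range and asserts the bin counts agree within 10% (rtol=1e-1). A standalone sketch of the same idea, using a hypothetical assert_same_distribution helper that is not part of the test suite:

import numpy as np
import numpy.testing as npt

def assert_same_distribution(a, b, mean, std, bins=100):
    # Normalized histograms over mean +/- 2*std must match within tolerance.
    lo, hi = mean - 2 * std, mean + 2 * std
    hist_a, _ = np.histogram(a, bins=bins, range=(lo, hi), density=True)
    hist_b, _ = np.histogram(b, bins=bins, range=(lo, hi), density=True)
    npt.assert_allclose(hist_a, hist_b, atol=1e-1, rtol=1e-1)

samples_a = np.random.default_rng(0).normal(0, 1, 1_000_000)
samples_b = np.random.default_rng(1).normal(0, 1, 1_000_000)
assert_same_distribution(samples_a, samples_b, mean=0, std=1)

Centering the histogram range on the mean, as this sketch does, keeps the comparison meaningful when the mean is far from zero.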