
Merge remote-tracking branch 'upstream/master' into ci/gha/linux-arm-u22
akashchi committed Feb 1, 2024
2 parents db8cc3f + 42ea606 · commit 8b58e4b
Showing 34 changed files with 404 additions and 505 deletions.
42 changes: 42 additions & 0 deletions .github/workflows/send_workflows_to_opentelemetry.yml
@@ -0,0 +1,42 @@
name: Send workflows to OpenTelemetry (BETA)

on:
  workflow_run:
    workflows:
      - Android ARM64 with vcpkg
      - Documentation
      - Cleanup PIP caches
      - Code snippets
      - Code Style
      - Code coverage
      - Coverity (Ubuntu 20.04, Python 3.11)
      - Fedora (RHEL), Python 3.9
      - Linux (Ubuntu 20.04, Python 3.11)
      - Linux ARM64 (Ubuntu 20.04, Python 3.11)
      - Linux Static CC (Ubuntu 22.04, Python 3.11, Clang)
      - Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10)
      - macOS (Python 3.11)
      - macOS ARM64 (Python 3.11)
      - MO
      - Python API Checks
      - Webassembly
      - Windows (VS 2019, Python 3.11)
      - Windows Conditional Compilation (VS 2022, Python 3.11)
    types:
      - completed

permissions: read-all

jobs:
  otel-export-trace:
    name: OpenTelemetry Export Trace
    runs-on: ubuntu-latest

    steps:
      - name: Export Workflow Trace
        uses: inception-health/otel-export-trace-action@7eabc7de1f4753f0b45051b44bb0ba46d05a21ef
        with:
          otlpEndpoint: grpc://api.honeycomb.io:443/
          otlpHeaders: ${{ secrets.OTLP_HEADERS }}
          githubToken: ${{ secrets.GITHUB_TOKEN }}
          runId: ${{ github.event.workflow_run.id }}
2 changes: 1 addition & 1 deletion docs/dev/build_mac_arm.md
@@ -33,7 +33,7 @@ The software was validated on:
```
- (arm64 only) Product and samples dependencies:
```sh
% brew install tbb pugixml flatbuffers snappy protobuf nlohmann-json zlib gflags
% brew install tbb pugixml flatbuffers snappy protobuf
```
- Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others):
```sh
2 changes: 1 addition & 1 deletion docs/dev/build_mac_intel_cpu.md
@@ -30,7 +30,7 @@ The software was validated on:
```
- (Optional; native compilation only, x86_64) Product and samples dependencies:
```sh
% brew install tbb pugixml flatbuffers snappy protobuf nlohmann-json zlib gflags
% brew install tbb pugixml flatbuffers snappy protobuf
```
- Additional `pip` dependencies to build OpenVINO Runtime Python API, Development tools (Model Optimizer, POT and others):
```sh
5 changes: 5 additions & 0 deletions docs/sphinx_setup/_static/css/custom.css
@@ -261,6 +261,11 @@ div.highlight {
.bd-sidebar {
    top: calc(var(--pst-header-height));
}

.bd-links {
    margin-top: 10px;
}

body {
    padding-top: calc(var(--pst-header-height));
}
4 changes: 2 additions & 2 deletions docs/sphinx_setup/_static/html/banner_data.json
@@ -1,5 +1,5 @@
{
"p": "OpenVINO 2022.1 has introduced OpenVINO API 2.0. For more information on transition steps from the previous API, see the <a href=\"https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html\">transition guide</a>",
"p": "",
"version": "1",
"style": "info"
"style": "warning"
}
68 changes: 35 additions & 33 deletions docs/sphinx_setup/_static/js/hide_banner.js
@@ -2,40 +2,42 @@ var cookieVersion = 0;
window.onload = (e) => {
fetch('/build/docs/_build/_static/html/banner_data.json').then((data) => {
data.json().then((item) => {
cookieVersion = item.version;
var transitionBanner = document.getElementById("info-banner");
if (document.cookie.split(';').filter(function (find_cookie_name) {
return find_cookie_name.trim().indexOf('TransitionBannerIsHiddenX' + cookieVersion + '=') === 0;
}
).length) {
transitionBanner.classList.add("hidden-banner");
}
transitionBanner.classList.add(item.style);

const p = document.createElement("p");
p.innerHTML = item.p;
transitionBanner.append(p);

const button = document.createElement("button");
button.className = "close-banner";
button.type = "button";

const span = document.createElement("span");
span.setAttribute('aria-hidden', 'true');
span.innerHTML = '&times;';
button.appendChild(span);
button.addEventListener("click", function () {
var cookieContent = 'TransitionBannerIsHiddenX' + cookieVersion + '=true;';
var expiry = 'expires=';
var date = new Date();
var expirationDate = date.getTime() + (365 * 24 * 60 * 60 * 1000);
date.setTime(expirationDate);
expiry += date.toUTCString();
document.cookie = cookieContent + expiry;
if(item != null && item.p != "" && item.version != "" && item.style != "" ) {
cookieVersion = item.version;
var transitionBanner = document.getElementById("info-banner");
transitionBanner.classList.add("hidden-banner");
});
transitionBanner.append(button)
if (document.cookie.split(';').filter(function (find_cookie_name) {
return find_cookie_name.trim().indexOf('TransitionBannerIsHiddenX' + cookieVersion + '=') === 0;
}
).length) {
transitionBanner.classList.add("hidden-banner");
}
transitionBanner.classList.add(item.style);

const p = document.createElement("p");
p.innerHTML = item.p;
transitionBanner.append(p);

const button = document.createElement("button");
button.className = "close-banner";
button.type = "button";

const span = document.createElement("span");
span.setAttribute('aria-hidden', 'true');
span.innerHTML = '&times;';
button.appendChild(span);
button.addEventListener("click", function () {
var cookieContent = 'TransitionBannerIsHiddenX' + cookieVersion + '=true;';
var expiry = 'expires=';
var date = new Date();
var expirationDate = date.getTime() + (365 * 24 * 60 * 60 * 1000);
date.setTime(expirationDate);
expiry += date.toUTCString();
document.cookie = cookieContent + expiry;
var transitionBanner = document.getElementById("info-banner");
transitionBanner.classList.add("hidden-banner");
});
transitionBanner.append(button)
}
});
});
};
21 changes: 4 additions & 17 deletions install_build_dependencies.sh
@@ -69,9 +69,7 @@ if [ -f /etc/lsb-release ] || [ -f /etc/debian_version ] ; then
libffi-dev \
`# spell checking for MO sources` \
python3-enchant \
`# samples and tools` \
libgflags-dev \
zlib1g-dev \
`# tools` \
wget
# TF lite frontend
if apt-cache search --names-only '^libflatbuffers-dev'| grep -q libflatbuffers-dev; then
@@ -128,10 +126,7 @@ elif [ -f /etc/redhat-release ] || grep -q "rhel" /etc/os-release ; then
opencl-headers \
`# python API` \
python3-pip \
python3-devel \
`# samples and tools` \
zlib-devel \
gflags-devel
python3-devel
elif [ -f /etc/os-release ] && grep -q "SUSE" /etc/os-release ; then
zypper refresh
zypper install -y \
@@ -169,11 +164,7 @@ elif [ -f /etc/os-release ] && grep -q "SUSE" /etc/os-release ; then
`# python API` \
python39-pip \
python39-setuptools \
python39-devel \
`# samples and tools` \
zlib-devel \
gflags-devel-static \
nlohmann_json-devel
python39-devel
elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then
# Raspbian
apt update
@@ -200,11 +191,7 @@ elif [ -f /etc/os-release ] && grep -q "raspbian" /etc/os-release; then
python3-pip \
python3-venv \
python3-setuptools \
libpython3-dev \
`# samples and tools` \
libgflags-dev \
zlib1g-dev \
nlohmann-json-dev
libpython3-dev
else
echo "Unknown OS, please install build dependencies manually"
fi
14 changes: 8 additions & 6 deletions src/bindings/c/src/ov_remote_context.cpp
@@ -20,12 +20,14 @@ const char* ov_property_key_intel_gpu_dev_object_handle = "DEV_OBJECT_HANDLE";
const char* ov_property_key_intel_gpu_va_plane = "VA_PLANE";

inline bool check_intel_gpu_property_value_is_ptr(std::string& key) {
if ((key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
(key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle) ||
(key == ov_property_key_intel_gpu_dev_object_handle))
return true;
else
return false;
#ifdef _WIN32
return (key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
(key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle) ||
(key == ov_property_key_intel_gpu_dev_object_handle);
#else
return (key == ov_property_key_intel_gpu_ocl_context) || (key == ov_property_key_intel_gpu_ocl_queue) ||
(key == ov_property_key_intel_gpu_va_device) || (key == ov_property_key_intel_gpu_mem_handle);
#endif
}

//!< Properties of intel gpu cannot be completely handled by (char*) type, because it contains non-char pointer which
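As a hedged, standalone illustration of the platform split introduced above (not part of the change itself; all key strings other than `DEV_OBJECT_HANDLE` are illustrative assumptions, while the real helper compares against the `ov_property_key_intel_gpu_*` constants):

```cpp
// Minimal sketch only: mirrors the #ifdef _WIN32 logic above with illustrative key strings.
// On Windows, DEV_OBJECT_HANDLE carries a pointer-typed value; elsewhere it is treated as a
// plain string property.
#include <iostream>
#include <set>
#include <string>

static bool is_ptr_property(const std::string& key) {
    std::set<std::string> ptr_keys = {"OCL_CONTEXT", "OCL_QUEUE", "VA_DEVICE", "MEM_HANDLE"};
#ifdef _WIN32
    ptr_keys.insert("DEV_OBJECT_HANDLE");  // pointer-typed only on Windows
#endif
    return ptr_keys.count(key) != 0;
}

int main() {
    // Prints "true" on Windows and "false" on other platforms.
    std::cout << std::boolalpha << is_ptr_property("DEV_OBJECT_HANDLE") << std::endl;
}
```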
5 changes: 0 additions & 5 deletions src/frontends/onnx/frontend/CMakeLists.txt
@@ -17,8 +17,3 @@ ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}"
DEFINITIONS
$<TARGET_PROPERTY:onnx,INTERFACE_COMPILE_DEFINITIONS>)

install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import
DESTINATION ${FRONTEND_INSTALL_INCLUDE}/ngraph/frontend
COMPONENT ${OV_CPACK_COMP_CORE_DEV}
${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
9 changes: 2 additions & 7 deletions src/frontends/onnx/frontend/src/core/transform.hpp
@@ -10,13 +10,8 @@ namespace ngraph {
namespace onnx_import {
namespace transform {

static const std::vector<std::string> onnx_functions_to_expand = {"AffineGrid",
"Bernoulli",
"Celu",
"CenterCropPad",
"NegativeLogLikelihoodLoss",
"SoftmaxCrossEntropyLoss",
"LayerNormalization"};
static const std::vector<std::string> onnx_functions_to_expand =
{"AffineGrid", "Bernoulli", "Celu", "CenterCropPad", "NegativeLogLikelihoodLoss", "SoftmaxCrossEntropyLoss"};

/// \brief Replace nodes with expanded body of ONNX functions
///
102 changes: 102 additions & 0 deletions src/frontends/onnx/frontend/src/op/layer_normalization.cpp
@@ -0,0 +1,102 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "op/layer_normalization.hpp"

#include "exceptions.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/convert_like.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/negative.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/slice.hpp"
#include "openvino/op/sqrt.hpp"
#include "openvino/op/subtract.hpp"
#include "ov_models/ov_builders/reshape.hpp"
#include "utils/common.hpp"

using namespace ov::op;
using namespace ov::op::v0;
using namespace ov::op::v1;
using namespace ov::op::v8;

OPENVINO_SUPPRESS_DEPRECATED_START
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {

ov::OutputVector layer_normalization(const Node& node) {
    const auto inputs = node.get_ng_inputs();
    const auto num_inputs = inputs.size();
    CHECK_VALID_NODE(node,
                     num_inputs == 2 || num_inputs == 3,
                     "LayerNormalization expects 2 or 3 input tensors. Got: ",
                     num_inputs);

    const auto& X = inputs.at(0);
    const auto& Scale = inputs.at(1);

    auto axis = node.get_attribute_value<std::int64_t>("axis", -1);
    double epsilon = node.get_attribute_value<double>("epsilon", 1e-5);
    int64_t stash_type_i =
        node.get_attribute_value<int64_t>("stash_type",
                                          static_cast<int64_t>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT));
    element::Type stash_type = common::get_ov_element_type(stash_type_i);

    // following calculations are kept as close to the onnx\defs.cc description as possible
    auto FloatEpsilon = Constant::create(ov::element::f32, Shape{}, {epsilon});
    auto Epsilon = std::make_shared<Convert>(FloatEpsilon, stash_type);
    auto XShape = std::make_shared<ShapeOf>(X);
    auto Rank = std::make_shared<v3::ShapeOf>(XShape);
    auto Zero1D = Constant::create(ov::element::i64, {1}, {0});
    auto One1D = Constant::create(ov::element::i64, {1}, {1});
    auto Axis1D = Constant::create(ov::element::i64, {1}, {axis});
    auto PrefixShape = std::make_shared<Slice>(XShape, Zero1D, Axis1D, One1D);
    ov::Output<ov::Node> NumReducedAxes = (axis >= 0 ? std::make_shared<Subtract>(Rank, Axis1D)->output(0)
                                                     : std::make_shared<Negative>(Axis1D)->output(0));
    auto SuffixShape = std::make_shared<v3::Broadcast>(One1D, NumReducedAxes);
    auto ReducedShape = std::make_shared<Concat>(ov::OutputVector{PrefixShape, SuffixShape}, 0);

    auto X2D = util::flatten(X, static_cast<int>(axis));
    auto XU = std::make_shared<Convert>(X2D, stash_type);

    auto Mean2D = std::make_shared<ReduceMean>(XU, One1D, true);
    auto Square = std::make_shared<Multiply>(XU, XU);
    auto MeanOfSquare = std::make_shared<ReduceMean>(Square, One1D, true);
    auto SquareOfMean = std::make_shared<Multiply>(Mean2D, Mean2D);

    auto Var = std::make_shared<Subtract>(MeanOfSquare, SquareOfMean);
    auto VarPlusEpsilon = std::make_shared<Add>(Var, Epsilon);
    auto StdDev = std::make_shared<Sqrt>(VarPlusEpsilon);
    auto Deviation = std::make_shared<Subtract>(XU, Mean2D);
    auto Normalized = std::make_shared<Divide>(Deviation, StdDev);
    auto NormalizedT = std::make_shared<ConvertLike>(Normalized, X);

    auto Scale2D = util::flatten(Scale, 0);
    auto Scaled = std::make_shared<Multiply>(NormalizedT, Scale2D);
    ov::Output<ov::Node> Biased =
        (num_inputs == 3 ? std::make_shared<Add>(Scaled, util::flatten(inputs.at(2), 0))->output(0)
                         : Scaled->output(0));

    auto Y = std::make_shared<Reshape>(Biased, XShape, false);
    auto InvStdDev2D = std::make_shared<Divide>(Constant::create(stash_type, {1}, {1}), StdDev);
    auto Mean = std::make_shared<Reshape>(Mean2D, ReducedShape, false);
    auto InvStdDev = std::make_shared<Reshape>(InvStdDev2D, ReducedShape, false);

    return ov::OutputVector{Y, Mean, InvStdDev};
}

} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END
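For orientation, a compact restatement of what the graph above computes, with the reduction taken over the dimensions flattened away from `axis` onward, ε = `epsilon`, and the bias term B applied only when a third input is present (the three returned outputs are Y, Mean, and InvStdDev):

```latex
\mu = \operatorname{mean}(X), \qquad
\sigma^{2} = \operatorname{mean}(X^{2}) - \mu^{2}, \qquad
\hat{X} = \frac{X - \mu}{\sqrt{\sigma^{2} + \epsilon}},
\qquad
Y = \hat{X} \odot \mathrm{Scale} + B, \qquad
\mathrm{Mean} = \mu, \qquad
\mathrm{InvStdDev} = \frac{1}{\sqrt{\sigma^{2} + \epsilon}}
```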
20 changes: 20 additions & 0 deletions src/frontends/onnx/frontend/src/op/layer_normalization.hpp
@@ -0,0 +1,20 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include "openvino/core/deprecated.hpp"
OPENVINO_SUPPRESS_DEPRECATED_START

#include "onnx_import/core/node.hpp"

namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
ov::OutputVector layer_normalization(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
OPENVINO_SUPPRESS_DEPRECATED_END
2 changes: 2 additions & 0 deletions src/frontends/onnx/frontend/src/ops_bridge.cpp
@@ -94,6 +94,7 @@
#include "op/is_finite.hpp"
#include "op/is_inf.hpp"
#include "op/is_nan.hpp"
#include "op/layer_normalization.hpp"
#include "op/leaky_relu.hpp"
#include "op/less.hpp"
#include "op/less_or_equal.hpp"
@@ -426,6 +427,7 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("IsFinite", 1, is_finite);
REGISTER_OPERATOR("IsInf", 1, is_inf);
REGISTER_OPERATOR("IsNaN", 1, is_nan)
REGISTER_OPERATOR("LayerNormalization", 1, layer_normalization);
REGISTER_OPERATOR("LeakyRelu", 1, leaky_relu);
REGISTER_OPERATOR("Less", 1, less);
REGISTER_OPERATOR("LessOrEqual", 1, less_or_equal);
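A hedged usage sketch (the model file name is an illustrative placeholder, not part of this change): once the operator is registered in the bridge as above, ONNX models containing LayerNormalization can be loaded through the standard OpenVINO runtime API.

```cpp
// Minimal sketch, assuming an OpenVINO build with the ONNX frontend enabled.
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // "model_with_layer_norm.onnx" is a hypothetical file containing a LayerNormalization node.
    auto model = core.read_model("model_with_layer_norm.onnx");
    auto compiled_model = core.compile_model(model, "CPU");
    return 0;
}
```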
