diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/activation.cpp
index 2801b1711e91c3..b1b7972b1c087f 100644
--- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/activation.cpp
+++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/activation.cpp
@@ -176,7 +176,7 @@ void ActivationParamLayerTest::SetUp() {
     std::tie(activationType, netPrecision, shapes, targetDevice) = GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {shapes.first});
-    auto activationParams = createActivationParams(ngPrc);
+    auto activationParams = createActivationParams(ngPrc, shapes.second);
     params[0]->set_friendly_name("Input");
     params.insert(params.end(), activationParams.begin(), activationParams.end());
     auto activation = ngraph::builder::makeActivation(params, ngPrc, activationType);
diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp
index a30dab8f4568bb..472bde482341af 100644
--- a/ngraph/test/runtime/interpreter/evaluates_map.cpp
+++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp
@@ -24,10 +24,16 @@
 #include "ngraph/runtime/reference/mvn.hpp"
 #include "ngraph/runtime/reference/lrn.hpp"
 #include "ngraph/runtime/reference/avg_pool.hpp"
+#include <ngraph/runtime/reference/ceiling.hpp>
+#include <ngraph/runtime/reference/select.hpp>
+
 #include "reference/detection_output.hpp"
 #include "reference/scatter_nd_update.hpp"
 #include "reference/scatter_update.hpp"
-#include "ngraph/runtime/reference/select.hpp"
+#include "reference/gelu.hpp"
+#include "reference/hard_sigmoid.hpp"
+#include "reference/elu.hpp"
+#include "reference/selu.hpp"
 
 using namespace ngraph;
 using namespace std;
@@ -379,6 +385,65 @@ namespace {
         return true;
     }
 
+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v0::HardSigmoid> &op, const HostTensorVector &outputs,
+                  const HostTensorVector &input) {
+        using T = typename element_type_traits<ET>::value_type;
+        runtime::reference::hard_sigmoid<T>(input[0]->get_data_ptr<T>(),
+                                            input[1]->get_data_ptr<T>(),
+                                            input[2]->get_data_ptr<T>(),
+                                            outputs[0]->get_data_ptr<T>(),
+                                            shape_size(input[0]->get_shape()),
+                                            shape_size(input[1]->get_shape()),
+                                            shape_size(input[2]->get_shape()));
+        return true;
+    }
+
+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v0::Elu> &op, const HostTensorVector &outputs,
+                  const HostTensorVector &input) {
+        using T = typename element_type_traits<ET>::value_type;
+        runtime::reference::elu<T>(input[0]->get_data_ptr<T>(),
+                                   outputs[0]->get_data_ptr<T>(),
+                                   shape_size(input[0]->get_shape()),
+                                   op->get_alpha());
+        return true;
+    }
+
+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v0::Selu> &op, const HostTensorVector &outputs,
+                  const HostTensorVector &input) {
+        using T = typename element_type_traits<ET>::value_type;
+        runtime::reference::selu<T>(input[0]->get_data_ptr<T>(),
+                                    input[1]->get_data_ptr<T>(),
+                                    input[2]->get_data_ptr<T>(),
+                                    outputs[0]->get_data_ptr<T>(),
+                                    shape_size(input[0]->get_shape()),
+                                    shape_size(input[1]->get_shape()),
+                                    shape_size(input[2]->get_shape()));
+        return true;
+    }
+
+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v0::Ceiling> &op, const HostTensorVector &outputs,
+                  const HostTensorVector &input) {
+        using T = typename element_type_traits<ET>::value_type;
+        runtime::reference::ceiling<T>(input[0]->get_data_ptr<T>(),
+                                       outputs[0]->get_data_ptr<T>(),
+                                       shape_size(input[0]->get_shape()));
+        return true;
+    }
+
+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v0::Gelu> &op, const HostTensorVector &outputs,
+                  const HostTensorVector &input) {
+        using T = typename element_type_traits<ET>::value_type;
+        runtime::reference::gelu<T>(input[0]->get_data_ptr<T>(),
+                                    outputs[0]->get_data_ptr<T>(),
+                                    shape_size(input[0]->get_shape()));
+        return true;
+    }
+
     template <typename T>
     bool evaluate_node(std::shared_ptr<Node> node, const HostTensorVector &outputs, const HostTensorVector &inputs) {
         switch (node->get_element_type()) {
diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
index bc146ab16e9406..2f552ae8a6c043 100644
--- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
+++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
@@ -45,3 +45,8 @@ NGRAPH_OP(ShapeOf, op::v3)
 NGRAPH_OP(NonZero, op::v3)
 NGRAPH_OP(ScatterNDUpdate, op::v3)
 NGRAPH_OP(ScatterUpdate, op::v3)
+NGRAPH_OP(HardSigmoid, op::v0)
+NGRAPH_OP(Elu, op::v0)
+NGRAPH_OP(Selu, op::v0)
+NGRAPH_OP(Ceiling, op::v0)
+NGRAPH_OP(Gelu, op::v0)
diff --git a/ngraph/core/include/ngraph/runtime/reference/elu.hpp b/ngraph/test/runtime/interpreter/reference/elu.hpp
similarity index 92%
rename from ngraph/core/include/ngraph/runtime/reference/elu.hpp
rename to ngraph/test/runtime/interpreter/reference/elu.hpp
index 3440ece42aa105..efa62d806e983b 100644
--- a/ngraph/core/include/ngraph/runtime/reference/elu.hpp
+++ b/ngraph/test/runtime/interpreter/reference/elu.hpp
@@ -30,9 +30,11 @@ namespace ngraph
                 for (size_t i = 0; i < count; i++)
                 {
-                    out[i] = arg[i] < 0 ? alpha * (std::exp(arg[i]) - 1.0) : arg[i];
+                    out[i] = arg[i] < T(0) ? T(alpha * (std::exp(arg[i]) - 1.0)) : arg[i];
                 }
             }
         }
+
+
     }
 }
diff --git a/ngraph/test/runtime/interpreter/reference/gelu.hpp b/ngraph/test/runtime/interpreter/reference/gelu.hpp
new file mode 100644
index 00000000000000..0d879b61b2969a
--- /dev/null
+++ b/ngraph/test/runtime/interpreter/reference/gelu.hpp
@@ -0,0 +1,38 @@
+//*****************************************************************************
+// Copyright 2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <cmath>
+#include <cstddef>
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace reference
+        {
+            template <typename T>
+            void gelu(const T* arg, T* out, size_t count)
+            {
+                for (size_t i = 0; i < count; i++)
+                {
+                    out[i] = 0.5 * arg[i] * (1 + erf(arg[i] / std::sqrt(2)));
+                }
+            }
+        }
+    }
+}
diff --git a/ngraph/test/runtime/interpreter/reference/hard_sigmoid.hpp b/ngraph/test/runtime/interpreter/reference/hard_sigmoid.hpp
new file mode 100644
index 00000000000000..577492fd17ffab
--- /dev/null
+++ b/ngraph/test/runtime/interpreter/reference/hard_sigmoid.hpp
@@ -0,0 +1,54 @@
+//*****************************************************************************
+// Copyright 2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <numeric>
+#include <vector>
+
+#include "ngraph/axis_vector.hpp"
+#include "ngraph/coordinate_transform.hpp"
+#include "ngraph/shape.hpp"
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace reference
+        {
+            template <typename T>
+            void hard_sigmoid(const T* arg,
+                              const T* alpha,
+                              const T* beta,
+                              T* out,
+                              size_t size_arg,
+                              size_t size_alpha,
+                              size_t size_beta)
+            {
+                int cnt = 0;
+                for (size_t i = 0; i < size_arg; ++i)
+                {
+                    out[i] = std::max(T(0), std::min(T(1), T(alpha[cnt % size_alpha] * arg[i] + beta[cnt % size_beta])));
+                    cnt++;
+                }
+            }
+        }
+    }
+}
diff --git a/ngraph/test/runtime/interpreter/reference/selu.hpp b/ngraph/test/runtime/interpreter/reference/selu.hpp
new file mode 100644
index 00000000000000..2ae5b36d095c5e
--- /dev/null
+++ b/ngraph/test/runtime/interpreter/reference/selu.hpp
@@ -0,0 +1,47 @@
+//*****************************************************************************
+// Copyright 2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <cmath>
+#include <cstddef>
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace reference
+        {
+            template <typename T>
+            void selu(const T* arg,
+                      const T* alpha,
+                      const T* lambda,
+                      T* out,
+                      size_t size_arg,
+                      size_t size_alpha,
+                      size_t size_lambda)
+            {
+                int cnt = 0;
+                for (size_t i = 0; i < size_arg; ++i)
+                {
+                    out[i] = arg[i] > T(0) ? T(lambda[cnt % size_lambda] * arg[i]) :
+                             T(alpha[cnt % size_alpha] * lambda[cnt % size_lambda] * (std::exp(arg[i]) - 1));
+                    cnt++;
+                }
+            }
+        }
+    }
+}