diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 1ac094d991abc7..4b3bf753ff339f 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -498,6 +498,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::size", op::translate_size},
         {"aten::slice", op::quantizable_op<op::translate_slice>},
         {"aten::softmax", op::translate_softmax},
+        {"aten::softplus", op::translate_1to1_match_1_inputs<opset10::SoftPlus>},
         {"aten::sort", op::translate_sort},
         {"aten::sqrt", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Sqrt>},
         {"aten::square", op::translate_square},
diff --git a/tests/layer_tests/pytorch_tests/test_softplus.py b/tests/layer_tests/pytorch_tests/test_softplus.py
new file mode 100644
index 00000000000000..bf218e88031a96
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_softplus.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class aten_softplus(torch.nn.Module):
+    def forward(self, x):
+        return torch.nn.functional.softplus(x)
+
+
+class TestSoftplus(PytorchLayerTest):
+    def _prepare_input(self):
+        import numpy as np
+        return (np.random.randn(2, 4, 224, 224).astype(np.float32),)
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_softplus(self, ie_device, precision, ir_version):
+        self._test(aten_softplus(), None, "aten::softplus",
+                   ie_device, precision, ir_version)