diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 7dee58e2ea804..0845fec4ac829 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -193,6 +193,33 @@ def _impl(inputs, input_types):
         return _op.nn.relu(data)
     return _impl

+def _prelu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = inputs[1]
+        return _op.nn.prelu(data, alpha)
+    return _impl
+
+def _leaky_relu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = float(inputs[1])
+        return _op.nn.leaky_relu(data, alpha)
+    return _impl
+
+def _elu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = _expr.const(-float(inputs[1]), dtype='float32')
+        return alpha * _op.nn.relu(_expr.const(1.0, dtype='float32') - _op.exp(data)) + _op.nn.relu(data)
+    return _impl
+
+def _log_sigmoid():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        return _op.log(_op.tensor.sigmoid(data))
+    return _impl
+
 def _adaptive_avg_pool_2d():
     def _impl(inputs, input_types):
         data = inputs[0]
@@ -921,6 +948,10 @@ def _wrap_const(c):
     "aten::select"                          : _select(),
     "aten::relu"                            : _relu(),
     "aten::relu_"                           : _relu(),
+    "aten::prelu"                           : _prelu(),
+    "aten::leaky_relu"                      : _leaky_relu(),
+    "aten::elu"                             : _elu(),
+    "aten::log_sigmoid"                     : _log_sigmoid(),
     "aten::adaptive_avg_pool2d"             : _adaptive_avg_pool_2d(),
     "aten::adaptive_max_pool2d"             : _adaptive_max_pool_2d(),
     "aten::max_pool2d"                      : _maxpool_2d(),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 6070d884b1912..ffa81945a977e 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -335,6 +335,53 @@ def forward(self, *args):
     input_data = torch.rand(input_shape).float()
     verify_model(ReLU1().float().eval(), input_data=input_data)

+def test_forward_prelu():
+    torch.set_grad_enabled(False)
+    input_shape = [1, 3, 10, 10]
+
+    class PReLU1(Module):
+        def __init__(self):
+            super(PReLU1, self).__init__()
+            self.prelu = torch.nn.PReLU(num_parameters=3)
+        def forward(self, *args):
+            return self.prelu(args[0])
+
+    input_data = torch.rand(input_shape).float() - 0.5
+    verify_model(PReLU1().float().eval(), input_data=input_data)
+
+def test_forward_leakyrelu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class LeakyReLU1(Module):
+        def forward(self, *args):
+            return torch.nn.LeakyReLU(negative_slope=0.05)(args[0])
+
+    input_data = torch.rand(input_shape).float() - 0.5
+    verify_model(LeakyReLU1().float().eval(), input_data=input_data)
+
+def test_forward_elu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class ELU1(Module):
+        def forward(self, *args):
+            return torch.nn.ELU(alpha=1.3)(args[0])
+
+    input_data = torch.rand(input_shape).float() - 0.5
+    verify_model(ELU1().float().eval(), input_data=input_data)
+
+def test_forward_log_sigmoid():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class LogSigmoid1(Module):
+        def forward(self, *args):
+            return torch.nn.LogSigmoid()(args[0])
+
+    input_data = torch.rand(input_shape).float() - 0.5
+    verify_model(LogSigmoid1().float().eval(), input_data=input_data)
+
 def test_forward_adaptiveavgpool():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
@@ -1076,6 +1123,10 @@ def forward(self, xs):
     test_forward_unsqueeze()
     test_forward_concatenate()
     test_forward_relu()
+    test_forward_prelu()
+    test_forward_leakyrelu()
+    test_forward_elu()
+    test_forward_log_sigmoid()
     test_forward_adaptiveavgpool()
     test_forward_maxpool2d()
     test_forward_maxpool1d()
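
Note on the `_elu` lowering: since `relu(1 - exp(x))` is zero exactly where `x > 0`, ELU can be expressed with two `relu` calls and one `exp` as `-alpha * relu(1 - exp(x)) + relu(x)`. The snippet below is a minimal standalone sanity check of that identity against `torch.nn.ELU`; it is not part of the patch, and the `alpha` value, input shape, and tolerances are arbitrary choices for illustration:

```python
import numpy as np
import torch

# Check that the relu-based rewrite used by _elu matches torch.nn.ELU.
# alpha, shape, and tolerances here are arbitrary (illustration only).
alpha = 1.3
x = torch.randn(10, 10).numpy()

expected = torch.nn.ELU(alpha=alpha)(torch.from_numpy(x)).numpy()
# _elu lowers ELU(x) to: -alpha * relu(1 - exp(x)) + relu(x)
rewritten = -alpha * np.maximum(1.0 - np.exp(x), 0.0) + np.maximum(x, 0.0)

np.testing.assert_allclose(rewritten, expected, rtol=1e-5, atol=1e-6)
```

For `x > 0` the `relu(1 - exp(x))` term vanishes and only `relu(x) = x` survives; for `x <= 0` it reduces to `alpha * (exp(x) - 1)`, which is the ELU negative branch. This avoids needing a dedicated ELU op in Relay.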