From df87a45f3b1148b768c6f44428050418d54668e7 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Tue, 31 Mar 2020 22:01:26 +0530
Subject: [PATCH 1/2] [PYTORCH]Activations for pytorch

---
 python/tvm/relay/frontend/pytorch.py          | 31 +++++++++++
 tests/python/frontend/pytorch/test_forward.py | 51 +++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 7dee58e2ea80..0845fec4ac82 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -193,6 +193,33 @@ def _impl(inputs, input_types):
         return _op.nn.relu(data)
     return _impl

+def _prelu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = inputs[1]
+        return _op.nn.prelu(data, alpha)
+    return _impl
+
+def _leaky_relu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = float(inputs[1])
+        return _op.nn.leaky_relu(data, alpha)
+    return _impl
+
+def _elu():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        alpha = _expr.const(-float(inputs[1]), dtype='float32')
+        return alpha * _op.nn.relu(_expr.const(1., dtype='float32') - _op.exp(data)) + _op.nn.relu(data)
+    return _impl
+
+def _log_sigmoid():
+    def _impl(inputs, input_types):
+        data = inputs[0]
+        return _op.log(_op.tensor.sigmoid(data))
+    return _impl
+
 def _adaptive_avg_pool_2d():
     def _impl(inputs, input_types):
         data = inputs[0]
@@ -921,6 +948,10 @@ def _wrap_const(c):
     "aten::select"                          : _select(),
     "aten::relu"                            : _relu(),
     "aten::relu_"                           : _relu(),
+    "aten::prelu"                           : _prelu(),
+    "aten::leaky_relu"                      : _leaky_relu(),
+    "aten::elu"                             : _elu(),
+    "aten::log_sigmoid"                     : _log_sigmoid(),
     "aten::adaptive_avg_pool2d"             : _adaptive_avg_pool_2d(),
     "aten::adaptive_max_pool2d"             : _adaptive_max_pool_2d(),
     "aten::max_pool2d"                      : _maxpool_2d(),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 6070d884b191..ffa81945a977 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -335,6 +335,53 @@ def forward(self, *args):
     input_data = torch.rand(input_shape).float()
     verify_model(ReLU1().float().eval(), input_data=input_data)

+def test_forward_prelu():
+    torch.set_grad_enabled(False)
+    input_shape = [1, 3, 10, 10]
+
+    class PReLU1(Module):
+        def __init__(self):
+            super(PReLU1, self).__init__()
+            self.prelu = torch.nn.PReLU(num_parameters=3)
+        def forward(self, *args):
+            return self.prelu(args[0])
+
+    input_data = torch.rand(input_shape).float()
+    verify_model(PReLU1().float().eval(), input_data=input_data)
+
+def test_forward_leakyrelu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class LeakyReLU1(Module):
+        def forward(self, *args):
+            return torch.nn.LeakyReLU(negative_slope=0.05)(args[0])
+
+    input_data = torch.rand(input_shape).float()
+    verify_model(LeakyReLU1().float().eval(), input_data=input_data)
+
+def test_forward_elu():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class ELU1(Module):
+        def forward(self, *args):
+            return torch.nn.ELU(alpha=1.3)(args[0])
+
+    input_data = torch.rand(input_shape).float()
+    verify_model(ELU1().float().eval(), input_data=input_data)
+
+def test_forward_log_sigmoid():
+    torch.set_grad_enabled(False)
+    input_shape = [10, 10]
+
+    class LogSigmoid1(Module):
+        def forward(self, *args):
+            return torch.nn.LogSigmoid()(args[0])
+
+    input_data = torch.rand(input_shape).float()
+    verify_model(LogSigmoid1().float().eval(), input_data=input_data)
+
 def test_forward_adaptiveavgpool():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
@@ -1076,6 +1123,10 @@ def forward(self, xs):
     test_forward_unsqueeze()
     test_forward_concatenate()
     test_forward_relu()
+    test_forward_prelu()
+    test_forward_leakyrelu()
+    test_forward_elu()
+    test_forward_log_sigmoid()
     test_forward_adaptiveavgpool()
     test_forward_maxpool2d()
     test_forward_maxpool1d()
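Note on the _elu converter: Relay has no dedicated ELU operator, so
ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise is lowered to
-alpha * relu(1 - exp(x)) + relu(x); alpha must also be read as a float,
since the tests use non-integer values such as 1.3. A minimal standalone
sanity check of that identity against PyTorch (a sketch, not part of the
patch):

    import torch

    x = torch.linspace(-3.0, 3.0, steps=25)
    alpha = 1.3
    expected = torch.nn.ELU(alpha=alpha)(x)
    # the decomposition used by the converter, written with torch ops
    rewritten = -alpha * torch.relu(1.0 - torch.exp(x)) + torch.relu(x)
    assert torch.allclose(expected, rewritten, atol=1e-6)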
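Likewise, _log_sigmoid composes _op.log with _op.tensor.sigmoid. This
matches torch.nn.LogSigmoid on the [0, 1) inputs torch.rand generates in
the test, but the naive composition can underflow to -inf for strongly
negative inputs, where PyTorch's fused implementation stays finite. A
small standalone illustration (a sketch, not part of the patch):

    import torch

    x = torch.tensor([-200.0, 0.0, 5.0])
    naive = torch.log(torch.sigmoid(x))  # sigmoid underflows at -200.0, so log yields -inf
    fused = torch.nn.LogSigmoid()(x)     # computed stably, stays close to -200.0
    print(naive, fused)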
From c934abd5e195a85ea16ff5952d29788d11f35142 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Wed, 1 Apr 2020 17:04:01 +0530
Subject: [PATCH 2/2] Review comments updated

---
 tests/python/frontend/pytorch/test_forward.py | 78 +++----------------
 1 file changed, 11 insertions(+), 67 deletions(-)

diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index ffa81945a977..ccc9a39fe20d 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -327,76 +327,39 @@ def forward(self, *args):
 def test_forward_relu():
     torch.set_grad_enabled(False)
     input_shape = [10, 10]
-
-    class ReLU1(Module):
-        def forward(self, *args):
-            return torch.nn.ReLU()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(ReLU1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.ReLU().eval(), input_data=input_data)

 def test_forward_prelu():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
-
-    class PReLU1(Module):
-        def __init__(self):
-            super(PReLU1, self).__init__()
-            self.prelu = torch.nn.PReLU(num_parameters=3)
-        def forward(self, *args):
-            return self.prelu(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(PReLU1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)

 def test_forward_leakyrelu():
     torch.set_grad_enabled(False)
     input_shape = [10, 10]
-
-    class LeakyReLU1(Module):
-        def forward(self, *args):
-            return torch.nn.LeakyReLU(negative_slope=0.05)(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(LeakyReLU1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)

 def test_forward_elu():
     torch.set_grad_enabled(False)
     input_shape = [10, 10]
-
-    class ELU1(Module):
-        def forward(self, *args):
-            return torch.nn.ELU(alpha=1.3)(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(ELU1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)

 def test_forward_log_sigmoid():
     torch.set_grad_enabled(False)
     input_shape = [10, 10]
-
-    class LogSigmoid1(Module):
-        def forward(self, *args):
-            return torch.nn.LogSigmoid()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(LogSigmoid1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)

 def test_forward_adaptiveavgpool():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
-
-    class AdaptiveAvgPool2D1(Module):
-        def forward(self, *args):
-            return torch.nn.AdaptiveAvgPool2d([1, 1])(args[0])
-
-    class AdaptiveAvgPool2D2(Module):
-        def forward(self, *args):
-            return torch.nn.AdaptiveAvgPool2d([10, 10])(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(AdaptiveAvgPool2D1().float().eval(), input_data=input_data)
-    verify_model(AdaptiveAvgPool2D2().float().eval(), input_data=input_data)
+    verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)
+    verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)

 def test_forward_maxpool2d():
     torch.set_grad_enabled(False)
@@ -453,28 +416,19 @@ def test_forward_avgpool():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]

-    class AvgPool2D1(Module):
-        def forward(self, *args):
-            return torch.nn.AvgPool2d(kernel_size=[10, 10])(args[0])
-
     class AvgPool2D2(Module):
         def forward(self, *args):
             return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])

     input_data = torch.rand(input_shape).float()
-    verify_model(AvgPool2D1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
     verify_model(AvgPool2D2().float().eval(), input_data=input_data)

 def test_forward_hardtanh():
     torch.set_grad_enabled(False)
     input_shape = [10]
-
-    class HardTanh1(Module):
-        def forward(self, *args):
-            return torch.nn.Hardtanh()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(HardTanh1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)

 def test_forward_conv():
     torch.set_grad_enabled(False)
@@ -529,13 +483,8 @@ def test_forward_conv_transpose():
 def test_forward_threshold():
     torch.set_grad_enabled(False)
     input_shape = [1, 3]
-
-    class Threshold1(Module):
-        def forward(self, *args):
-            return torch.nn.Threshold(0, 0)(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(Threshold1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)

 def test_forward_contiguous():
     torch.set_grad_enabled(False)
@@ -642,13 +591,8 @@ def forward(self, *args):
 def test_forward_sigmoid():
     torch.set_grad_enabled(False)
     input_shape = [1, 3, 10, 10]
-
-    class Sigmoid1(Module):
-        def forward(self, *args):
-            return torch.nn.Sigmoid()(args[0])
-
     input_data = torch.rand(input_shape).float()
-    verify_model(Sigmoid1().float().eval(), input_data=input_data)
+    verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)

 def test_forward_dense():
     torch.set_grad_enabled(False)
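With both patches applied, verify_model in the tests performs a
trace-convert-compare round trip through these converters. A rough
standalone sketch of that path with the tracing step written out by hand
(the from_pytorch signature and the "input0" name reflect TVM at the
time of this patch and may differ in other versions):

    import torch
    from tvm import relay

    model = torch.nn.LeakyReLU(negative_slope=0.05).eval()
    input_data = torch.rand([10, 10])
    scripted = torch.jit.trace(model, input_data)

    # from_pytorch takes a list of (input_name, shape) pairs
    mod, params = relay.frontend.from_pytorch(scripted, [("input0", [10, 10])])
    print(mod)  # the printed Relay module should contain nn.leaky_relu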