From 585948401d71b6a95980ad5b30545aa4e1d80cb1 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Tue, 15 Sep 2020 11:04:14 +0800
Subject: [PATCH 1/7] Add copy_ and clamp_ in PyTorch frontend

---
 python/tvm/relay/frontend/pytorch.py          | 10 +++++
 tests/python/frontend/pytorch/test_forward.py | 37 +++++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index c9320a9b2882..331e2401c266 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -1755,6 +1755,14 @@ def _impl(inputs, input_types):
     return _impl
 
+def _copy_():
+    def _impl(inputs, input_types):
+        # use add to help handle broadcasting
+        rel = _op.zeros_like(inputs[0])
+        return _op.add(rel, inputs[1])
+
+    return _impl
+
 
 def _none():
     def _impl(inputs, input_types):
@@ -2630,6 +2638,8 @@ def _get_convert_map(prelude, default_dtype):
         "aten::isinf": _unary("isinf"),
         "aten::isnan": _unary("isnan"),
         "aten::clamp": _clamp(),
+        "aten::clamp_": _clamp(),
+        "aten::copy_": _copy_(),
         "aten::detach": _identity(),
         "aten::upsample_bilinear2d": _upsample("bilinear", prelude),
         "aten::upsample_nearest2d": _upsample("nearest_neighbor", prelude),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index e8a8507158a3..c91ddeedd961 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2398,6 +2398,41 @@ def forward(self, *args):
     verify_model(Clamp3().float().eval(), input_data=input_data)
 
 
+@tvm.testing.uses_gpu
+def test_forward_clamp_():
+    torch.set_grad_enabled(False)
+
+    class ClampInPlace(Module):
+        def __init__(self, min, max):
+            super(ClampInPlace, self).__init__()
+            self.min = min
+            self.max = max
+
+        def forward(self, *args):
+            return torch.clamp_(args[0], self.min, self.max)
+
+    for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
+        input_data = torch.rand(ishape).float()
+        verify_model(ClampInPlace(min, max).float().eval(),
+                     input_data=input_data)
+
+@tvm.testing.uses_gpu
+def test_forward_copy_():
+    torch.set_grad_enabled(False)
+
+    class Copy(Module):
+        def __init__(self):
+            super(Copy, self).__init__()
+
+        def forward(self, *args):
+            return torch.Tensor.copy_(args[0], args[1])
+
+    src_tensor = torch.rand((5))
+    tgt_tensor = torch.rand((2, 3, 5))
+    verify_model(Copy().float().eval(), input_data=[tgt_tensor, src_tensor])
+    verify_model(Copy().float().eval(), input_data=[tgt_tensor, src_tensor + tgt_tensor])
+
+
 @tvm.testing.uses_gpu
 def test_forward_ones():
     torch.set_grad_enabled(False)
@@ -3323,6 +3358,8 @@ def test_forward_pretrained_bert_base_uncased():
     test_forward_pow()
     test_forward_unary()
     test_forward_clamp()
+    test_forward_clamp_()
+    test_forward_copy_()
     test_forward_logical_not()
     test_forward_bitwise_not()
     test_forward_bitwise_xor()
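A note on the aten::copy_ mapping introduced above: the converter never mutates anything on the Relay side. It rebuilds the value of dst.copy_(src) as zeros_like(dst) + src, using the add only so that src is broadcast up to the destination shape. The equivalence being relied on can be checked directly in PyTorch; a minimal sketch, independent of TVM (the names dst and src are illustrative only):

    import torch

    dst = torch.rand(2, 3, 5)
    src = torch.rand(5)

    # What the converter emits, written back in PyTorch terms: broadcast values...
    emulated = torch.zeros_like(dst) + src

    # ...match the result of the in-place op (done on a clone so dst stays untouched).
    reference = dst.clone().copy_(src)

    assert torch.allclose(emulated, reference)

This reproduces the broadcast values of copy_ but not its in-place aliasing, which matters for graphs that keep reading the destination tensor afterwards.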
From ddfa81d982c4005a888edcfcd35a1544f92ddc81 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Tue, 15 Sep 2020 11:42:00 +0800
Subject: [PATCH 2/7] add true_divide in PyTorch frontend

---
 python/tvm/relay/frontend/pytorch.py          |  2 ++
 tests/python/frontend/pytorch/test_forward.py | 20 +++++++++++++++++--
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 331e2401c266..b928e0013d1a 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -1755,6 +1755,7 @@ def _impl(inputs, input_types):
     return _impl
 
+
 def _copy_():
     def _impl(inputs, input_types):
         # use add to help handle broadcasting
@@ -2517,6 +2518,7 @@ def _get_convert_map(prelude, default_dtype):
         "aten::div": _elemwise("divide"),
         "aten::div_": _elemwise("divide"),
         "aten::floor_divide": _elemwise("floor_divide"),
+        "aten::true_divide": _elemwise("divide"),
         "aten::addcdiv": _addcdiv(),
         "aten::addcmul": _addcmul(),
         "aten::ones": _ones(default_dtype),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index c91ddeedd961..9decff8d9b13 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2413,8 +2413,7 @@ def forward(self, *args):
     for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
         input_data = torch.rand(ishape).float()
-        verify_model(ClampInPlace(min, max).float().eval(),
-                     input_data=input_data)
+        verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
 
 @tvm.testing.uses_gpu
 def test_forward_copy_():
     torch.set_grad_enabled(False)
@@ -2930,6 +2929,22 @@ def forward(self, *args):
     verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
 
+
+@tvm.testing.uses_gpu
+def test_forward_true_divide():
+    torch.set_grad_enabled(False)
+
+    class TrueDivide(Module):
+        def forward(self, *args):
+            return torch.true_divide(args[0], args[1])
+
+    dividend = torch.rand([5, 3]).float()
+    # divisor could be either tensor or scalar
+    divisor_tensor = torch.rand([5, 3]).float()
+    divisor_scalar = divisor = torch.tensor(1.0, dtype=torch.float32)
+    verify_model(TrueDivide().float().eval(), input_data=[dividend, divisor_tensor])
+    verify_model(TrueDivide().float().eval(), input_data=[dividend, divisor_scalar])
+
 
 @tvm.testing.uses_gpu
 def test_forward_traced_function():
     def fn(t1, t2):
@@ -3343,6 +3358,7 @@ def test_forward_pretrained_bert_base_uncased():
     test_forward_where()
     test_forward_addcdiv()
     test_forward_addcmul()
+    test_forward_true_divide()
    test_forward_clone()
     test_forward_softplus()
     test_forward_softsign()
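aten::true_divide is mapped onto the same Relay divide as aten::div, and the test above only feeds float tensors, where the two coincide. What distinguishes torch.true_divide (added in PyTorch 1.5, which is why a version guard appears later in this series) is that it always returns a floating-point result, even for integer inputs. A short illustration, independent of TVM:

    import torch

    a = torch.tensor([3, 4, 5])      # int64
    b = torch.tensor([2, 2, 2])      # int64

    print(torch.true_divide(a, b))   # tensor([1.5000, 2.0000, 2.5000]), always floating point
    print(torch.floor_divide(a, b))  # tensor([1, 2, 2]), integer result
    print(torch.true_divide(a, 2))   # a plain scalar divisor is also accepted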
From b954c8462de3e8cbd0a4c11cc3e4cbc5be1dc3a3 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Tue, 15 Sep 2020 12:24:26 +0800
Subject: [PATCH 3/7] more test cases for copy_

---
 tests/python/frontend/pytorch/test_forward.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 9decff8d9b13..59da2b93c10c 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2426,10 +2426,21 @@ def __init__(self):
         def forward(self, *args):
             return torch.Tensor.copy_(args[0], args[1])
 
-    src_tensor = torch.rand((5))
+    class CopyInPlace(Module):
+        def __init__(self):
+            super(CopyInPlace, self).__init__()
+
+        def forward(self, *args):
+            a = args[0]
+            b = args[1]
+            c = torch.Tensor.copy_(a, b)
+            return a
+
+    src_tensor = torch.rand(5)
     tgt_tensor = torch.rand((2, 3, 5))
-    verify_model(Copy().float().eval(), input_data=[tgt_tensor, src_tensor])
-    verify_model(Copy().float().eval(), input_data=[tgt_tensor, src_tensor + tgt_tensor])
+    for copy in [Copy, CopyInPlace]:
+        verify_model(copy().float().eval(), input_data=[tgt_tensor, src_tensor])
+        verify_model(copy().float().eval(), input_data=[tgt_tensor, src_tensor + tgt_tensor])
 
 
 @tvm.testing.uses_gpu

From b32f5c4f399cc94773e94d18992c8ff7b3be58bc Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Tue, 15 Sep 2020 12:46:08 +0800
Subject: [PATCH 4/7] fix format

---
 tests/python/frontend/pytorch/test_forward.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 59da2b93c10c..83509b4434c5 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2415,6 +2415,7 @@ def forward(self, *args):
         input_data = torch.rand(ishape).float()
         verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
 
+
 @tvm.testing.uses_gpu
 def test_forward_copy_():
     torch.set_grad_enabled(False)
@@ -2940,7 +2941,6 @@ def forward(self, *args):
     verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
 
 
-@tvm.testing.uses_gpu
 def test_forward_true_divide():
     torch.set_grad_enabled(False)
 

From 9a77478101e096890ad1fb929ed9b8116b0ee4b6 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Fri, 18 Sep 2020 02:20:06 +0800
Subject: [PATCH 5/7] remove copy_

---
 python/tvm/relay/frontend/pytorch.py          | 10 -----
 tests/python/frontend/pytorch/test_forward.py | 41 ++++---------------
 2 files changed, 7 insertions(+), 44 deletions(-)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index b928e0013d1a..9ceb9fc66ec4 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -1756,15 +1756,6 @@ def _impl(inputs, input_types):
     return _impl
 
 
-def _copy_():
-    def _impl(inputs, input_types):
-        # use add to help handle broadcasting
-        rel = _op.zeros_like(inputs[0])
-        return _op.add(rel, inputs[1])
-
-    return _impl
-
-
 def _none():
     def _impl(inputs, input_types):
         return None
@@ -2641,7 +2632,6 @@ def _get_convert_map(prelude, default_dtype):
         "aten::isnan": _unary("isnan"),
         "aten::clamp": _clamp(),
         "aten::clamp_": _clamp(),
-        "aten::copy_": _copy_(),
         "aten::detach": _identity(),
         "aten::upsample_bilinear2d": _upsample("bilinear", prelude),
         "aten::upsample_nearest2d": _upsample("nearest_neighbor", prelude),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 83509b4434c5..5820008fb31d 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2416,34 +2416,6 @@ def forward(self, *args):
         verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
 
 
-@tvm.testing.uses_gpu
-def test_forward_copy_():
-    torch.set_grad_enabled(False)
-
-    class Copy(Module):
-        def __init__(self):
-            super(Copy, self).__init__()
-
-        def forward(self, *args):
-            return torch.Tensor.copy_(args[0], args[1])
-
-    class CopyInPlace(Module):
-        def __init__(self):
-            super(CopyInPlace, self).__init__()
-
-        def forward(self, *args):
-            a = args[0]
-            b = args[1]
-            c = torch.Tensor.copy_(a, b)
-            return a
-
-    src_tensor = torch.rand(5)
-    tgt_tensor = torch.rand((2, 3, 5))
-    for copy in [Copy, CopyInPlace]:
-        verify_model(copy().float().eval(), input_data=[tgt_tensor, src_tensor])
-        verify_model(copy().float().eval(), input_data=[tgt_tensor, src_tensor + tgt_tensor])
-
-
 @tvm.testing.uses_gpu
 def test_forward_ones():
     torch.set_grad_enabled(False)
@@ -2940,7 +2912,7 @@ def forward(self, *args):
     t2 = torch.rand([1, 3]).float()
     verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
 
-
+@tvm.testing.uses_gpu
 def test_forward_true_divide():
     torch.set_grad_enabled(False)
 
@@ -2950,10 +2922,12 @@ def forward(self, *args):
 
     dividend = torch.rand([5, 3]).float()
     # divisor could be either tensor or scalar
-    divisor_tensor = torch.rand([5, 3]).float()
-    divisor_scalar = divisor = torch.tensor(1.0, dtype=torch.float32)
-    verify_model(TrueDivide().float().eval(), input_data=[dividend, divisor_tensor])
-    verify_model(TrueDivide().float().eval(), input_data=[dividend, divisor_scalar])
+    divisor_tensor = torch.rand([5, 3]).float() + 0.5
+    divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
+    verify_model(TrueDivide().float().eval(),
+                 input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4)
+    verify_model(TrueDivide().float().eval(),
+                 input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4)
 
 
 @tvm.testing.uses_gpu
@@ -3386,7 +3360,6 @@ def test_forward_pretrained_bert_base_uncased():
     test_forward_unary()
     test_forward_clamp()
     test_forward_clamp_()
-    test_forward_copy_()
     test_forward_logical_not()
     test_forward_bitwise_not()
     test_forward_bitwise_xor()
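One detail of the test change in the patch above: torch.rand() samples from [0, 1), so an unshifted divisor can land arbitrarily close to zero, and dividing by a near-zero float amplifies the small numerical differences between the PyTorch and TVM results. Shifting the divisor into [0.5, 1.5) and passing explicit atol/rtol keeps the comparison well conditioned. A tiny illustration of the effect, with 1e-6 standing in for an unlucky rand() draw:

    import torch

    dividend = torch.tensor(0.3)
    tiny = torch.tensor(1e-6)

    print(torch.true_divide(dividend, tiny))        # ~3e5: input noise is amplified by the division
    print(torch.true_divide(dividend, tiny + 0.5))  # ~0.6: the shifted divisor behaves nicely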
From 5e2db31bddeb8c2e24819fd45d03cb819b30d0eb Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Fri, 18 Sep 2020 02:25:53 +0800
Subject: [PATCH 6/7] fix format

---
 tests/python/frontend/pytorch/test_forward.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 5820008fb31d..65009bffb27e 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -2912,6 +2912,7 @@ def forward(self, *args):
     t2 = torch.rand([1, 3]).float()
     verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
 
+
 @tvm.testing.uses_gpu
 def test_forward_true_divide():
     torch.set_grad_enabled(False)
@@ -2924,10 +2925,12 @@ def forward(self, *args):
     dividend = torch.rand([5, 3]).float()
     # divisor could be either tensor or scalar
     divisor_tensor = torch.rand([5, 3]).float() + 0.5
     divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
-    verify_model(TrueDivide().float().eval(),
-                 input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4)
-    verify_model(TrueDivide().float().eval(),
-                 input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4)
+    verify_model(
+        TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
+    )
+    verify_model(
+        TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
+    )
 
 
 @tvm.testing.uses_gpu

From fbf6572bfc51dd8309d1973b9ff31945470ff123 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Fri, 18 Sep 2020 15:45:15 +0800
Subject: [PATCH 7/7] skip true_divide for torch < 1.5

---
 tests/python/frontend/pytorch/test_forward.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 65009bffb27e..83ba22b7c1d9 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -21,15 +21,14 @@
 from scipy.stats import t as tdistr
 import numpy as np
 import torch
+import torchvision
 from torch.nn import Module
 import tvm
-import torchvision
-
 from tvm import relay
 from tvm.contrib import graph_runtime
 from tvm.contrib.nvcc import have_fp16
 import tvm.testing
-
+from packaging import version as package_version
 
 sys.setrecursionlimit(10000)
@@ -2914,6 +2914,8 @@ def forward(self, *args):
 
 @tvm.testing.uses_gpu
 def test_forward_true_divide():
+    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
+        return
     torch.set_grad_enabled(False)
 
     class TrueDivide(Module):
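The guard added in the last patch makes the test return early, and therefore pass silently, on PyTorch builds older than 1.5, where torch.true_divide does not exist. An alternative, shown here only as a sketch and not part of this series, is to express the same packaging-based check as a pytest skip marker so the skip shows up in the test report:

    import pytest
    import torch
    from packaging import version as package_version

    @pytest.mark.skipif(
        package_version.parse(torch.__version__) < package_version.parse("1.5.0"),
        reason="torch.true_divide requires PyTorch 1.5 or newer",
    )
    def test_forward_true_divide():
        ...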