Skip to content

Commit

Permalink
Merge pull request #201 from Idein/support-convTranspose
Browse files Browse the repository at this point in the history
Support ConvTranspose
  • Loading branch information
eguchi1904 authored Jul 19, 2023
2 parents e24a03a + 6174110 commit 07ef32a
Show file tree
Hide file tree
Showing 6 changed files with 262 additions and 4 deletions.
4 changes: 4 additions & 0 deletions runtime/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,10 @@ This runtime supports only below operators.
- ConcatFromSequence
- Constant
- ConstantOfShape
- ConvTranspose
- support 2d only
- `group` should be 1
- `auto_pad` should be `"NOTSET"` (default value)
- Cos
- Cosh
- DepthToSpace
Expand Down
1 change: 1 addition & 0 deletions runtime/onnion_runtime/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from .concatfromsequence import ConcatFromSequence # noqa: F401
from .constant import Constant # noqa: F401
from .constantofshape import ConstantOfShape # noqa: F401
from .convtranspose import ConvTranspose # noqa: F401
from .cos import Cos # noqa: F401
from .cosh import Cosh # noqa: F401
from .depthtospace import DepthToSpace # noqa: F401
Expand Down
106 changes: 106 additions & 0 deletions runtime/onnion_runtime/convtranspose.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
from typing import Any, List, Optional

import numpy as np

from .error import RunError


# https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
class ConvTranspose:
auto_pad: str
group: int
dilations: Optional[List[int]]
strides: Optional[List[int]]
kernel_shape: Optional[List[int]]
output_shape: Optional[List[int]]
output_padding: Optional[List[int]]
pads: Optional[List[int]]

def __init__(self, opset_version: int, **kwargs: Any):
self.version = opset_version
self.auto_pad = kwargs.get("auto_pad", "NOTSET")
self.dilations = kwargs.get("dilations", None)
self.group = kwargs.get("group", 1)
self.kernel_shape = kwargs.get("kernel_shape", None)
self.output_padding = kwargs.get("output_padding", None)
self.output_shape = kwargs.get("output_shape", None)
self.pads = kwargs.get("pads", None)
self.strides = kwargs.get("strides", None)

def run(self, x: np.ndarray, W: np.ndarray, b: Optional[np.ndarray] = None) -> List[np.ndarray]:
"""
2D Convolution Transpose
input shapes:
x: [batch, in_ch, in_h, in_w]
W: [in_ch, out_ch/group, kernel_h, kernel_w]
b: [out_ch]
output shape:
[batch, out_ch, out_h, out_w]
"""

# define parameters
dim = len(x.shape) - 2
group = self.group
batch = x.shape[0]
in_ch = x.shape[1]
out_ch = W.shape[1]
dilations = self.dilations or [1] * dim
strides = self.strides or [1] * dim
output_padding = self.output_padding or [0] * dim
kernel_shape = self.kernel_shape or W.shape[2:]
input_shape = x.shape[2:]
pads = self.pads or [0] * (dim * 2)

if dim != 2:
raise RunError("ConvTranspose", self.version, "support 2d only")

if group != 1:
raise RunError("ConvTranspose", self.version, "support group=1 only")

if self.auto_pad != "NOTSET":
raise RunError("ConvTranspose", self.version, "support auto_pad=NOTSET only")

# calculate pads and output_shape
if self.output_shape is not None:
output_shape = self.output_shape
total_padding = [
strides[i] * (input_shape[i] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- output_shape[i]
for i in range(len(input_shape))
]
for i in range(len(input_shape)):
pads[i] = total_padding[i] - (total_padding[i] // 2)
pads[i + dim] = total_padding[i] // 2
else:
output_shape = [
strides[i] * (input_shape[i] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- pads[i]
- pads[i + dim]
for i in range(dim)
]

# calculate output
result = np.zeros([batch, out_ch, *output_shape], dtype=x.dtype)

for n in range(batch):
for och in range(out_ch):
if b is not None:
result[n, och, :, :] += b[och]
for ih in range(input_shape[0]):
for iw in range(input_shape[1]):
for kh in range(kernel_shape[0]):
for kw in range(kernel_shape[1]):
oh = strides[0] * ih + kh * dilations[0] - pads[0]
ow = strides[1] * iw + kw * dilations[1] - pads[1]
if oh < 0 or ow < 0 or oh >= output_shape[0] or ow >= output_shape[1]:
continue
v = np.float32(0)
for ich in range(in_ch):
v += x[n, ich, ih, iw] * W[ich, och, kh, kw]
result[n, och, oh, ow] += v

return [result]
6 changes: 5 additions & 1 deletion runtime/onnion_runtime/error.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,8 @@
from typing import Optional


class RunError(Exception):
    """Raised when an operator cannot execute the requested configuration.

    Attributes:
        op: operator name (e.g. "ConvTranspose").
        version: opset version the runtime was asked to run.
        reason: optional human-readable explanation of the failure.
    """

    def __init__(self, op: str, version: int, reason: Optional[str] = None):
        self.op = op
        self.version = version
        self.reason = reason
143 changes: 143 additions & 0 deletions runtime/tests/test_convtranspose.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
import numpy as np
from onnion_runtime import ConvTranspose

from .utils import check


def test_convtranspose_00() -> None:
    # Default attributes: unit strides, no padding, no dilation.
    opset_version = 13
    attrs: dict = {}

    x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)  # (1, 1, 3, 3)
    W = np.ones((1, 2, 3, 3), dtype=np.float32)  # (1, 2, 3, 3)

    check(ConvTranspose, opset_version, attrs, [x, W])


def test_convtranspose_01() -> None:
    # Non-trivial strides combined with output_padding.
    opset_version = 13
    attrs = dict(strides=[3, 2], output_padding=[1, 1])

    x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)  # (1, 1, 3, 3)
    W = np.ones((1, 2, 3, 3), dtype=np.float32)  # (1, 2, 3, 3)

    check(ConvTranspose, opset_version, attrs, [x, W])


# test dilation
def test_convtranspose_02() -> None:
    opset_version = 13
    attrs = dict(dilations=[2, 2])

    data = np.random.randn(1, 1, 3, 3).astype(np.float32)
    weight = np.random.randn(1, 1, 2, 2).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight])


# test pads
def test_convtranspose_03() -> None:
    opset_version = 13
    attrs = dict(strides=[3, 2], pads=[1, 2, 1, 2])

    data = np.random.randn(1, 1, 3, 3).astype(np.float32)
    weight = np.random.randn(1, 2, 3, 3).astype(np.float32)
    bias = np.random.randn(2).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight, bias])


# specify output shape
def test_convtranspose_04() -> None:
    opset_version = 13
    attrs = dict(strides=[3, 2], output_shape=[10, 8])

    data = np.random.randn(1, 1, 3, 3).astype(np.float32)
    weight = np.random.randn(1, 2, 3, 3).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight])


# specify output shape and output padding
def test_convtranspose_05() -> None:
    opset_version = 13
    attrs = dict(strides=[3, 2], output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1])

    data = np.random.randn(1, 1, 3, 3).astype(np.float32)
    weight = np.random.randn(1, 2, 3, 3).astype(np.float32)
    bias = np.random.randn(2).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight, bias])


# larger channel number
def test_convtranspose_06() -> None:
    opset_version = 13
    attrs = dict(strides=[2, 2], kernel_shape=[2, 2], pads=[0, 0, 0, 0])

    data = np.random.randn(2, 24, 12, 12).astype(np.float32)
    weight = np.random.randn(24, 24, 2, 2).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight])


# larger channel number (with bias)
def test_convtranspose_07() -> None:
    opset_version = 13
    attrs = dict(strides=[2, 2], kernel_shape=[2, 2], pads=[0, 0, 0, 0])

    data = np.random.randn(2, 24, 12, 12).astype(np.float32)
    weight = np.random.randn(24, 24, 2, 2).astype(np.float32)
    bias = np.random.randn(24).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight, bias])


# opset 1
def test_convtranspose_08() -> None:
    opset_version = 1
    attrs = dict(strides=[3, 2], output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1])

    data = np.random.randn(1, 1, 3, 3).astype(np.float32)
    weight = np.random.randn(1, 2, 3, 3).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight])


# opset 1 (larger channel number)
def test_convtranspose_09() -> None:
    opset_version = 1
    attrs = dict(strides=[2, 2], kernel_shape=[2, 2], pads=[0, 0, 0, 0])

    data = np.random.randn(2, 24, 12, 12).astype(np.float32)
    weight = np.random.randn(24, 24, 2, 2).astype(np.float32)
    bias = np.random.randn(24).astype(np.float32)

    check(ConvTranspose, opset_version, attrs, [data, weight, bias])
6 changes: 3 additions & 3 deletions runtime/tests/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
try:
import onnx
import onnxruntime
from onnx import checker, helper, mapping, numpy_helper
from onnx import checker, helper, numpy_helper

WITHOUT_ONNXRUNTIME = False
except Exception:
Expand All @@ -24,7 +24,7 @@ def on_arm32():
result = bool(int(os.environ["ONNION_TEST_ON_ARM32"]))
except Exception:
arch = platform.machine()
if arch == "x86_64":
if arch == "x86_64" or arch == "arm64":
result = False
elif arch == "armv7l":
result = True
Expand Down Expand Up @@ -81,7 +81,7 @@ def check_by_data(expected, result, max_error=1e-4):

def _convert_type(dtype):
    """Map a numpy dtype to the corresponding ONNX TensorProto data type.

    Uses helper.np_dtype_to_tensor_dtype, which replaces the removed
    onnx.mapping.NP_TYPE_TO_TENSOR_TYPE table.
    """
    assert not WITHOUT_ONNXRUNTIME
    return helper.np_dtype_to_tensor_dtype(dtype)


def _run_onnx(model, inputs, output_names):
Expand Down

0 comments on commit 07ef32a

Please sign in to comment.