generated layers, remove conv and transposed conv
Those were the remaining modules with params.

#82
albertz committed Jan 6, 2022
1 parent 49ae94d commit 28450df
Showing 2 changed files with 3 additions and 218 deletions.
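
Aside on what the first file change does: `nn/_generate_layers.py` keeps a list of RETURNN layer names that the wrapper generator skips, and this commit adds "conv" and "transposed_conv" to it, presumably so that these modules with parameters get hand-written implementations instead of auto-generated wrappers. A minimal, self-contained sketch of such a skip filter; `EXCLUDED_LAYERS` and `layers_to_generate` are illustrative names, not the generator's actual internals:

# Illustrative sketch of an exclusion filter; names are assumptions,
# not the actual internals of _generate_layers.py.
EXCLUDED_LAYERS = {
  "dropout", "batch_norm", "linear",
  "conv",             # newly excluded by this commit
  "transposed_conv",  # newly excluded by this commit
  "rec", "self_attention", "concat_attention",
}


def layers_to_generate(all_layer_names):
  """Keep only the layer names that still get auto-generated wrappers."""
  return [name for name in all_layer_names if name not in EXCLUDED_LAYERS]


assert layers_to_generate(["conv", "pool", "transposed_conv", "dct"]) == ["pool", "dct"]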
2 changes: 2 additions & 0 deletions nn/_generate_layers.py
@@ -45,6 +45,8 @@
   "dropout",
   "batch_norm",
   "linear",
+  "conv",
+  "transposed_conv",
   "rec",
   "self_attention",
   "concat_attention",
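
Before the removal diff below: as the deleted code itself shows, each generated wrapper was a thin module that merged its constructor options with its call arguments into a RETURNN layer dict via `make_layer`. A self-contained sketch of that mapping, reduced to the required `Conv` options; plain strings stand in for `Dim`/`LayerRef` objects and `make_conv_layer_dict` is an illustrative name:

# Sketch of the layer dict the removed Conv wrapper assembled; plain strings
# stand in for Dim/LayerRef objects here.
def make_conv_layer_dict(source, *, out_dim, filter_size, padding, in_spatial_dims):
  """Mirror of what Conv.__call__ plus Conv.get_opts produced (see diff below)."""
  return {
    'class': 'conv',
    'from': source,
    'in_spatial_dims': in_spatial_dims,
    'out_dim': out_dim,
    'filter_size': filter_size,
    'padding': padding,
  }


print(make_conv_layer_dict(
  'data', out_dim='feat', filter_size=(3,), padding='same', in_spatial_dims=('time',)))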
219 changes: 1 addition & 218 deletions nn/_generated_layers.py
@@ -1,6 +1,6 @@
 """
 This file is auto-generated by _generate_layers.py.
-RETURNN: 1.20220105.113203+git.8470324
+RETURNN: 1.20220106.130144+git.248f8eb
 These are the RETURNN layers directly wrapped.
 Note that we intentionally exclude some layers or options for more consistency.
@@ -1147,123 +1147,6 @@ def cast(
     **args}, name=name or 'cast')


-class Conv(_Base):
-  """
-  A generic convolution layer which supports 1D, 2D and 3D convolution.
-  Pooling can be done in the separate "pool" layer.
-  """
-  returnn_layer_class = 'conv'
-  has_recurrent_state = False
-  has_variables = True
-
-  # noinspection PyShadowingBuiltins,PyShadowingNames
-  def __init__(self,
-               *,
-               out_dim: Dim,
-               filter_size: Sequence[int],
-               padding: str,
-               strides: Union[int, Sequence[int]] = NotSpecified,
-               dilation_rate: Union[int, Sequence[int]] = NotSpecified,
-               groups: int = NotSpecified,
-               input_expand_dims: int = NotSpecified,
-               input_add_feature_dim: bool = NotSpecified,
-               input_split_feature_dim: Optional[int] = NotSpecified,
-               in_dim: Optional[Dim] = NotSpecified,
-               out_spatial_dims: Optional[Sequence[Dim]] = NotSpecified,
-               with_bias: Union[bool, NotSpecified] = NotSpecified,
-               forward_weights_init: Any = NotSpecified,
-               bias_init: Any = NotSpecified,
-               filter_perm: Optional[Dict[str, str]] = NotSpecified,
-               **kwargs):
-    """
-    :param Dim out_dim:
-    :param tuple[int] filter_size: (width,), (height,width) or (depth,height,width) for 1D/2D/3D conv.
-      the input data ndim must match, or you can add dimensions via input_expand_dims or input_add_feature_dim.
-      it will automatically swap the batch-dim to the first axis of the input data.
-    :param str padding: "same" or "valid"
-    :param int|tuple[int] strides: strides for the spatial dims,
-      i.e. length of this tuple should be the same as filter_size, or a single int.
-    :param int|tuple[int] dilation_rate: dilation for the spatial dims
-    :param int groups: grouped convolution
-    :param int input_expand_dims: number of spatial dims to add to the input
-    :param bool input_add_feature_dim: will add a dim at the end and use input-feature-dim == 1,
-      and use the original input feature-dim as a spatial dim.
-    :param None|int input_split_feature_dim: if set, like input_add_feature_dim it will add a new feature dim
-      which is of value input_split_feature_dim, and the original input feature dim
-      will be divided by input_split_feature_dim, thus it must be a multiple of that value.
-    :param Dim|None in_dim:
-    :param Sequence[Dim]|None out_spatial_dims:
-    :param bool|NotSpecified with_bias: if True, will add a bias to the output features. False by default
-    :param forward_weights_init:
-    :param bias_init:
-    :param dict[str,str]|None filter_perm: transposes the filter (input filter as layer)
-    """
-    super().__init__(**kwargs)
-    self.out_dim = out_dim
-    self.filter_size = filter_size
-    self.padding = padding
-    self.strides = strides
-    self.dilation_rate = dilation_rate
-    self.groups = groups
-    self.input_expand_dims = input_expand_dims
-    self.input_add_feature_dim = input_add_feature_dim
-    self.input_split_feature_dim = input_split_feature_dim
-    self.in_dim = in_dim
-    self.out_spatial_dims = out_spatial_dims
-    self.with_bias = with_bias
-    self.forward_weights_init = forward_weights_init
-    self.bias_init = bias_init
-    self.filter_perm = filter_perm
-
-  def get_opts(self):
-    """
-    Return all options
-    """
-    opts = {
-      'out_dim': self.out_dim,
-      'filter_size': self.filter_size,
-      'padding': self.padding,
-      'strides': self.strides,
-      'dilation_rate': self.dilation_rate,
-      'groups': self.groups,
-      'input_expand_dims': self.input_expand_dims,
-      'input_add_feature_dim': self.input_add_feature_dim,
-      'input_split_feature_dim': self.input_split_feature_dim,
-      'in_dim': self.in_dim,
-      'out_spatial_dims': self.out_spatial_dims,
-      'with_bias': self.with_bias,
-      'forward_weights_init': self.forward_weights_init,
-      'bias_init': self.bias_init,
-      'filter_perm': self.filter_perm,
-    }
-    opts = {key: value for (key, value) in opts.items() if value is not NotSpecified}
-    return {**opts, **super().get_opts()}
-
-  # noinspection PyShadowingBuiltins,PyShadowingNames
-  def __call__(self,
-               source: LayerRef,
-               *,
-               in_spatial_dims: Sequence[Dim],
-               filter: Optional[LayerRef] = NotSpecified,
-               bias: Optional[LayerRef] = NotSpecified,
-               ) -> Layer:
-    """
-    Make layer dict
-    """
-    assert isinstance(source, LayerRef)
-    args = {
-      'in_spatial_dims': in_spatial_dims,
-      'filter': filter,
-      'bias': bias,
-    }
-    args = {key: value for (key, value) in args.items() if value is not NotSpecified}
-    return make_layer({
-      'class': 'conv',
-      'from': source,
-      **args,
-      **self.get_opts()}, module=self)
-
-
 # noinspection PyShadowingBuiltins,PyShadowingNames
 def pool(
     source: LayerRef,
@@ -1343,106 +1226,6 @@ def dct(
     **args}, name=name or 'dct')


-class TransposedConv(_Base):
-  """
-  Transposed convolution, sometimes also called deconvolution.
-  See :func:`tf.nn.conv2d_transpose` (currently we support 1D/2D).
-  """
-  returnn_layer_class = 'transposed_conv'
-  has_recurrent_state = False
-  has_variables = True
-
-  # noinspection PyShadowingBuiltins,PyShadowingNames
-  def __init__(self,
-               *,
-               out_dim: Dim,
-               filter_size: Sequence[int],
-               strides: Optional[Sequence[int]] = NotSpecified,
-               padding: str = NotSpecified,
-               remove_padding: Union[Sequence[int], int] = NotSpecified,
-               output_padding: Optional[Union[Sequence[Optional[int]], int]] = NotSpecified,
-               in_dim: Optional[Dim] = NotSpecified,
-               out_spatial_dims: Optional[Sequence[Dim]] = NotSpecified,
-               with_bias: bool = NotSpecified,
-               forward_weights_init: Any = NotSpecified,
-               bias_init: Any = NotSpecified,
-               filter_perm: Optional[Dict[str, str]] = NotSpecified,
-               **kwargs):
-    """
-    :param Dim out_dim:
-    :param list[int] filter_size:
-    :param list[int]|None strides: specifies the upscaling. by default, same as filter_size
-    :param str padding: "same" or "valid"
-    :param list[int]|int remove_padding:
-    :param list[int|None]|int|None output_padding:
-    :param Dim|None in_dim:
-    :param Sequence[Dim]|None out_spatial_dims:
-    :param bool with_bias: whether to add a bias. enabled by default.
-      Note that the default is different from ConvLayer!
-    :param forward_weights_init:
-    :param bias_init:
-    :param dict[str,str]|None filter_perm: transposes the filter (input filter as layer)
-    """
-    super().__init__(**kwargs)
-    self.out_dim = out_dim
-    self.filter_size = filter_size
-    self.strides = strides
-    self.padding = padding
-    self.remove_padding = remove_padding
-    self.output_padding = output_padding
-    self.in_dim = in_dim
-    self.out_spatial_dims = out_spatial_dims
-    self.with_bias = with_bias
-    self.forward_weights_init = forward_weights_init
-    self.bias_init = bias_init
-    self.filter_perm = filter_perm
-
-  def get_opts(self):
-    """
-    Return all options
-    """
-    opts = {
-      'out_dim': self.out_dim,
-      'filter_size': self.filter_size,
-      'strides': self.strides,
-      'padding': self.padding,
-      'remove_padding': self.remove_padding,
-      'output_padding': self.output_padding,
-      'in_dim': self.in_dim,
-      'out_spatial_dims': self.out_spatial_dims,
-      'with_bias': self.with_bias,
-      'forward_weights_init': self.forward_weights_init,
-      'bias_init': self.bias_init,
-      'filter_perm': self.filter_perm,
-    }
-    opts = {key: value for (key, value) in opts.items() if value is not NotSpecified}
-    return {**opts, **super().get_opts()}
-
-  # noinspection PyShadowingBuiltins,PyShadowingNames
-  def __call__(self,
-               source: LayerRef,
-               *,
-               in_spatial_dims: Sequence[Dim],
-               filter: Optional[LayerRef] = NotSpecified,
-               bias: Optional[LayerRef] = NotSpecified,
-               ) -> Layer:
-    """
-    Make layer dict
-    """
-    assert isinstance(source, LayerRef)
-    args = {
-      'in_spatial_dims': in_spatial_dims,
-      'filter': filter,
-      'bias': bias,
-    }
-    args = {key: value for (key, value) in args.items() if value is not NotSpecified}
-    return make_layer({
-      'class': 'transposed_conv',
-      'from': source,
-      **args,
-      **self.get_opts()}, module=self)
-
-
 # noinspection PyShadowingBuiltins,PyShadowingNames
 def reduce(
     source: LayerRef,
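
For reference, the removed wrappers were used in two steps, construct then call, per the signatures in the diff above. A hedged usage sketch; the `nn` namespace, the dim helpers, and the input `x` are assumptions for illustration, not a verified returnn_common snippet:

# Hedged usage sketch based only on the removed signatures above.
time_dim = nn.SpatialDim("time")           # assumed dim helper
feat_dim = nn.FeatureDim("feature", 128)   # assumed dim helper

conv = Conv(out_dim=feat_dim, filter_size=(3,), padding="same")
y = conv(x, in_spatial_dims=[time_dim])    # x: some LayerRef with a time axis

deconv = TransposedConv(out_dim=feat_dim, filter_size=(2,))  # strides default to filter_size
z = deconv(y, in_spatial_dims=[time_dim])  # upscales the time axis by factor 2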
