Support UpSampling1D (#475)
* Support UpSampling1D

* Proper output directory for upsampling tests

Co-authored-by: Javier Duarte <[email protected]>
vloncar and jmduarte authored Apr 28, 2022
1 parent bc51a44 commit 219c7de
Showing 4 changed files with 103 additions and 5 deletions.
27 changes: 26 additions & 1 deletion hls4ml/converters/keras/reshape.py
@@ -27,9 +27,34 @@ def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader, config):

     return layer, output_shape

+@keras_handler('UpSampling1D')
+def parse_upsampling1d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('UpSampling' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    layer['in_height'] = 1
+    (
+        layer['in_width'],
+        layer['n_chan']
+    ) = parse_data_format(input_shapes[0], layer['data_format'])
+
+    layer['algorithm'] = 'nearest'
+
+    layer['width_factor'] = keras_layer['config']['size']
+
+    layer['out_height'] = 1
+    layer['out_width'] = layer['in_width'] * layer['width_factor']
+
+    if layer['data_format'] == 'channels_first':
+        output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_width']]
+    else:
+        output_shape = [input_shapes[0][0], layer['out_width'], layer['n_chan']]
+
+    return layer, output_shape
+
 @keras_handler('UpSampling2D')
-def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+def parse_upsampling2d_layer(keras_layer, input_names, input_shapes, data_reader, config):
     assert('UpSampling2D' in keras_layer['class_name'])

     layer = parse_default_keras_layer(keras_layer, input_names)
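
For context, the shape bookkeeping done by the new parse_upsampling1d_layer handler can be illustrated with a small, self-contained Python sketch. It does not call any hls4ml internals; the input shape and upsampling factor below are made-up example values for the channels_last case.

    # Illustrative sketch only: mirrors the arithmetic of parse_upsampling1d_layer
    # above for a hypothetical UpSampling1D(size=2) applied to an (8, 4) input.
    in_width, n_chan = 8, 4       # width and channels of the 1D input
    width_factor = 2              # the Keras 'size' config value

    out_width = in_width * width_factor       # nearest-neighbour repetition along the width axis
    output_shape = [None, out_width, n_chan]  # batch dimension stays symbolic

    assert output_shape == [None, 16, 4]
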
9 changes: 7 additions & 2 deletions hls4ml/model/layers.py
@@ -783,8 +783,12 @@ def initialize(self):
 class Resize(Layer):
     def initialize(self):
         inp = self.get_input_variable()
-        shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
-        dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
+        if len(inp.shape) == 2:  # 1D -> width + chan
+            shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
+            dims = ['OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
+        elif len(inp.shape) == 3:  # 2D -> height + width + chan
+            shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
+            dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
         self.add_output_variable(shape, dims, precision=inp.type.precision)

 class Transpose(Layer):
@@ -1012,6 +1016,7 @@ def _initialize_transforms(self):
     'Dot' : Dot,
     'Concatenate' : Concatenate,
     'Resize' : Resize,
+    'UpSampling1D' : Resize,
     'UpSampling2D' : Resize,
     'Transpose' : Transpose,
     'GarNet' : GarNet,
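
The effect of the rank check added to Resize.initialize can be summarised with a short, standalone sketch; the helper function and the shape values below are hypothetical and only restate the branch logic, they are not part of hls4ml.

    # Standalone sketch of the new branch in Resize.initialize: the output
    # variable's shape now follows the rank of the input tensor.
    def resize_output_shape(input_shape, out_height, out_width, n_chan):
        if len(input_shape) == 2:      # 1D input: (width, channels)
            return [out_width, n_chan]
        elif len(input_shape) == 3:    # 2D input: (height, width, channels)
            return [out_height, out_width, n_chan]
        raise ValueError('unexpected input rank')

    print(resize_output_shape((8, 4), 1, 16, 4))      # [16, 4]     -> UpSampling1D case
    print(resize_output_shape((6, 8, 4), 12, 16, 4))  # [12, 16, 4] -> UpSampling2D case
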
5 changes: 3 additions & 2 deletions hls4ml/utils/config.py
@@ -100,20 +100,21 @@ def config_from_keras_model(model, granularity='model', default_precision='ap_fi
     model_arch = json.loads(model.to_json())

     #Define supported layers
-    core_layers = ['InputLayer', 'Dropout', 'Flatten', 'Reshape', 'Permute', 'UpSampling2D']
+    core_layers = ['InputLayer', 'Dropout', 'Flatten', 'Reshape', 'Permute']
     dense_layers = ['Dense', 'BinaryDense', 'TernaryDense']
     conv_layers = ['Conv1D', 'Conv2D', 'BinaryConv2D']
     pooling_layers = ['MaxPooling1D', 'MaxPooling2D', 'GlobalMaxPooling1D', 'GlobalMaxPooling2D', 'AveragePooling1D', 'AveragePooling2D', 'GlobalAveragePooling1D', 'GlobalAveragePooling2D']
     norm_layers = ['BatchNormalization']
     activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
     merge_layers = ['Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum', 'Concatenate', 'Dot']
     qkeras_layers = ['QDense', 'QActivation', 'QConv1D', 'QConv2D', 'QBatchNormalization', 'QConv2DBatchnorm']
+    upsampling_layers = ['UpSampling1D', 'UpSampling2D']
     reshaping_layers = ['ZeroPadding1D', 'ZeroPadding2D']
     graph_layers = ['GarNet', 'GarNetStack']
     #Define layers to skip because they're not configurable or not converted to HLS
     skip_layers = ['Dropout', 'Flatten', 'Reshape', 'Permute']
     #All supported layers
-    supported_layers = core_layers + dense_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers + qkeras_layers + reshaping_layers + graph_layers + skip_layers
+    supported_layers = core_layers + dense_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers + qkeras_layers + upsampling_layers + reshaping_layers + graph_layers + skip_layers

     keras_layer_config = None
     if model_arch['class_name'] == 'Sequential':
67 changes: 67 additions & 0 deletions test/pytest/test_upsampling.py
@@ -0,0 +1,67 @@
+import pytest
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import UpSampling1D, UpSampling2D
+import numpy as np
+import hls4ml
+from pathlib import Path
+
+test_root_path = Path(__file__).parent
+
+in_height = 6
+in_width = 8
+in_feat = 4
+
+size = 2
+atol = 5e-3
+
+@pytest.fixture(scope='module')
+def data_1d():
+    X = np.random.rand(100, in_width, in_feat)
+    return X
+
+@pytest.fixture(scope='module')
+def data_2d():
+    X = np.random.rand(100, in_height, in_width, in_feat)
+    return X
+
+
+@pytest.fixture(scope='module')
+def keras_model_1d():
+    model = Sequential()
+    model.add(UpSampling1D(input_shape=(in_width, in_feat), size=size))
+    model.compile()
+    return model
+
+@pytest.fixture(scope='module')
+def keras_model_2d():
+    model = Sequential()
+    model.add(UpSampling2D(input_shape=(in_height, in_width, in_feat), size=(size, size)))
+    model.compile()
+    return model
+
+
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize('model_type', ['1d', '2d'])
+def test_upsampling(keras_model_1d, keras_model_2d, data_1d, data_2d, model_type, io_type):
+    if model_type == '1d':
+        model = keras_model_1d
+        data = data_1d
+    else:
+        model = keras_model_2d
+        data = data_2d
+
+    config = hls4ml.utils.config_from_keras_model(model,
+                                                  default_precision='ap_fixed<32,1>',
+                                                  granularity='name')
+    odir = str(test_root_path / f'hls4mlprj_upsampling_{model_type}_{io_type}')
+    hls_model = hls4ml.converters.convert_from_keras_model(model,
+                                                           hls_config=config,
+                                                           io_type=io_type,
+                                                           output_dir=odir,
+                                                           part='xcvu9p-flgb2104-2-i')
+    hls_model.compile()
+
+    # Predict
+    y_keras = model.predict(data).flatten()
+    y_hls = hls_model.predict(data).flatten()
+    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
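
As a usage note, the new test can be run on its own with pytest from the repository root, for example with a command along the lines of pytest test/pytest/test_upsampling.py; adding a filter such as -k '1d and io_parallel' (an example expression, not required) restricts it to a single parametrised combination.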
