diff --git a/hls4ml/converters/keras/reshape.py b/hls4ml/converters/keras/reshape.py
index 2442903117..f2db15d59e 100644
--- a/hls4ml/converters/keras/reshape.py
+++ b/hls4ml/converters/keras/reshape.py
@@ -27,9 +27,34 @@ def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader, con
 
     return layer, output_shape
 
+@keras_handler('UpSampling1D')
+def parse_upsampling1d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+    assert('UpSampling' in keras_layer['class_name'])
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    layer['in_height'] = 1
+    (
+        layer['in_width'],
+        layer['n_chan']
+    ) = parse_data_format(input_shapes[0], layer['data_format'])
+
+    layer['algorithm'] = 'nearest'
+
+    layer['width_factor'] = keras_layer['config']['size']
+
+    layer['out_height'] = 1
+    layer['out_width'] = layer['in_width'] * layer['width_factor']
+
+    if layer['data_format'] == 'channels_first':
+        output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_width']]
+    else:
+        output_shape = [input_shapes[0][0], layer['out_width'], layer['n_chan']]
+
+    return layer, output_shape
 
 @keras_handler('UpSampling2D')
-def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader, config):
+def parse_upsampling2d_layer(keras_layer, input_names, input_shapes, data_reader, config):
     assert('UpSampling2D' in keras_layer['class_name'])
 
     layer = parse_default_keras_layer(keras_layer, input_names)
diff --git a/hls4ml/model/layers.py b/hls4ml/model/layers.py
index 89be745ff1..fb4b7db8a8 100644
--- a/hls4ml/model/layers.py
+++ b/hls4ml/model/layers.py
@@ -783,8 +783,12 @@ def initialize(self):
 class Resize(Layer):
     def initialize(self):
         inp = self.get_input_variable()
-        shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
-        dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
+        if len(inp.shape) == 2: # 1D -> width + chan
+            shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
+            dims = ['OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
+        elif len(inp.shape) == 3: # 2D -> height + width + chan
+            shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
+            dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_CHAN_{}'.format(self.index)]
         self.add_output_variable(shape, dims, precision=inp.type.precision)
 
 class Transpose(Layer):
@@ -1012,6 +1016,7 @@ def _initialize_transforms(self):
     'Dot' : Dot,
     'Concatenate' : Concatenate,
     'Resize' : Resize,
+    'UpSampling1D' : Resize,
     'UpSampling2D' : Resize,
     'Transpose' : Transpose,
     'GarNet' : GarNet,
diff --git a/hls4ml/utils/config.py b/hls4ml/utils/config.py
index 562094b0bc..003282e54c 100644
--- a/hls4ml/utils/config.py
+++ b/hls4ml/utils/config.py
@@ -100,7 +100,7 @@ def config_from_keras_model(model, granularity='model', default_precision='ap_fi
     model_arch = json.loads(model.to_json())
 
     #Define supported layers
-    core_layers = ['InputLayer', 'Dropout', 'Flatten', 'Reshape', 'Permute', 'UpSampling2D']
+    core_layers = ['InputLayer', 'Dropout', 'Flatten', 'Reshape', 'Permute']
     dense_layers = ['Dense', 'BinaryDense', 'TernaryDense']
     conv_layers = ['Conv1D', 'Conv2D', 'BinaryConv2D']
     pooling_layers = ['MaxPooling1D', 'MaxPooling2D', 'GlobalMaxPooling1D', 'GlobalMaxPooling2D', 'AveragePooling1D', 'AveragePooling2D', 'GlobalAveragePooling1D', 'GlobalAveragePooling2D']
@@ -108,12 +108,13 @@ def config_from_keras_model(model, granularity='model', default_precision='ap_fi
     activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
     merge_layers = ['Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum', 'Concatenate', 'Dot']
     qkeras_layers = ['QDense', 'QActivation', 'QConv1D', 'QConv2D', 'QBatchNormalization', 'QConv2DBatchnorm']
+    upsampling_layers = ['UpSampling1D', 'UpSampling2D']
     reshaping_layers = ['ZeroPadding1D', 'ZeroPadding2D']
     graph_layers = ['GarNet', 'GarNetStack']
     #Define layers to skip because they're not configurable or not converted to HLS
     skip_layers = ['Dropout', 'Flatten', 'Reshape', 'Permute']
     #All supported layers
-    supported_layers = core_layers + dense_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers + qkeras_layers + reshaping_layers + graph_layers + skip_layers
+    supported_layers = core_layers + dense_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers + qkeras_layers + upsampling_layers + reshaping_layers + graph_layers + skip_layers
 
     keras_layer_config = None
     if model_arch['class_name'] == 'Sequential':
diff --git a/test/pytest/test_upsampling.py b/test/pytest/test_upsampling.py
new file mode 100644
index 0000000000..16af48973e
--- /dev/null
+++ b/test/pytest/test_upsampling.py
@@ -0,0 +1,67 @@
+import pytest
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import UpSampling1D, UpSampling2D
+import numpy as np
+import hls4ml
+from pathlib import Path
+
+test_root_path = Path(__file__).parent
+
+in_height = 6
+in_width = 8
+in_feat = 4
+
+size = 2
+atol = 5e-3
+
+@pytest.fixture(scope='module')
+def data_1d():
+    X = np.random.rand(100, in_width, in_feat)
+    return X
+
+@pytest.fixture(scope='module')
+def data_2d():
+    X = np.random.rand(100, in_height, in_width, in_feat)
+    return X
+
+
+@pytest.fixture(scope='module')
+def keras_model_1d():
+    model = Sequential()
+    model.add(UpSampling1D(input_shape=(in_width, in_feat), size=size))
+    model.compile()
+    return model
+
+@pytest.fixture(scope='module')
+def keras_model_2d():
+    model = Sequential()
+    model.add(UpSampling2D(input_shape=(in_height, in_width, in_feat), size=(size, size)))
+    model.compile()
+    return model
+
+
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize('model_type', ['1d', '2d'])
+def test_upsampling(keras_model_1d, keras_model_2d, data_1d, data_2d, model_type, io_type):
+    if model_type == '1d':
+        model = keras_model_1d
+        data = data_1d
+    else:
+        model = keras_model_2d
+        data = data_2d
+
+    config = hls4ml.utils.config_from_keras_model(model,
+                                                  default_precision='ap_fixed<32,1>',
+                                                  granularity='name')
+    odir = str(test_root_path / f'hls4mlprj_upsampling_{model_type}_{io_type}')
+    hls_model = hls4ml.converters.convert_from_keras_model(model,
+                                                           hls_config=config,
+                                                           io_type=io_type,
+                                                           output_dir=odir,
+                                                           part='xcvu9p-flgb2104-2-i')
+    hls_model.compile()
+
+    # Predict
+    y_keras = model.predict(data).flatten()
+    y_hls = hls_model.predict(data).flatten()
+    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
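
Usage note: a minimal sketch of converting a model containing the newly supported UpSampling1D layer, mirroring test/pytest/test_upsampling.py above. The output directory name is an illustrative placeholder; the part string is the one used in the test, and any supported FPGA part should work.

# Minimal sketch, assuming the same conversion API exercised by the test above.
# 'hls4mlprj_upsampling1d_example' is a placeholder output directory.
import numpy as np
from tensorflow.keras.layers import UpSampling1D
from tensorflow.keras.models import Sequential

import hls4ml

# Keras model with the newly supported layer: width 8 is upsampled to 16, channels unchanged.
model = Sequential()
model.add(UpSampling1D(input_shape=(8, 4), size=2))

config = hls4ml.utils.config_from_keras_model(model,
                                              default_precision='ap_fixed<32,1>',
                                              granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                       hls_config=config,
                                                       io_type='io_parallel',
                                                       output_dir='hls4mlprj_upsampling1d_example',
                                                       part='xcvu9p-flgb2104-2-i')
hls_model.compile()

# Nearest-neighbour upsampling only repeats inputs, so Keras and HLS outputs should agree closely.
X = np.random.rand(10, 8, 4)
np.testing.assert_allclose(model.predict(X).flatten(),
                           hls_model.predict(X).flatten(),
                           rtol=0, atol=5e-3)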