Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use wider accum_t for (average) pooling #681

Merged
merged 1 commit into from
Nov 9, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 32 additions & 1 deletion hls4ml/backends/vivado/vivado_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from hls4ml.backends import FPGABackend
from hls4ml.backends.fpga.fpga_types import APTypeConverter, HLSTypeConverter, VivadoArrayVariableConverter
from hls4ml.report import parse_vivado_report
from hls4ml.utils.fixed_point_utils import ceil_log2

class VivadoBackend(FPGABackend):
def __init__(self):
Expand Down Expand Up @@ -129,7 +130,7 @@ def build(self, model, reset=False, csim=True, synth=True, cosim=False, validati

def _validate_conv_strategy(self, layer):
if layer.model.config.model_strategy.lower() != 'resource':
print('WARNING: Cannot use "Latency" model strategy for {} layer. Switching to "Resource" strategy.')
print(f'WARNING: Cannot use "Latency" model strategy for {layer.name} layer. Switching to "Resource" strategy.')
layer.model.config.model_strategy = 'Resource'

@layer_optimizer(Layer)
Expand Down Expand Up @@ -251,6 +252,36 @@ def init_depconv2d(self, layer):
layer.set_attr('n_partitions', 1) #TODO Once we have SeparableConv implementation for io_parallel this should be set properly
layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

def _set_pooling_accum_t(self, layer, pool_size):
    """Widen a pooling layer's accumulator precision for the pool window.

    Summing (for average pooling) up to ``pool_size`` values can grow the
    magnitude by up to ``ceil(log2(pool_size))`` bits, and the subsequent
    division needs the same number of extra fractional bits, so both the
    integer and fractional parts of ``accum_t`` are widened in place.

    Args:
        layer: layer whose 'accum_t' attribute precision is mutated.
        pool_size: number of elements accumulated per pooling window.
    """
    growth = ceil_log2(pool_size)
    precision = layer.get_attr('accum_t').precision
    precision.integer += growth
    precision.fractional += growth

@layer_optimizer(Pooling1D)
def init_pooling1d(self, layer):
    """Initialize a Pooling1D layer: widen accum_t for the pool window and
    record the configured conv implementation variant."""
    self._set_pooling_accum_t(layer, layer.get_attr('pool_width'))
    layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(Pooling2D)
def init_pooling2d(self, layer):
    """Initialize a Pooling2D layer: widen accum_t for the 2D pool window and
    record the configured conv implementation variant."""
    window_area = layer.get_attr('pool_height') * layer.get_attr('pool_width')
    self._set_pooling_accum_t(layer, window_area)
    layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(GlobalPooling1D)
def init_global_pooling1d(self, layer):
    """Initialize a GlobalPooling1D layer: the whole input ('n_in' elements)
    is one pool window, so widen accum_t accordingly."""
    self._set_pooling_accum_t(layer, layer.get_attr('n_in'))

@layer_optimizer(GlobalPooling2D)
def init_global_pooling2d(self, layer):
    """Initialize a GlobalPooling2D layer: the whole input plane is one pool
    window, so widen accum_t by the full height*width element count."""
    plane_elems = layer.get_attr('in_height') * layer.get_attr('in_width')
    self._set_pooling_accum_t(layer, plane_elems)

@layer_optimizer(Activation)
def init_activation(self, layer):
if 'table_t' not in layer.attributes:
Expand Down
2 changes: 0 additions & 2 deletions hls4ml/model/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,6 @@ def initialize(self):
dims = ['N_FILT_{}'.format(self.index), 'N_OUTPUTS_{}'.format(self.index)]
self.add_output_variable(shape, dims)
self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])
self.set_attr('implementation', self.model.config.get_conv_implementation(self).lower())

class Pooling2D(Layer):
_expected_attributes = [
Expand Down Expand Up @@ -607,7 +606,6 @@ def initialize(self):
dims = ['N_FILT_{}'.format(self.index), 'OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index)]
self.add_output_variable(shape, dims)
self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])
self.set_attr('implementation', self.model.config.get_conv_implementation(self).lower())

class GlobalPooling1D(Layer):
_expected_attributes = [
Expand Down
8 changes: 4 additions & 4 deletions hls4ml/templates/vivado/nnet_utils/nnet_pooling_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ void compute_pool_buffer_2d(
static int sX = 0; // stride X
static int sY = 0; // stride Y

typename data_T::value_type pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width];
typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width];
#pragma HLS ARRAY_PARTITION variable=pool_window complete

static typename data_T::value_type kernel_data[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt];
Expand All @@ -192,7 +192,7 @@ void compute_pool_buffer_2d(
}

// Compute Pooling
res_pack[i_ic] = reduce_pool<typename data_T::value_type, CONFIG_T::pool_height * CONFIG_T::pool_width, CONFIG_T>(pool_window);
res_pack[i_ic] = reduce_pool<typename CONFIG_T::accum_t, CONFIG_T::pool_height * CONFIG_T::pool_width, CONFIG_T>(pool_window);
}

// Write to output
Expand Down Expand Up @@ -378,7 +378,7 @@ void compute_pool_buffer_1d(
static int pX = 0;
static int sX = 0;

typename data_T::value_type pool_window[CONFIG_T::pool_width];
typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_width];
#pragma HLS ARRAY_PARTITION variable=pool_window complete

static typename data_T::value_type kernel_data[CONFIG_T::pool_width * CONFIG_T::n_filt];
Expand All @@ -402,7 +402,7 @@ void compute_pool_buffer_1d(
}

// Compute Pooling
res_pack[i_ic] = reduce_pool<typename data_T::value_type, CONFIG_T::pool_width, CONFIG_T>(pool_window);
res_pack[i_ic] = reduce_pool<typename CONFIG_T::accum_t, CONFIG_T::pool_width, CONFIG_T>(pool_window);
}

// Write to output
Expand Down
7 changes: 5 additions & 2 deletions test/pytest/test_cnn_mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,13 @@ def keras_model(mnist_data):
keras_model.fit(x_train, y_train, batch_size=32, epochs=5, verbose=0)
return keras_model

@pytest.mark.parametrize('backend, io_type, strategy', [
@pytest.mark.parametrize('backend,io_type,strategy', [
('Quartus', 'io_parallel', 'resource'),

('Vivado', 'io_parallel', 'resource'),
('Vivado', 'io_parallel', 'latency')
('Vivado', 'io_parallel', 'latency'),
('Vivado', 'io_stream', 'latency'),
('Vivado', 'io_stream', 'resource')
])
def test_mnist_cnn(keras_model, mnist_data, backend, io_type, strategy):
x_train, y_train, x_test, y_test = mnist_data
Expand Down