From 833778bb0815e883c59b2a4f90f477ec71e8c064 Mon Sep 17 00:00:00 2001
From: Sioni Summers
Date: Tue, 22 Feb 2022 12:53:42 +0100
Subject: [PATCH 1/3] Register ApplyAlpha layer's templates for Vivado backend

---
 .../vivado/passes/quantization_templates.py | 29 +++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 hls4ml/backends/vivado/passes/quantization_templates.py

diff --git a/hls4ml/backends/vivado/passes/quantization_templates.py b/hls4ml/backends/vivado/passes/quantization_templates.py
new file mode 100644
index 000000000..fa61b81ff
--- /dev/null
+++ b/hls4ml/backends/vivado/passes/quantization_templates.py
@@ -0,0 +1,29 @@
+from hls4ml.backends.backend import get_backend
+from hls4ml.model.optimizer.passes.qkeras import ApplyAlpha
+from hls4ml.backends.template import LayerConfigTemplate, FunctionCallTemplate
+from hls4ml.backends.vivado.passes.core_templates import batchnorm_config_template, batchnorm_function_template, batchnorm_include_list
+
+class ApplyAlphaConfigTemplate(LayerConfigTemplate):
+    def __init__(self):
+        super().__init__(ApplyAlpha)
+        self.template = batchnorm_config_template
+
+    def format(self, node):
+        params = self._default_config_params(node)
+        params['n_in'] = node.get_input_variable().size_cpp()
+        params['product_type'] = get_backend('vivado').product_type(node.get_input_variable().type.precision, node.get_weights('scale').type.precision)
+
+        return self.template.format(**params)
+
+class ApplyAlphaFunctionTemplate(FunctionCallTemplate):
+    def __init__(self):
+        super().__init__(ApplyAlpha, include_header=batchnorm_include_list)
+        self.template = batchnorm_function_template
+
+    def format(self, node):
+        params = self._default_function_params(node)
+        params['scale'] = node.get_weights('scale').name
+        params['bias'] = node.get_weights('bias').name
+
+        return self.template.format(**params)
+

From d3a7bc7055de9be308dd6362f79ba1ac35e08565 Mon Sep 17 00:00:00 2001
From: Jovan Mitrevski
Date: Mon, 24 Jan 2022 15:58:27 -0600
Subject: [PATCH 2/3] add auto_po2 qkeras test that revealed problem earlier

---
 test/pytest/test_qkeras.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/pytest/test_qkeras.py b/test/pytest/test_qkeras.py
index 51222aebf..6c1138729 100644
--- a/test/pytest/test_qkeras.py
+++ b/test/pytest/test_qkeras.py
@@ -110,8 +110,8 @@ def randX_100_16():
 # Note 4-bit test can still fail sometimes depending on random seed
 # https://github.com/fastmachinelearning/hls4ml/issues/381
 #@pytest.mark.parametrize('bits', [4, 6, 8])
-@pytest.mark.parametrize('bits', [4])
-def test_single_dense_activation_exact(randX_100_16, bits):
+@pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
+def test_single_dense_activation_exact(randX_100_16, bits, alpha):
   '''
   Test a single Dense -> Activation layer topology for
   bit exactness with number of bits parameter
@@ -119,7 +119,7 @@
   X = randX_100_16
   model = Sequential()
   model.add(QDense(16, input_shape=(16,), name='fc1',
-                   kernel_quantizer=quantized_bits(bits,0,alpha=1), bias_quantizer=quantized_bits(bits,0,alpha=1),
+                   kernel_quantizer=quantized_bits(bits,0,alpha=alpha), bias_quantizer=quantized_bits(bits,0,alpha=1),
                    kernel_initializer='lecun_uniform'))
   model.add(QActivation(activation=quantized_relu(bits,0), name='relu1'))
   model.compile()

From c3d431ee66998cbb7b032dd50240df5ed23cfe61 Mon Sep 17 00:00:00 2001
From: Sioni Summers
Date: Tue, 22 Feb 2022 12:55:58 +0100
Subject: [PATCH 3/3] Give the qkeras test_single_dense_activation_exact tests
 unique output directories again

---
 test/pytest/test_qkeras.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/pytest/test_qkeras.py b/test/pytest/test_qkeras.py
index 6c1138729..1b16ae21a 100644
--- a/test/pytest/test_qkeras.py
+++ b/test/pytest/test_qkeras.py
@@ -128,7 +128,7 @@ def test_single_dense_activation_exact(randX_100_16, bits, alpha):
   config = hls4ml.utils.config_from_keras_model(model, granularity='name')
 
   hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config,
-                                                         output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}'.format(bits)),
+                                                         output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(bits, alpha)),
                                                          part='xcu250-figd2104-2L-e')
   hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
   hls_model.compile()
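
For context, a minimal end-to-end sketch of the flow these three patches exercise together. A QKeras quantizer built with alpha='auto_po2' carries a power-of-two scale, which hls4ml's QKeras optimizer passes factor out of the Dense layer into an ApplyAlpha layer (a scale-and-shift operation, which is why PATCH 1/3 reuses the batchnorm templates for it). The snippet only reuses names that appear in the patches above; the output directory is an arbitrary illustrative choice, and this is a sketch, not part of the patch series.

    import numpy as np
    from tensorflow.keras.models import Sequential
    from qkeras import QDense, quantized_bits
    import hls4ml

    # Dense layer whose kernel quantizer uses a power-of-two scale;
    # hls4ml factors the scale into an ApplyAlpha layer, which the
    # Vivado backend can only render once PATCH 1/3 registers its
    # config and function-call templates.
    model = Sequential()
    model.add(QDense(16, input_shape=(16,), name='fc1',
                     kernel_quantizer=quantized_bits(4, 0, alpha='auto_po2'),
                     bias_quantizer=quantized_bits(4, 0, alpha=1),
                     kernel_initializer='lecun_uniform'))
    model.compile()

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config,
        output_dir='hls4mlprj_applyalpha_demo',  # illustrative scratch directory
        part='xcu250-figd2104-2L-e')

    # Writing and compiling the project is the step that needed the new
    # templates; without them there is nothing to emit for ApplyAlpha.
    hls_model.compile()
    y_hls = hls_model.predict(np.random.rand(100, 16).astype(np.float32))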