Register ApplyAlpha layer templates #499

Merged (3 commits, Feb 23, 2022)
29 changes: 29 additions & 0 deletions hls4ml/backends/vivado/passes/quantization_templates.py
@@ -0,0 +1,29 @@
from hls4ml.backends.backend import get_backend
from hls4ml.model.optimizer.passes.qkeras import ApplyAlpha
from hls4ml.backends.template import LayerConfigTemplate, FunctionCallTemplate
from hls4ml.backends.vivado.passes.core_templates import batchnorm_config_template, batchnorm_function_template, batchnorm_include_list

class ApplyAlphaConfigTemplate(LayerConfigTemplate):
    def __init__(self):
        super().__init__(ApplyAlpha)
        self.template = batchnorm_config_template

    def format(self, node):
        params = self._default_config_params(node)
        params['n_in'] = node.get_input_variable().size_cpp()
        params['product_type'] = get_backend('vivado').product_type(node.get_input_variable().type.precision, node.get_weights('scale').type.precision)

        return self.template.format(**params)

class ApplyAlphaFunctionTemplate(FunctionCallTemplate):
    def __init__(self):
        super().__init__(ApplyAlpha, include_header=batchnorm_include_list)
        self.template = batchnorm_function_template

    def format(self, node):
        params = self._default_function_params(node)
        params['scale'] = node.get_weights('scale').name
        params['bias'] = node.get_weights('bias').name

        return self.template.format(**params)
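
The two classes above reuse the Vivado batch-normalization templates: an ApplyAlpha node (the layer the QKeras optimizer passes insert to apply a quantizer's scale and bias) is mathematically a scale-and-shift, so it can share the batchnorm config struct, function call, and include list. As a rough illustration of the mechanism only, the sketch below shows how a config template string is filled by format(**params); the template text and the parameter values are simplified stand-ins, not the actual batchnorm_config_template from core_templates.py.

# Illustrative sketch only: a simplified stand-in config template, not hls4ml's real one.
# LayerConfigTemplate subclasses collect per-layer parameters (via _default_config_params()
# plus the overrides in format()) and substitute them into a plain Python format string.
example_config_template = (
    "struct config{index} : nnet::batchnorm_config {{\n"
    "    static const unsigned n_in = {n_in};\n"
    "    typedef {product_type} product_t;\n"
    "}};\n"
)

params = {'index': 2, 'n_in': 16, 'product_type': 'nnet::product::mult<input_t, scale_t>'}
print(example_config_template.format(**params))
# struct config2 : nnet::batchnorm_config {
#     static const unsigned n_in = 16;
#     typedef nnet::product::mult<input_t, scale_t> product_t;
# };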

8 changes: 4 additions & 4 deletions test/pytest/test_qkeras.py
@@ -110,16 +110,16 @@ def randX_100_16():
 # Note 4-bit test can still fail sometimes depending on random seed
 # https://github.com/fastmachinelearning/hls4ml/issues/381
 #@pytest.mark.parametrize('bits', [4, 6, 8])
-@pytest.mark.parametrize('bits', [4])
-def test_single_dense_activation_exact(randX_100_16, bits):
+@pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
+def test_single_dense_activation_exact(randX_100_16, bits, alpha):
     '''
     Test a single Dense -> Activation layer topology for
     bit exactness with number of bits parameter
     '''
     X = randX_100_16
     model = Sequential()
     model.add(QDense(16, input_shape=(16,), name='fc1',
-                     kernel_quantizer=quantized_bits(bits,0,alpha=1), bias_quantizer=quantized_bits(bits,0,alpha=1),
+                     kernel_quantizer=quantized_bits(bits,0,alpha=alpha), bias_quantizer=quantized_bits(bits,0,alpha=1),
                      kernel_initializer='lecun_uniform'))
     model.add(QActivation(activation=quantized_relu(bits,0), name='relu1'))
     model.compile()

@@ -128,7 +128,7 @@ def test_single_dense_activation_exact(randX_100_16, bits):
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
-                                                           output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}'.format(bits)),
+                                                           output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(bits, alpha)),
                                                            part='xcu250-figd2104-2L-e')
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
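
The extra 'auto_po2' parametrisation is what exercises the new templates: when a QKeras quantizer is built with alpha='auto_po2' it chooses a per-channel power-of-two scale, and hls4ml's qkeras optimizer passes factor that scale out of the Dense weights into an ApplyAlpha layer, which now picks up the config and function templates registered above. A small sketch of where that scale comes from, assuming only public QKeras behaviour (the quantizer exposes the chosen scales through its scale attribute after it has been called; values below are illustrative):

# Sketch under the assumption stated above; not part of this PR.
import numpy as np
from qkeras import quantized_bits

q = quantized_bits(bits=4, integer=0, alpha='auto_po2')
weights = np.random.randn(16, 16).astype('float32')   # e.g. a 16x16 Dense kernel
quantized = q(weights)                                 # quantized weights with the scale applied
print(q.scale)                                         # per-channel power-of-two scales, e.g. 0.25, 0.5, 1.0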