Add a test for QKeras activations passed as an argument
AdrianAlan committed May 27, 2022
1 parent 9d7df61 commit 41a6d5f
Showing 1 changed file with 82 additions and 2 deletions.
test/pytest/test_qkeras.py: 82 additions, 2 deletions
@@ -6,10 +6,10 @@
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
-from tensorflow.keras.models import Sequential, model_from_json
+from tensorflow.keras.models import Sequential, Model, model_from_json
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1
-from tensorflow.keras.layers import Activation, BatchNormalization
+from tensorflow.keras.layers import Activation, BatchNormalization, Input
from qkeras.qlayers import QDense, QActivation
from qkeras.quantizers import quantized_bits, quantized_relu, ternary, binary
from qkeras.utils import _add_supported_quantized_objects; co = {}; _add_supported_quantized_objects(co)
@@ -228,3 +228,83 @@ def test_quantizer(randX_1000_1, quantizer, backend):
y_hls4ml = hls_model.predict(X)
# Goal is to get it passing with all equal
np.testing.assert_array_equal(y_qkeras, y_hls4ml)


@pytest.mark.parametrize(
    'weight_quantizer,activation_quantizer', [
('binary', 'binary'),
('ternary', 'ternary'),
('quantized_bits(4, 0, alpha=1)', 'quantized_relu(2, 0)'),
('quantized_bits(4, 0, alpha=1)', 'quantized_relu(4, 0)'),
('quantized_bits(4, 0, alpha=1)', 'quantized_relu(8, 0)')
]
)
def test_qactivation_kwarg(randX_100_10,
activation_quantizer,
weight_quantizer):
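    # Name of the hls4ml layer that the rounding/saturation optimizer targets below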
if activation_quantizer in ['binary', 'ternary']:
name = 'bnbt_qdense_alpha'
else:
name = 'qdense_{}'.format(
eval(activation_quantizer).__class__.__name__)

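    # Build a one-layer model with the quantized activation passed as a
    # QDense keyword argument (the feature under test)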
inputs = Input(shape=(10,))

outputs = QDense(
10,
activation=activation_quantizer,
name='qdense',
kernel_quantizer=weight_quantizer,
bias_quantizer=weight_quantizer,
kernel_initializer='lecun_uniform'
)(inputs)
model = Model(inputs, outputs)

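    # Enable convergent rounding and saturation on the activation layer
    # before the hls4ml config is generated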
hls4ml.model.optimizer.get_optimizer(
'output_rounding_saturation_mode'
).configure(
layers=[name],
rounding_mode='AP_RND_CONV',
saturation_mode='AP_SAT'
)
config = hls4ml.utils.config_from_keras_model(
model,
granularity='name'
)

out_dir = str(
test_root_path / 'hls4mlprj_qactivation_kwarg_{}'.format(
activation_quantizer
)
)

hls_model = hls4ml.converters.convert_from_keras_model(
model,
hls_config=config,
output_dir=out_dir
)
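    # Clear the layer list so the rounding/saturation override does not
    # leak into subsequent tests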
hls4ml.model.optimizer.get_optimizer(
'output_rounding_saturation_mode'
).configure(layers=[])
hls_model.compile()

    # Verify the activation layer is present in the hls4ml model
assert name in [layer.name for layer in hls_model.get_layers()]

# Output tests
X = randX_100_10
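    # Round inputs to 10 fractional bits so both models see exactly
    # representable values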
X = np.round(X * 2**10) * 2**-10
y_qkeras = model.predict(X)
y_hls4ml = hls_model.predict(X)
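    # Quantizers exposing a 'bits' attribute define a step size; tolerate one
    # quantization step (2**-bits) of absolute error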
if hasattr(eval(activation_quantizer), 'bits'):
np.testing.assert_allclose(
y_qkeras.ravel(),
y_hls4ml.ravel(),
atol=2**-eval(activation_quantizer).bits,
rtol=1.0
)
else:
if activation_quantizer == 'binary':
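            # Map hls4ml's 0 outputs to -1 to match QKeras's {-1, +1} binary encoding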
y_hls4ml = np.where(y_hls4ml == 0, -1, 1)
wrong = (y_hls4ml != y_qkeras).ravel()
assert sum(wrong) / len(wrong) <= 0.005
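
For context on what the new test covers: QKeras accepts a quantized activation either as a QDense keyword argument (the form exercised above) or as a standalone QActivation layer (the form the pre-existing tests in this file cover). Below is a minimal sketch contrasting the two, reusing the quantizer strings from the test's parametrization; it is illustrative only and not part of the commit.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from qkeras.qlayers import QActivation, QDense

inputs = Input(shape=(10,))

# Activation passed as an argument, as in the new test
y_kwarg = QDense(10, activation='quantized_relu(4, 0)',
                 kernel_quantizer='quantized_bits(4, 0, alpha=1)',
                 bias_quantizer='quantized_bits(4, 0, alpha=1)')(inputs)

# Equivalent explicit QActivation layer, as in the pre-existing tests
y_layer = QDense(10, kernel_quantizer='quantized_bits(4, 0, alpha=1)',
                 bias_quantizer='quantized_bits(4, 0, alpha=1)')(inputs)
y_layer = QActivation('quantized_relu(4, 0)')(y_layer)

model = Model(inputs, [y_kwarg, y_layer])

The new cases can be run in isolation with pytest's -k filter, e.g. pytest test/pytest/test_qkeras.py -k test_qactivation_kwarg.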
