Commit

Merge pull request fastmachinelearning#796 from fastmachinelearning/pre-commit-ci-update-config

[pre-commit.ci] pre-commit autoupdate
jmitrevs authored May 16, 2023
2 parents 08e309f + 05866f7 commit 487d269
Showing 31 changed files with 3 additions and 59 deletions.
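The six-line change in .pre-commit-config.yaml is the version bump itself (black 22.12.0 → 23.3.0, pyupgrade v3.3.1 → v3.4.0, flake8 5.0.4 → 6.0.0). The remaining deletions, spread across the Python sources, appear to come from re-running the updated black hook: the 23.x stable style removes the blank line that was previously left directly after a function signature. A minimal sketch of that formatting change, using a hypothetical function rather than code from this repository:

# Layout that black 22.x left in place:
def scale(x):

    return 2 * x

# Layout after reformatting with black 23.x; the blank line under the signature is removed:
def scale(x):
    return 2 * x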
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -2,7 +2,7 @@ exclude: (^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/|^test/pyte

repos:
- repo: https://github.com/psf/black
rev: 22.12.0
rev: 23.3.0
hooks:
- id: black
language_version: python3
@@ -30,7 +30,7 @@ repos:
args: ["--profile", "black", --line-length=125]

- repo: https://github.com/asottile/pyupgrade
rev: v3.3.1
rev: v3.4.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]
@@ -41,7 +41,7 @@
- id: setup-cfg-fmt

- repo: https://github.com/pycqa/flake8
rev: 5.0.4
rev: 6.0.0
hooks:
- id: flake8
exclude: docs/conf.py
1 change: 0 additions & 1 deletion contrib/kl_layer/kl_layer.py
@@ -34,7 +34,6 @@ def build(self, input_shape):
super().build(input_shape)

def _merge_function(self, inputs):

mean = inputs[0]
log_var = inputs[1]

2 changes: 0 additions & 2 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -391,7 +391,6 @@ def product_type(self, data_T, weight_T):
return product

def compute_conv1d_instructions(self, in_W, in_C, kernel_size=3, stride=1, pad=0):

# Current limitations
assert pad == 0

@@ -427,7 +426,6 @@ def compute_conv1d_instructions(self, in_W, in_C, kernel_size=3, stride=1, pad=0
return (min_W, windows_int)

def compute_conv2d_instructions(self, in_H, in_W, in_C, kernel_size=3, stride=1, pad=0):

if isinstance(kernel_size, Iterable):
kernel_height = kernel_size[0]
kernel_width = kernel_size[1]
2 changes: 0 additions & 2 deletions hls4ml/backends/quartus/passes/convolution_winograd.py
@@ -82,7 +82,6 @@ def match(self, node):
def transform(self, model, node):
if isinstance(node, Conv1D):
if node.get_attr('filt_width', 3) == 3:

# First, transpose to a format suitable for the Winograd algorithm (F, C, W)
# Note, this assumes a format post-resource strategy optimizer, that is (F, W, C)
# Therefore, (F, W, C) => (F, C, W)
@@ -127,7 +126,6 @@ def transform(self, model, node):

elif isinstance(node, Conv2D):
if node.get_attr('filt_height', 3) == 3 and node.get_attr('filt_width', 3) == 3:

# First, transpose to a format suitable for the Winograd algorithm (F, C, H, W)
# Note, this assumes a format post-resource strategy optimizer, that is (F, H, W, C)
# Therefore, (F, H, W, C) => (F, C, H, W)
1 change: 0 additions & 1 deletion hls4ml/backends/quartus/quartus_backend.py
@@ -133,7 +133,6 @@ def create_initial_config(self, part='Arria10', clock_period=5, io_type='io_para
return config

def build(self, model, synth=True, fpgasynth=False, log_level=1, cont_if_large_area=False):

"""
Builds the project using Intel HLS compiler.
1 change: 0 additions & 1 deletion hls4ml/backends/vivado/passes/recurrent_templates.py
@@ -79,7 +79,6 @@ def __init__(self):
self.mult2_template = recr_mult_config_template

def format(self, node):

params = self._default_config_params(node)

params['n_in'] = node.get_input_variable().dim_names[1]
1 change: 0 additions & 1 deletion hls4ml/backends/vivado/passes/resource_strategy.py
@@ -8,7 +8,6 @@ class ApplyResourceStrategy(OptimizerPass):
'''Transposes the weights to use the dense_resource matrix multiply routine'''

def match(self, node):

node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU))
is_resource_strategy = node.get_attr('strategy', '').lower() == 'resource'
already_transformed = node.get_attr('_weights_transposed', False) is True
1 change: 0 additions & 1 deletion hls4ml/converters/__init__.py
@@ -49,7 +49,6 @@
# and is defined in this module (i.e., not imported)
if callable(func) and hasattr(func, 'handles') and func.__module__ == lib.__name__:
for layer in func.handles:

if model_type == 'keras':
register_keras_layer_handler(layer, func)
elif model_type == 'pytorch':
2 changes: 0 additions & 2 deletions hls4ml/converters/keras/qkeras.py
@@ -20,7 +20,6 @@ def get_quantizer_from_config(keras_layer, quantizer_var):

@keras_handler('QDense')
def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):

layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)

layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
@@ -124,7 +123,6 @@ def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader)

@keras_handler('QBatchNormalization')
def parse_qbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader):

layer, output_shape = parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader)

layer['mean_quantizer'] = get_quantizer_from_config(keras_layer, 'mean')
1 change: 0 additions & 1 deletion hls4ml/converters/keras_to_hls.py
@@ -191,7 +191,6 @@ def get_model_arch(config):


def parse_keras_model(model_arch, reader):

# This is a list of dictionaries to hold all the layer info we need to generate HLS
layer_list = []

2 changes: 0 additions & 2 deletions hls4ml/converters/onnx/convolution.py
@@ -10,7 +10,6 @@

@onnx_handler('Conv')
def parse_conv_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['name'] = node.name
layer['data_format'] = 'channels_first' # ONNX's default is channel first
@@ -45,7 +44,6 @@ def parse_conv_layer(reader, node, inputs_map, input_shapes, graph, config):
output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_width']]

elif len(input_shapes[0]) == 4: # Conv2D

layer['class_name'] = 'Conv2D'

layer['in_height'] = input_shapes[0][2]
5 changes: 0 additions & 5 deletions hls4ml/converters/onnx/core.py
@@ -3,7 +3,6 @@

@onnx_handler(*['Gemm', 'MatMul'])
def parse_gemm_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}

layer['class_name'] = 'Dense'
@@ -61,7 +60,6 @@ def parse_gemm_layer(reader, node, inputs_map, input_shapes, graph, config):

@onnx_handler(*activation_layers)
def parse_activation_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}

layer['name'] = node.name
@@ -70,7 +68,6 @@ def parse_activation_layer(reader, node, inputs_map, input_shapes, graph, config
layer['inputs'] = get_onnx_input_name(node, graph)

if layer['class_name'] != 'Activation':

if layer['class_name'] == 'Softmax':
layer['activation'] = 'softmax'

@@ -79,7 +76,6 @@ def parse_activation_layer(reader, node, inputs_map, input_shapes, graph, config
layer['activ_param'] = get_onnx_attribute(node, 'alpha', 0.01)

elif layer['class_name'] == 'Clip':

clip_min_node = [x for x in graph.initializer if x.name in node.input]
clip_min = clip_min_node[0].float_data[0]

@@ -99,7 +95,6 @@ def parse_activation_layer(reader, node, inputs_map, input_shapes, graph, config

@onnx_handler('BatchNormalization')
def parse_batchnorm_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}

layer['class_name'] = 'BatchNormalization'
1 change: 0 additions & 1 deletion hls4ml/converters/onnx/merge.py
@@ -5,7 +5,6 @@

@onnx_handler(*merge_layers)
def parse_merge_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['class_name'] = node.op_type
layer['name'] = node.name
2 changes: 0 additions & 2 deletions hls4ml/converters/onnx/pooling.py
@@ -12,7 +12,6 @@

@onnx_handler(*pool_operations)
def parse_pool_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['name'] = node.name
layer['inputs'] = get_onnx_input_name(node, graph)
@@ -91,7 +90,6 @@ def parse_pool_layer(reader, node, inputs_map, input_shapes, graph, config):

@onnx_handler(*global_pooling_layers)
def parse_global_pooling_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['name'] = node.name
layer['inputs'] = get_onnx_input_name(node, graph)
2 changes: 0 additions & 2 deletions hls4ml/converters/onnx/reshape.py
@@ -5,7 +5,6 @@

@onnx_handler('Transpose')
def parse_transpose_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['name'] = node.name
layer['class_name'] = 'Transpose'
@@ -21,7 +20,6 @@ def parse_transpose_layer(reader, node, inputs_map, input_shapes, graph, config)

@onnx_handler('Reshape')
def parse_reshape_layer(reader, node, inputs_map, input_shapes, graph, config):

layer = {}
layer['name'] = node.name
layer['class_name'] = 'Reshape'
2 changes: 0 additions & 2 deletions hls4ml/converters/onnx_to_hls.py
@@ -53,7 +53,6 @@ def get_weights_data(self, layer_name, var_name):
tensor = next((x for x in self.model.graph.initializer if x.name == inputs['inputs'][inp_idx]), None)

if tensor is not None:

data = numpy_helper.to_array(tensor)

if inputs['transpose']:
@@ -278,7 +277,6 @@ def onnx_to_hls(config):

print('Topology:')
for node in graph.node:

if node.op_type not in supported_layers:
raise Exception(f'ERROR: Unsupported operation type: {node.op_type}')

1 change: 0 additions & 1 deletion hls4ml/converters/pytorch/core.py
@@ -35,7 +35,6 @@ def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, con

@pytorch_handler(*activation_layers)
def parse_activation_layer(pytorch_layer, layer_name, input_shapes, data_reader, config):

layer = {}

layer['class_name'] = pytorch_layer.__class__.__name__
1 change: 0 additions & 1 deletion hls4ml/converters/pytorch_to_hls.py
@@ -159,7 +159,6 @@ def pytorch_to_hls(config):
print("Input Shape: ", input_shapes)

for layer_name, pytorch_layer in model.named_modules():

pytorch_class = pytorch_layer.__class__.__name__

# First module is the whole model's class
1 change: 0 additions & 1 deletion hls4ml/model/graph.py
@@ -647,7 +647,6 @@ def compile(self):

lib_name = self.config.backend.compile(self)
if self._top_function_lib is not None:

if platform.system() == "Linux":
libdl_libs = ['libdl.so', 'libdl.so.2']
for libdl in libdl_libs:
1 change: 0 additions & 1 deletion hls4ml/model/profiling.py
@@ -567,7 +567,6 @@ def get_ymodel_keras(keras_model, X):
isinstance(layer, keras.layers.Activation) or isinstance(layer, qkeras.qlayers.QActivation)
):
if layer.activation:

if layer.activation.__class__.__name__ == "linear":
ymodel[layer.name] = _get_output(layer, X, keras_model.input)

1 change: 0 additions & 1 deletion hls4ml/model/types.py
@@ -408,7 +408,6 @@ class ExponentType(NamedType):
"""

def __init__(self, name, precision, **kwargs):

if not name.startswith('exponent_'):
name = 'exponent_' + name
super().__init__(name, precision, **kwargs)
@@ -6,7 +6,6 @@

class NeuralNetworkOverlay(Overlay):
def __init__(self, xclbin_name, dtbo=None, download=True, ignore_version=False, device=None):

super().__init__(xclbin_name, dtbo=dtbo, download=download, ignore_version=ignore_version, device=device)
self.input_buffer = None
self.output_buffer = None
2 changes: 0 additions & 2 deletions hls4ml/utils/config.py
@@ -6,7 +6,6 @@


def create_config(output_dir='my-hls-test', project_name='myproject', backend='Vivado', **kwargs):

backend_list = hls4ml.backends.get_available_backends()
if backend.lower() not in backend_list:
raise Exception(f'Unknown backend: {backend}')
@@ -25,7 +24,6 @@ def create_config(output_dir='my-hls-test', project_name='myproject', backend='V


def _get_precision_from_quantizer(quantizer):

if isinstance(quantizer, str):
quantizer_obj = qkeras.get_quantizer(quantizer)
quantizer = {}
6 changes: 0 additions & 6 deletions hls4ml/utils/example_models.py
@@ -27,21 +27,18 @@ def _load_data_config_avai(model_name):


def _data_is_available(model_name):

data = _load_data_config_avai(model_name)

return data['example_data']


def _config_is_available(model_name):

data = _load_data_config_avai(model_name)

return data['example_config']


def _create_default_config(model_name, model_config, backend):

# Initiate the configuration file
config = create_config(backend=backend)

@@ -69,7 +66,6 @@ def _filter_name(model_name):


def _load_example_data(model_name):

print("Downloading input & output example files ...")

filtered_name = _filter_name(model_name)
@@ -89,7 +85,6 @@ def _load_example_data(model_name)


def _load_example_config(model_name):

print("Downloading configuration files ...")

filtered_name = _filter_name(model_name)
@@ -179,7 +174,6 @@ def fetch_example_model(model_name, backend='Vivado'):


def fetch_example_list():

link_to_list = 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/available_models.json'

temp_file, _ = urlretrieve(link_to_list)
5 changes: 0 additions & 5 deletions hls4ml/writer/quartus_writer.py
@@ -405,7 +405,6 @@ def write_defines(self, model):
fout = open(f'{model.config.get_output_dir()}/firmware/defines.h', 'w')

for line in f.readlines():

# Insert numbers
if '// hls-fpga-machine-learning insert numbers' in line:
newline = line
@@ -450,7 +449,6 @@ def write_parameters(self, model):
fout = open(f'{model.config.get_output_dir()}/firmware/parameters.h', 'w')

for line in f.readlines():

if '// hls-fpga-machine-learning insert includes' in line:
newline = line
for include in sorted(set(sum((layer.get_attr('include_header', []) for layer in model.get_layers()), []))):
@@ -760,7 +758,6 @@ def write_bridge(self, model):
indent = ' '

for line in f.readlines():

if 'MYPROJECT' in line:
newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper()))

@@ -882,7 +879,6 @@ def write_build_script(self, model):
fout = open(f'{model.config.get_output_dir()}/Makefile', 'w')

for line in f.readlines():

line = line.replace('myproject', model.config.get_project_name())

if 'DEVICE :=' in line:
@@ -1054,7 +1050,6 @@ def __write_softsign_table(self, model, path):

sep = ''
for i in range(table_size):

in_val = (
i * (MAX_VALUE - MIN_VALUE) / float(table_size)
+ (MAX_VALUE - MIN_VALUE) / (float(table_size) * 2)
1 change: 0 additions & 1 deletion hls4ml/writer/vivado_accelerator_writer.py
@@ -279,7 +279,6 @@ def modify_build_script(self, model):
fout.close()

def write_wrapper_test(self, model):

###################
# write myproject_test_wrapper.cpp
###################
(Diffs for the remaining changed files were not loaded on this page.)

0 comments on commit 487d269
