Issue47 parsing ee onnx graph (#50)
* ee parser work bringup

* started updating parser, temp save

* expanded subgraphs, updated explicit edges of subnodes

* added early exit dataflow edges to output of If operation/layer

* adding splitlayers to branching connections, removed extra nodes

* adding buffer layer, reworking ctrl edges

* updated parsing layers

* added Buffer and BufferLayer for hw optimiser

* ignoring egg dir, adding custom setup for recompilation ease

* updated Buffer layer/mod, added Exit layers, updated init

* updated add_hardware with new layers, linking control signals, fixing graph and ctrl edges

* updating add_dimensions function - savepoint

* fixing additional conflicts after rebase

* init hw for split layer, fixed comment typos

* working parser for branchnet onnx graph (somewhat verified)
biggsbenjamin authored May 20, 2021
1 parent 9c525fe commit d06bbfc
Showing 13 changed files with 898 additions and 104 deletions.
37 changes: 37 additions & 0 deletions ben_setup.py
@@ -0,0 +1,37 @@
import setuptools

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="fpgaconvnet-optimiser-BenDev", # Replace with your own username
    version="0.0.6",
    author="Alex Montgomerie & Ben Biggs",
    author_email="[email protected] [email protected]",
    description="Optimiser for mapping convolutional neural network models to FPGA platforms.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/AlexMontgomerie/fpgaconvnet-optimiser",
    include_package_data=True,
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=[
        "networkx>=2.5",
        "numpy>=1.19.2",
        "protobuf>=3.13.0",
        "torch>=1.7.1",
        "pyyaml>=5.1.0",
        "scipy>=1.2.1",
        "torchvision>=0.8.2",
        "onnx==1.8.0",
        "onnxruntime>=1.6.0",
        "graphviz>=0.16",
        "pydot>=1.4.2",
        "onnxoptimizer>=0.2.5",
        "ddt>=1.4.2",
    ]
)
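A minimal sketch of how this alternative setup script might be exercised, assuming it is run from the repository root; the develop-mode invocation and the import path of the new layers are assumptions based on the commit notes ("custom setup for recompilation ease", "updated init") rather than documented usage:

# Hedged sketch: install in development mode so local edits are picked up
# without reinstalling, then smoke-test that the new layers import.
#
#     python ben_setup.py develop      # assumed invocation of this setup script
#
from fpgaconvnet_optimiser.models.layers import BufferLayer, ExitConditionLayer, ExitSelectLayer
print(BufferLayer, ExitConditionLayer, ExitSelectLayer)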
138 changes: 138 additions & 0 deletions fpgaconvnet_optimiser/models/layers/BufferLayer.py
@@ -0,0 +1,138 @@
"""
Buffering layer
Stores intermediate compute information such as results from Conv or Pool layers.
During DSE the required size will be calculated to store intermediate results at
branching layers. The position of the buffer layer will then be moved along a
given branch until the buffer size is feasible and the latency of the exit
condition is mitigated/matched. For effective pipelining I think.
Secondary function of the buffer is to "drop" a partial calculation.
Clear a FIFO - takes X number of cycles?
Drop signal will be control signal from the Exit Condition.
Future goal will be to have buffer as an offchip memory link.
In this case, the drop might not be used.
If "drop_mode" True then when True ctrl signal received, drop the data.
If "drop_mode" False then use inverted ctrl signal.
"""

import numpy as np
import math
import pydot
import torch

from fpgaconvnet_optimiser.models.modules import Buffer
#from fpgaconvnet_optimiser.models.modules import Fork
from fpgaconvnet_optimiser.models.layers import Layer

class BufferLayer(Layer):
    def __init__(
            self,
            rows: int,
            cols: int,
            channels: int,
            coarse_in: int,
            coarse_out: int,
            ctrledge,
            drop_mode=True,
            data_width=16,
        ):
        # initialise parent class
        super().__init__([rows],[cols],[channels],[coarse_in],[coarse_out])

        # ctrledge links to exit condition layer
        self.ctrledge = ctrledge
        self.drop_mode = drop_mode

        # init modules
        self.modules = {
            "buffer" : Buffer(rows,cols,channels, ctrledge, data_width)
        }
        self.update()

    ## LAYER INFO ##
    def layer_info(self,parameters,batch_size=1):
        parameters.batch_size   = batch_size
        parameters.buffer_depth = self.buffer_depth
        parameters.rows_in      = self.rows_in(0)
        parameters.cols_in      = self.cols_in(0)
        parameters.channels_in  = self.channels_in(0)
        parameters.rows_out     = self.rows_out(0)
        parameters.cols_out     = self.cols_out(0)
        parameters.channels_out = self.channels_out(0)
        parameters.coarse_in    = self.coarse_in
        parameters.coarse_out   = self.coarse_out

    ## UPDATE MODULES ##
    def update(self):
        self.modules['buffer'].rows     = self.rows_in(0)
        self.modules['buffer'].cols     = self.cols_in(0)
        self.modules['buffer'].channels = self.channels_in(0)
        # TODO work out if channels = int(self.channels/self.coarse_in)

    ### RATES ###
    def rates_graph(self):
        rates_graph = np.zeros( shape=(1,2) , dtype=float )
        # buffer
        rates_graph[0,0] = self.modules['buffer'].rate_in(0)
        rates_graph[0,1] = self.modules['buffer'].rate_out(0)
        return rates_graph

    def update_coarse_in(self, coarse_in):
        self.coarse_in = coarse_in

    def update_coarse_out(self, coarse_out):
        self.coarse_out = coarse_out

    #def get_weights_reloading_feasible(self):

    def resource(self):

        buff_rsc = self.modules['buffer'].rsc()

        # Total
        return {
            "LUT"  : buff_rsc['LUT']*self.coarse_in,
            "FF"   : buff_rsc['FF']*self.coarse_in,
            "BRAM" : buff_rsc['BRAM']*self.coarse_in,
            "DSP"  : buff_rsc['DSP']*self.coarse_in,
        }

    def visualise(self,name):
        cluster = pydot.Cluster(name,label=name)

        for i in range(self.coarse_in):
            cluster.add_node(pydot.Node( "_".join([name,"buff",str(i)]), label="buff" ))

        # get nodes in and out
        nodes_in  = [ "_".join([name,"buff",str(i)]) for i in range(self.coarse_in) ]
        nodes_out = nodes_in

        return cluster, nodes_in, nodes_out

    def functional_model(self, data, ctrl_drop):
        # Buffer is not an ONNX or pytorch op
        # check input dimensionality
        assert data.shape[0] == self.rows_in(0)    , "ERROR (data): invalid row dimension"
        assert data.shape[1] == self.cols_in(0)    , "ERROR (data): invalid column dimension"
        assert data.shape[2] == self.channels_in(0), "ERROR (data): invalid channel dimension"

        out = np.zeros((
            self.rows,
            self.cols,
            self.channels),dtype=float)

        if self.drop_mode: # non-inverted
            if ctrl_drop:
                return out
            else:
                return data # pass through
        else: # inverted
            if not ctrl_drop:
                return out
            else:
                return data # pass through

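As a quick illustration of the drop/pass-through behaviour of functional_model, a minimal sketch; the feature-map dimensions, the control-edge name, and the assumption that BufferLayer is exported from fpgaconvnet_optimiser.models.layers are all hypothetical, inferred from this diff:

import numpy as np
from fpgaconvnet_optimiser.models.layers import BufferLayer

# hypothetical feature-map shape and control edge from an exit-condition layer
buffer = BufferLayer(rows=28, cols=28, channels=64, coarse_in=1, coarse_out=1,
                     ctrledge="exit_condition_0")

data = np.random.rand(28, 28, 64)
kept    = buffer.functional_model(data, ctrl_drop=False)  # data passes through unchanged
dropped = buffer.functional_model(data, ctrl_drop=True)   # buffered data is "dropped" (zeros returned)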
106 changes: 106 additions & 0 deletions fpgaconvnet_optimiser/models/layers/ExitConditionLayer.py
@@ -0,0 +1,106 @@
"""
Exit Condition layer
Takes the input and performs Softmax, then takes the maximum value, and
compares it to the exit threshold value specified in the model.
This generates a control signal that will terminate the execution early
or allow the input sample to pass through the graph.
TODO Add other variations of the exit condition.
"""

import torch
import math
import numpy as np
import pydot

#from fpgaconvnet_optimiser.models.modules import SlidingWindow
#from fpgaconvnet_optimiser.models.modules import Pool
from fpgaconvnet_optimiser.models.layers import Layer

class ExitConditionLayer(Layer):
    def __init__(
            self,
            rows: int,
            cols: int,
            channels: int,
            coarse_in: int,
            coarse_out: int,
            ctrledges: [str], # expecting list of ctrl edge names
            cond_type = 'top1',
            data_width = 16
        ):
        super().__init__([rows],[cols],[channels],[coarse_in],[coarse_out])

        self.ctrledges = ctrledges
        self.cond_type = cond_type

        # update flags

        # init modules
        # TODO
        self.modules = {
        }

        self.update()

    def layer_info(self,parameters,batch_size=1):
        parameters.batch_size   = batch_size
        parameters.buffer_depth = self.buffer_depth
        parameters.rows_in      = self.rows_in()
        parameters.cols_in      = self.cols_in()
        parameters.channels_in  = self.channels_in()
        parameters.rows_out     = self.rows_out()
        parameters.cols_out     = self.cols_out()
        parameters.channels_out = self.channels_out()
        parameters.coarse       = self.coarse_in
        parameters.coarse_in    = self.coarse_in
        parameters.coarse_out   = self.coarse_out

    def update(self): # TODO
        Layer.update(self)

    def rates_graph(self): # TODO
        rates_graph = np.zeros( shape=(1,2), dtype=float)
        return rates_graph

    def resource(self): # TODO use the resource model of the actual modules once defined
        # placeholder: no modules are instantiated yet, so report zero resources
        # (will become mod_rsc = self.modules['mod'].rsc() scaled by self.coarse_in)
        return {
            "LUT"  : 0,
            "FF"   : 0,
            "BRAM" : 0,
            "DSP"  : 0,
        }

    def visualise(self,name): # TODO replace 'mod' with actual modules used
        cluster = pydot.Cluster(name,label=name)

        for i in range(self.coarse_in):
            cluster.add_node(pydot.Node( "_".join([name,"mod",str(i)]), label="mod" ))

        for i in range(self.coarse_out):
            cluster.add_node(pydot.Node( "_".join([name,"mod",str(i)]), label="mod" ))

        # get nodes in and out
        nodes_in  = [ "_".join([name,"mod",str(i)]) for i in range(self.coarse_in) ]
        nodes_out = [ "_".join([name,"mod",str(i)]) for i in range(self.coarse_out) ]

        return cluster, nodes_in, nodes_out

    def functional_model(self, data, threshold):

        assert data.shape[0] == self.rows    , "ERROR (data): invalid row dimension"
        assert data.shape[1] == self.cols    , "ERROR (data): invalid column dimension"
        assert data.shape[2] == self.channels, "ERROR (data): invalid channel dimension"

        # instantiate softmax layer (softmax over the channel/class dimension)
        softmax_layer = torch.nn.Softmax(dim=-1) # TODO move softmax to separate layer
        pk = softmax_layer(torch.from_numpy(data)).detach()
        # get max value
        top1 = torch.max(pk)
        # True = early exit, drop buffered data
        return top1 > threshold
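A minimal sketch of the top-1 exit decision in functional_model; the 1x1x10 class-score shape, the threshold value, the control-edge names, and the import path are illustrative assumptions:

import numpy as np
from fpgaconvnet_optimiser.models.layers import ExitConditionLayer

# hypothetical 1x1x10 class-score tensor arriving at the early-exit point
exit_cond = ExitConditionLayer(rows=1, cols=1, channels=10, coarse_in=1, coarse_out=1,
                               ctrledges=["buffer_0", "exit_select_0"])

scores = np.random.rand(1, 1, 10).astype(np.float32)
take_early_exit = exit_cond.functional_model(scores, threshold=0.9)
# True  -> confident enough: drop the buffered data and pass the early result onwards
# False -> the sample continues through the remainder of the graph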
111 changes: 111 additions & 0 deletions fpgaconvnet_optimiser/models/layers/ExitSelectLayer.py
@@ -0,0 +1,111 @@
"""
Exit Selection Layer
This layer merges all exit results into a single point for the output to off-chip memory.
The select lines will be driven by the control signal from each exit condition layer.
"""

import numpy as np
import math
import pydot
import torch

from fpgaconvnet_optimiser.models.layers import Layer

class ExitSelectLayer(Layer):
    def __init__(
            self,
            rows: int,
            cols: int,
            channels: int,
            coarse_in: int,
            coarse_out: int,
            ctrledge: str,
            data_width=16,
        ):
        # initialise parent class
        # rows, cols, channels will be the same for both inputs
        super().__init__( [rows,rows],
                          [cols,cols],
                          [channels,channels],
                          [coarse_in,coarse_in],
                          [coarse_out,coarse_out])

        # index 0 is then_branch, index 1 is else_branch
        # ctrledge links to exit condition layer
        self.ctrledge = ctrledge

        # init modules
        self.modules = {
        }
        self.update()

    ## LAYER INFO ##
    def layer_info(self,parameters,batch_size=1):
        # TODO
        #parameters.batch_size   = batch_size
        #parameters.buffer_depth = self.buffer_depth
        #parameters.rows_in      = self.rows_in()
        #parameters.cols_in      = self.cols_in()
        #parameters.channels_in  = self.channels_in()
        #parameters.rows_out     = self.rows_out()
        #parameters.cols_out     = self.cols_out()
        #parameters.channels_out = self.channels_out()
        #parameters.coarse_in    = self.coarse_in
        #parameters.coarse_out   = self.coarse_out
        return

    ## UPDATE MODULES ##
    def update(self): # TODO
        return

    ### RATES ###
    def rates_graph(self):
        rates_graph = np.zeros( shape=(1,2) , dtype=float )

        #rates_graph[0,0] = self.modules['mod'].rate_in()
        #rates_graph[0,1] = self.modules['mod'].rate_out()
        return rates_graph

    def resource(self): # TODO use the resource model of the actual modules once defined
        # placeholder: no modules are instantiated yet, so report zero resources
        # (will become mod_rsc = self.modules['mod'].rsc() scaled by self.coarse_in)
        return {
            "LUT"  : 0,
            "FF"   : 0,
            "BRAM" : 0,
            "DSP"  : 0,
        }

    def visualise(self,name): # TODO replace 'mod' with actual modules used
        cluster = pydot.Cluster(name,label=name)

        for i in range(self.coarse_in):
            cluster.add_node(pydot.Node( "_".join([name,"mod",str(i)]), label="mod" ))

        for i in range(self.coarse_out):
            cluster.add_node(pydot.Node( "_".join([name,"mod",str(i)]), label="mod" ))

        # get nodes in and out
        nodes_in  = [ "_".join([name,"mod",str(i)]) for i in range(self.coarse_in) ]
        nodes_out = [ "_".join([name,"mod",str(i)]) for i in range(self.coarse_out) ]

        return cluster, nodes_in, nodes_out

    def functional_model(self, EEdata, LEdata, ctrl_pass):
        # Exit select is not an ONNX or pytorch op
        # check input dimensionality
        assert EEdata.shape[0] == self.rows    , "ERROR: invalid row dimension"
        assert EEdata.shape[1] == self.cols    , "ERROR: invalid column dimension"
        assert EEdata.shape[2] == self.channels, "ERROR: invalid channel dimension"
        assert LEdata.shape[0] == self.rows    , "ERROR: invalid row dimension"
        assert LEdata.shape[1] == self.cols    , "ERROR: invalid column dimension"
        assert LEdata.shape[2] == self.channels, "ERROR: invalid channel dimension"

        if ctrl_pass:
            return EEdata
        else:
            return LEdata
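A minimal sketch of the two-way selection performed by functional_model; the exit shapes, the control-edge name, and the import path are illustrative assumptions:

import numpy as np
from fpgaconvnet_optimiser.models.layers import ExitSelectLayer

# hypothetical case: both the early and the late exit produce a 1x1x10 result
exit_select = ExitSelectLayer(rows=1, cols=1, channels=10, coarse_in=1, coarse_out=1,
                              ctrledge="exit_condition_0")

early_result = np.random.rand(1, 1, 10)
late_result  = np.random.rand(1, 1, 10)

out = exit_select.functional_model(early_result, late_result, ctrl_pass=True)   # -> early_result
out = exit_select.functional_model(early_result, late_result, ctrl_pass=False)  # -> late_result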