updating layers to be compatible with HLS backend testing
Ben Biggs authored and committed on Jul 6, 2021
Commit d5bc3d6, parent 6684b0e
Showing 6 changed files with 65 additions and 59 deletions.
58 changes: 28 additions & 30 deletions fpgaconvnet_optimiser/models/layers/BufferLayer.py
@@ -33,14 +33,13 @@ def __init__(
             rows: int,
             cols: int,
             channels: int,
-            coarse_in: int,
-            coarse_out: int,
+            coarse: int,
             ctrledge,
             drop_mode =True,
             data_width =16,
         ):
         # initialise parent class
-        super().__init__([rows],[cols],[channels],[coarse_in],[coarse_out])
+        super().__init__([rows],[cols],[channels],[coarse],[coarse])
 
         #ctrledge links to exit condition layer
         self.ctrledge = ctrledge
@@ -62,8 +61,8 @@ def layer_info(self,parameters,batch_size=1):
         parameters.rows_out = self.rows_out(0)
         parameters.cols_out = self.cols_out(0)
         parameters.channels_out = self.channels_out(0)
-        parameters.coarse_in = self.coarse_in
-        parameters.coarse_out = self.coarse_out
+        parameters.coarse_in = self.coarse_in[0]
+        parameters.coarse_out = self.coarse_out[0]
 
     ## UPDATE MODULES ##
     def update(self):
@@ -87,10 +86,10 @@ def resource(self):
 
         # Total
         return {
-            "LUT"  : buff_rsc['LUT']*self.coarse_in,
-            "FF"   : buff_rsc['FF']*self.coarse_in,
-            "BRAM" : buff_rsc['BRAM']*self.coarse_in,
-            "DSP"  : buff_rsc['DSP']*self.coarse_in,
+            "LUT"  : buff_rsc['LUT']*self.coarse_in[0],
+            "FF"   : buff_rsc['FF']*self.coarse_in[0],
+            "BRAM" : buff_rsc['BRAM']*self.coarse_in[0],
+            "DSP"  : buff_rsc['DSP']*self.coarse_in[0],
         }
 
     def visualise(self,name):
@@ -105,26 +104,25 @@ def visualise(self,name):
 
         return cluster, nodes_in, nodes_out
 
-    def functional_model(self, data, ctrl_drop):
+    def functional_model(self, data, ctrl_drop, batch_size=1): #TODO implement batch size
         #Buffer is not an ONNX or pytorch op
         # check input dimensionality
-        assert data.shape[0] == self.rows_in(0)    , "ERROR (data): invalid row dimension"
-        assert data.shape[1] == self.cols_in(0)    , "ERROR (data): invalid column dimension"
-        assert data.shape[2] == self.channels_in(0), "ERROR (data): invalid channel dimension"
-
-        out = np.zeros((
-            self.rows,
-            self.cols,
-            self.channels),dtype=float)
-
-        if self.drop_mode: #non-inverted
-            if ctrl_drop:
-                return out
-            else:
-                return data #pass through
-        else: #inverted
-            if not ctrl_drop:
-                return out
-            else:
-                return data #pass through
+        assert data.shape[0] == batch_size         , "ERROR: invalid mismatched batch"
+        assert data.shape[1] == self.rows_in(0)    , "ERROR: invalid row dimension"
+        assert data.shape[2] == self.cols_in(0)    , "ERROR: invalid column dimension"
+        assert data.shape[3] == self.channels_in(0), "ERROR: invalid channel dimension"
+
+        data_out = []
+        for b, ctrl in zip(data, ctrl_drop):
+            if self.drop_mode: #non-inverted
+                if ctrl == 1.0:
+                    continue
+                else:
+                    data_out.append(b) #pass through
+            else: #inverted
+                if not ctrl == 1.0:
+                    continue
+                else:
+                    data_out.append(b) #pass through
+
+        return np.asarray(data_out)
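
Note: the BufferLayer functional model now operates on a batch. Instead of zeroing a single dropped frame, it filters samples out of the batch with a per-sample control signal. A minimal standalone sketch of the new drop semantics (plain numpy, independent of the layer class; shapes are illustrative assumptions):

```python
import numpy as np

def buffer_functional_model(data, ctrl_drop, drop_mode=True):
    # data: (batch, rows, cols, channels); ctrl_drop: one flag per sample.
    # drop_mode=True (non-inverted): a ctrl value of 1.0 drops the sample;
    # drop_mode=False (inverted): a ctrl value of 1.0 keeps it.
    data_out = []
    for sample, ctrl in zip(data, ctrl_drop):
        keep = (ctrl != 1.0) if drop_mode else (ctrl == 1.0)
        if keep:
            data_out.append(sample)  # pass through
    return np.asarray(data_out)

# two samples; the control signal drops the first one
data = np.random.rand(2, 4, 4, 3)
out = buffer_functional_model(data, ctrl_drop=[1.0, 0.0])
assert out.shape == (1, 4, 4, 3)
```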
4 changes: 2 additions & 2 deletions fpgaconvnet_optimiser/models/layers/Layer.py
@@ -285,8 +285,8 @@ def width_out(self):
         return self.data_width
 
     def get_latency(self):
-        latency_in  = max([ abs(self.workload_in(i)/(self.rate_in(i)*self.streams_in(i) )) for i in self.ports_in  ])
-        latency_out = max([ abs(self.workload_out(i)/(self.rate_out(i)*self.streams_out(i))) for i in self.ports_out ])
+        latency_in  = max([ abs(self.workload_in(i)/(self.rate_in(i)*self.streams_in(i) )) for i in range(self.ports_in)  ])
+        latency_out = max([ abs(self.workload_out(i)/(self.rate_out(i)*self.streams_out(i))) for i in range(self.ports_out) ])
         return max(latency_in,latency_out)
 
     def pipeline_depth(self):
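
Note: this fix suggests ports_in and ports_out are plain integer port counts, so iterating them directly would raise a TypeError; wrapping them in range() restores the per-port loop. A hedged standalone sketch of the latency calculation, with invented workload, rate, and stream figures:

```python
# Standalone sketch of the fixed get_latency(); the numbers below are
# purely illustrative, not taken from the repository.
workload_in = [1024.0]  # words consumed per input port
rate_in     = [0.5]     # words per cycle, per stream
streams_in  = [4]       # parallel streams on the port
ports_in    = 1         # integer port count, hence range(ports_in)

latency_in = max(abs(workload_in[i] / (rate_in[i] * streams_in[i]))
                 for i in range(ports_in))
print(latency_in)  # 1024 / (0.5 * 4) = 512.0 cycles
```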
38 changes: 24 additions & 14 deletions fpgaconvnet_optimiser/models/layers/SoftMaxCmpLayer.py
@@ -128,17 +128,27 @@ def visualise(self,name):
 
         return cluster, nodes_in, nodes_out
 
-    def functional_model(self, data, threshold):
-
-        assert data.shape[0] == self.rows    , "ERROR (data): invalid row dimension"
-        assert data.shape[1] == self.cols    , "ERROR (data): invalid column dimension"
-        assert data.shape[2] == self.channels, "ERROR (data): invalid channel dimension"
-
-        pk = softmax_layer(torch.from_numpy(data)).detach()
-        #get max value
-        top1 = torch.max(torch.from_numpy(data))
-        #True = early exit, drop buffered data
-        if top1 > threshold:
-            return 1.0
-        else:
-            return 0.0
+    def functional_model(self, data, batch_size=1):
+
+        assert data.shape[0] == self.rows_in(0)    , "ERROR (data): invalid row dimension"
+        assert data.shape[1] == self.cols_in(0)    , "ERROR (data): invalid column dimension"
+        assert data.shape[2] == self.channels_in(0), "ERROR (data): invalid channel dimension"
+
+        softmax_layer = torch.nn.Softmax(dim=-1)
+        out = np.zeros((batch_size, 3))
+        for b in range(batch_size):
+            pk = softmax_layer(torch.from_numpy(data)).detach()
+            #get max value
+            top1 = torch.max(torch.from_numpy(data))
+            #True = early exit, drop buffered data
+            if top1 > self.threshold:
+                out[b][0] = 1.0
+                out[b][1] = 1.0
+                out[b][2] = 1.0
+                #return 1.0
+            else:
+                out[b][0] = 0.0
+                out[b][1] = 0.0
+                out[b][2] = 0.0
+                #return 0.0
+        return out
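
Note: the comparison now reads the threshold from self.threshold and emits one early-exit flag per batch element, replicated across what appear to be three output ports. The softmax pk is computed but unused; the test is against the raw maximum. A standalone sketch of the same behaviour, with assumed shapes and port count:

```python
import numpy as np
import torch

def softmax_cmp_functional_model(data, threshold, batch_size=1, ports_out=3):
    # data: (rows, cols, channels). One early-exit flag per batch element,
    # replicated across the assumed three output ports, as in the new code.
    out = np.zeros((batch_size, ports_out))
    for b in range(batch_size):
        top1 = torch.max(torch.from_numpy(data))  # raw max, as committed
        out[b, :] = 1.0 if top1 > threshold else 0.0  # 1.0 = take early exit
    return out

data = np.random.rand(1, 1, 10).astype(np.float32)
print(softmax_cmp_functional_model(data, threshold=0.9))
```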
11 changes: 5 additions & 6 deletions fpgaconvnet_optimiser/models/layers/SplitLayer.py
@@ -160,17 +160,16 @@ def functional_model(self, data, batch_size=1):
 
 
         out = np.ndarray((
-            self.rows,
-            self.cols,
-            self.channels,
-            self.coarse),dtype=float)
+            self.rows_in(0),
+            self.cols_in(0),
+            self.channels_in(0),
+            self.ports_out),dtype=float)
 
         for index,_ in np.ndenumerate(out):
             out[index] = data[
                 index[0],
                 index[1],
-                index[2],
-                index[3]]
+                index[2]]
 
         return out
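
Note: the split output is now sized by ports_out rather than coarse, and every input element is broadcast to each output port. A self-contained sketch of that broadcast (shapes assumed):

```python
import numpy as np

def split_functional_model(data, ports_out=2):
    # data: (rows, cols, channels); the output repeats the input once per
    # output port, matching the new ndenumerate-based copy loop.
    rows, cols, channels = data.shape
    out = np.ndarray((rows, cols, channels, ports_out), dtype=float)
    for index, _ in np.ndenumerate(out):
        out[index] = data[index[0], index[1], index[2]]
    return out

data = np.arange(12, dtype=float).reshape(2, 2, 3)
out = split_functional_model(data)
assert np.array_equal(out[..., 0], data) and np.array_equal(out[..., 1], data)
```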

10 changes: 5 additions & 5 deletions fpgaconvnet_optimiser/models/modules/Buffer.py
@@ -79,7 +79,7 @@ def rsc(self):
 
         bram_buffer = 0
         if bram_buffer_size >= 512: #taken from Accum.py modules
-            bram_buffer = math.ceil( (bram_acc_buffer_size)/18000)
+            bram_buffer = math.ceil( (bram_buffer_size)/18000)
         return {
             "LUT"  : 0, #int(np.dot(self.utilisation_model(), self.rsc_coef[0])),
             "BRAM" : bram_buffer,
@@ -99,12 +99,12 @@ def functional_model(self, data, ctrl_drop):
             self.channels),dtype=float)
 
         if self.drop_mode: #non-inverted
-            if ctrl_drop:
-                return out
+            if ctrl_drop == 1.0:
+                return
             else:
                 return data #pass through
         else: #inverted
-            if not ctrl_drop:
-                return out
+            if not ctrl_drop == 1.0:
+                return
             else:
                 return data #pass through
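
Note: the rsc() fix replaces the undefined bram_acc_buffer_size (a leftover from Accum.py) with the local bram_buffer_size, and the estimate maps buffers of at least 512 units onto 18 Kb BRAM primitives. A hypothetical reconstruction of that calculation, assuming the buffer size is depth times data width in bits:

```python
import math

def bram18_count(buffer_depth, data_width=16, threshold=512):
    # Hypothetical reconstruction: buffers at or above the size threshold
    # are mapped to 18 Kb BRAM primitives; smaller ones cost no BRAM.
    bram_buffer_size = buffer_depth * data_width  # assumed: size in bits
    if bram_buffer_size >= threshold:  # threshold taken from Accum.py
        return math.ceil(bram_buffer_size / 18000)
    return 0

print(bram18_count(2048))  # 2048 * 16 = 32768 bits -> 2 x BRAM18
```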
3 changes: 1 addition & 2 deletions fpgaconvnet_optimiser/models/modules/Compare.py
@@ -86,8 +86,6 @@ def functional_model(self, exp_max_set, exp_sum_set):
         #assert data.shape[1] == self.cols    , "ERROR: invalid column dimension"
         #assert data.shape[2] == self.channels, "ERROR: invalid channel dimension"
         out = []
-        print(exp_max_set, exp_sum_set)
-        print(self.threshold)
         thr_set = np.array(exp_sum_set) * self.threshold
         for (exp_max, thr) in zip(exp_max_set, thr_set):
             if self.cmp_type == 'gt':
@@ -110,3 +108,4 @@ def functional_model(self, exp_max_set, exp_sum_set):
                 out.append( 1.0)
             else:
                 out.append( 0.0)
+        return np.array(out)
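
Note: with the debug prints gone, functional_model now returns the flag array. The test rearranges the softmax comparison exp_max / exp_sum > threshold into exp_max > threshold * exp_sum, which avoids a division in hardware. A standalone sketch, assuming cmp_type is 'gt' or 'lt':

```python
import numpy as np

def compare_functional_model(exp_max_set, exp_sum_set, threshold, cmp_type='gt'):
    # exp_max / exp_sum > threshold is rearranged to
    # exp_max > threshold * exp_sum, avoiding a divide.
    out = []
    thr_set = np.array(exp_sum_set) * threshold
    for exp_max, thr in zip(exp_max_set, thr_set):
        if cmp_type == 'gt':
            out.append(1.0 if exp_max > thr else 0.0)
        else:  # assumed 'lt' variant
            out.append(1.0 if exp_max < thr else 0.0)
    return np.array(out)

print(compare_functional_model([5.0, 1.0], [5.5, 4.0], threshold=0.9))  # [1. 0.]
```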
