import caffe
import numpy as np


class EuclideanLossLayer(caffe.Layer):
    """
    Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
    to demonstrate the class interface for developing layers in Python.
    """
    def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")

    def reshape(self, bottom, top):
        # check input dimensions match
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # difference is shape of inputs
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # loss output is scalar
        top[0].reshape(1)

    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.

    def backward(self, top, propagate_down, bottom):
        for i in range(2):
            if not propagate_down[i]:
                continue
            if i == 0:
                sign = 1
            else:
                sign = -1
            bottom[i].diff[...] = sign * self.diff / bottom[i].num
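

# Usage sketch (an addition, not part of the original file): a Python layer
# is referenced from a net prototxt by module and class name. The bottom
# blob names below are placeholders; "python_loss_layer" must be importable,
# i.e. this file's directory must be on the PYTHONPATH:
#
#   layer {
#     name: "loss"
#     type: "Python"
#     bottom: "pred"
#     bottom: "label"
#     python_param {
#       module: "python_loss_layer"
#       layer: "EuclideanLossLayer"
#     }
#     loss_weight: 1  # mark this layer's output as a loss
#   }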


class TrainValWeightedEuclideanLossLayer(caffe.Layer):
    """
    Compute the weighted Euclidean loss.

    Each batch is split in half: the first half is treated as training
    samples and the second half as validation samples. Every `interval`
    iterations the per-dimension validation losses are used to reweight
    the training gradients, favoring output dimensions whose validation
    loss is high or still changing.
    """

    def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance.")
        # first half of each batch is train, second half is val
        self.batch = bottom[0].num // 2
        self.count = 0
        self.interval = 200
        self.val_loss = []
        self.pre_val_mean = 0
        # per-dimension gradient weights, initialized to uniform
        self.norm_trend = np.ones(bottom[0].data.shape[1])
        self.norm_loss = np.ones(bottom[0].data.shape[1])

    def reshape(self, bottom, top):
        # check input dimensions match
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension.")
        # difference is shape of inputs
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # loss output is scalar
        top[0].reshape(1)

    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        # record the per-dimension squared error of the validation half
        self.val_loss.append(np.sum(self.diff[self.batch:]**2, axis=0))
        # the reported loss covers the training half only
        top[0].data[...] = np.sum(self.diff[0:self.batch]**2) / self.batch / 2.
        self.count += 1

    def backward(self, top, propagate_down, bottom):
        # Update the per-dimension weights once per interval. This is done
        # before the bottom loop so the statistics are updated exactly once
        # per iteration even when both bottoms propagate gradients.
        if self.count == self.interval:
            self.pre_val_mean = np.mean(self.val_loss[0:self.interval], axis=0)
        if self.count >= 2 * self.interval and self.count % self.interval == 0:
            begin_index = self.count - self.interval
            end_index = self.count
            cur_val_mean = np.mean(self.val_loss[begin_index:end_index], axis=0)
            # relative change of the validation loss since the last interval
            trend = np.abs(cur_val_mean - self.pre_val_mean) / cur_val_mean
            self.norm_trend = trend / np.mean(trend)
            self.norm_loss = cur_val_mean / np.mean(cur_val_mean)
            self.pre_val_mean = cur_val_mean
        # combine the two signals and normalize the weights to mean 1
        weights = self.norm_trend * self.norm_loss
        norm_weights = weights / np.mean(weights)
        tiled_weights = np.tile(norm_weights, [self.batch, 1])
        for i in range(2):
            if not propagate_down[i]:
                continue
            sign = 1 if i == 0 else -1
            # only the training half receives gradients; zero the val half
            bottom[i].diff[0:self.batch] = sign * tiled_weights * self.diff[0:self.batch] / self.batch
            bottom[i].diff[self.batch:] = 0
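

# A minimal sketch (added for illustration, not in the original file) of the
# reweighting arithmetic above, with made-up validation-loss means for a
# 3-dimensional output:
if __name__ == '__main__':
    pre_val_mean = np.array([1.0, 2.0, 4.0])  # previous interval's mean val loss
    cur_val_mean = np.array([0.9, 1.0, 4.0])  # current interval's mean val loss
    # dimensions whose val loss is still moving get a larger trend weight
    trend = np.abs(cur_val_mean - pre_val_mean) / cur_val_mean
    norm_trend = trend / np.mean(trend)
    # dimensions whose val loss is still high get a larger loss weight
    norm_loss = cur_val_mean / np.mean(cur_val_mean)
    weights = norm_trend * norm_loss
    norm_weights = weights / np.mean(weights)  # normalized to mean 1
    print(norm_weights)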