back_propagation_neural_network.py
#!/usr/bin/python
"""
A framework for a Back Propagation Neural Network (BP) model.

Easy to use:
    * add as many layers as you want
    * clearly see how the loss decreases
Easy to extend:
    * more activation functions
    * more loss functions
    * more optimization methods

Author: Stephen Lee
Github : https://github.com/RiptideBo
Date: 2017.11.23
"""

import numpy as np
from matplotlib import pyplot as plt


def sigmoid(x):
    return 1 / (1 + np.exp(-1 * x))


class DenseLayer:
    """
    Layers of a BP neural network
    """

    def __init__(
        self, units, activation=None, learning_rate=None, is_input_layer=False
    ):
        """
        Common fully connected layer of the BP network.

        :param units: number of neural units
        :param activation: activation function
        :param learning_rate: learning rate for the parameters
        :param is_input_layer: whether this is the input layer
        """
        self.units = units
        self.weight = None
        self.bias = None
        self.activation = activation
        if learning_rate is None:
            learning_rate = 0.3
        self.learn_rate = learning_rate
        self.is_input_layer = is_input_layer

    def initializer(self, back_units):
        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units)))
        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T
        if self.activation is None:
            self.activation = sigmoid
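
    # Added note on shapes (commentary, not part of the original code): weight is a
    # (units, back_units) matrix and bias a (units, 1) column vector, so each layer
    # expects its input xdata as a column vector of length back_units.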

    def cal_gradient(self):
        # activation function may be sigmoid or linear
        if self.activation == sigmoid:
            gradient_mat = np.dot(self.output, (1 - self.output).T)
            gradient_activation = np.diag(np.diag(gradient_mat))
        else:
            gradient_activation = 1
        return gradient_activation
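
    # Added note (commentary): for the sigmoid activation the derivative is
    # sigmoid(z) * (1 - sigmoid(z)); cal_gradient() places these values on the
    # diagonal of a square matrix so the chain rule in back_propagation()
    # reduces to a single matrix product.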

    def forward_propagation(self, xdata):
        self.xdata = xdata
        if self.is_input_layer:
            # input layer
            self.wx_plus_b = xdata
            self.output = xdata
            return xdata
        else:
            self.wx_plus_b = np.dot(self.weight, self.xdata) - self.bias
            self.output = self.activation(self.wx_plus_b)
            return self.output
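
    # Added note (commentary): for non-input layers the forward pass computes
    # output = activation(W @ x - b); this implementation subtracts the bias
    # rather than adding it, which only flips the sign convention of b.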

    def back_propagation(self, gradient):
        gradient_activation = self.cal_gradient()  # an (i x i) diagonal matrix
        gradient = np.asmatrix(np.dot(gradient.T, gradient_activation))

        self._gradient_weight = np.asmatrix(self.xdata)
        self._gradient_bias = -1
        self._gradient_x = self.weight

        self.gradient_weight = np.dot(gradient.T, self._gradient_weight.T)
        self.gradient_bias = gradient * self._gradient_bias
        self.gradient = np.dot(gradient, self._gradient_x).T
        # update: move in the negative gradient direction
        self.weight = self.weight - self.learn_rate * self.gradient_weight
        self.bias = self.bias - self.learn_rate * self.gradient_bias.T
        # the weights and bias are updated according to the learning rate (0.3 if not set)
        return self.gradient
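
    # Added note (commentary): each layer performs plain gradient descent on its own
    # parameters, roughly W <- W - lr * dL/dW and b <- b - lr * dL/db, and returns
    # self.gradient, which acts as dL/dx for the previous layer in the chain.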


class BPNN:
    """
    Back Propagation Neural Network model
    """

    def __init__(self):
        self.layers = []
        self.train_mse = []
        self.fig_loss = plt.figure()
        self.ax_loss = self.fig_loss.add_subplot(1, 1, 1)

    def add_layer(self, layer):
        self.layers.append(layer)

    def build(self):
        for i, layer in enumerate(self.layers):
            if i < 1:
                layer.is_input_layer = True
            else:
                layer.initializer(self.layers[i - 1].units)

    def summary(self):
        for i, layer in enumerate(self.layers):
            print(f"------- layer {i} -------")
            print("weight.shape ", np.shape(layer.weight))
            print("bias.shape ", np.shape(layer.bias))

    def train(self, xdata, ydata, train_round, accuracy):
        self.train_round = train_round
        self.accuracy = accuracy

        self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1)

        x_shape = np.shape(xdata)
        for _ in range(train_round):
            all_loss = 0
            for row in range(x_shape[0]):
                _xdata = np.asmatrix(xdata[row, :]).T
                _ydata = np.asmatrix(ydata[row, :]).T

                # forward propagation
                for layer in self.layers:
                    _xdata = layer.forward_propagation(_xdata)

                loss, gradient = self.cal_loss(_ydata, _xdata)
                all_loss = all_loss + loss

                # back propagation: the input layer is not updated
                for layer in self.layers[:0:-1]:
                    gradient = layer.back_propagation(gradient)

            mse = all_loss / x_shape[0]
            self.train_mse.append(mse)

            self.plot_loss()

            if mse < self.accuracy:
                print("---- reached the target accuracy ----")
                return mse
        return None

    def cal_loss(self, ydata, ydata_):
        self.loss = np.sum(np.power((ydata - ydata_), 2))
        self.loss_gradient = 2 * (ydata_ - ydata)
        # vector (shape is the same as _ydata.shape)
        return self.loss, self.loss_gradient
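
    # Added note (commentary): the loss is the summed squared error
    # L = sum((y - y_hat) ** 2), so its gradient with respect to the prediction is
    # dL/dy_hat = 2 * (y_hat - y), which is exactly loss_gradient above.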

    def plot_loss(self):
        if self.ax_loss.lines:
            # remove the previous curve before drawing the updated one
            self.ax_loss.lines[0].remove()
        self.ax_loss.plot(self.train_mse, "r-")
        plt.ion()
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.show()
        plt.pause(0.1)


def example():
    x = np.random.randn(10, 10)
    y = np.asarray(
        [
            [0.8, 0.4],
            [0.4, 0.3],
            [0.34, 0.45],
            [0.67, 0.32],
            [0.88, 0.67],
            [0.78, 0.77],
            [0.55, 0.66],
            [0.55, 0.43],
            [0.54, 0.1],
            [0.1, 0.5],
        ]
    )
    model = BPNN()
    for i in (10, 20, 30, 2):
        model.add_layer(DenseLayer(i))
    model.build()
    model.summary()
    model.train(xdata=x, ydata=y, train_round=100, accuracy=0.01)


if __name__ == "__main__":
    example()