# Diehl&Cook_MNIST_evaluation.py -- forked from peter-u-diehl/stdp-mnist
# (Python 2 / Brian 1 evaluation script)
'''
Created on 15.12.2014
@author: Peter U. Diehl
'''
import brian as b
from brian import *
import numpy as np
import matplotlib
import matplotlib.cm as cmap
import time
import os.path
import scipy
import cPickle as pickle
from struct import unpack
import brian.experimental.realtime_monitor as rltmMon
#------------------------------------------------------------------------------
# functions
#------------------------------------------------------------------------------
def get_labeled_data(picklename, bTrain = True):
    """Read the MNIST images and their target classes (labels 0-9) and return
    them as a dict with keys 'x', 'y', 'rows' and 'cols'.
    """
    if os.path.isfile('%s.pickle' % picklename):
        data = pickle.load(open('%s.pickle' % picklename, 'rb'))
    else:
        # Open the raw IDX files in read-binary mode
        if bTrain:
            images = open(MNIST_data_path + 'train-images.idx3-ubyte', 'rb')
            labels = open(MNIST_data_path + 'train-labels.idx1-ubyte', 'rb')
        else:
            images = open(MNIST_data_path + 't10k-images.idx3-ubyte', 'rb')
            labels = open(MNIST_data_path + 't10k-labels.idx1-ubyte', 'rb')
        # Get metadata for images
        images.read(4)  # skip the magic number
        number_of_images = unpack('>I', images.read(4))[0]
        rows = unpack('>I', images.read(4))[0]
        cols = unpack('>I', images.read(4))[0]
        # Get metadata for labels
        labels.read(4)  # skip the magic number
        N = unpack('>I', labels.read(4))[0]
        if number_of_images != N:
            raise Exception('number of labels did not match the number of images')
        # Read the data
        x = np.zeros((N, rows, cols), dtype=np.uint8)  # image pixels
        y = np.zeros((N, 1), dtype=np.uint8)           # labels
        for i in xrange(N):
            if i % 1000 == 0:
                print("i: %i" % i)
            x[i] = [[unpack('>B', images.read(1))[0] for unused_col in xrange(cols)] for unused_row in xrange(rows)]
            y[i] = unpack('>B', labels.read(1))[0]
        data = {'x': x, 'y': y, 'rows': rows, 'cols': cols}
        pickle.dump(data, open('%s.pickle' % picklename, 'wb'))
    return data
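
# Usage sketch (not part of the original script): get_labeled_data returns a
# dict and caches it as '<picklename>.pickle' next to the raw IDX files.  For
# the standard, unmodified MNIST files one would expect, roughly:
#
#   train = get_labeled_data(MNIST_data_path + 'training')
#   train['x'].shape   # (60000, 28, 28), uint8 pixel values 0-255
#   train['y'].shape   # (60000, 1), uint8 labels 0-9
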
def get_recognized_number_ranking(assignments, spike_rates):
    summed_rates = [0] * 10
    num_assignments = [0] * 10
    for i in xrange(10):
        num_assignments[i] = len(np.where(assignments == i)[0])
        if num_assignments[i] > 0:
            summed_rates[i] = np.sum(spike_rates[assignments == i]) / num_assignments[i]
    return np.argsort(summed_rates)[::-1]
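
# Illustration (hedged, toy values, not from the original script): with four
# neurons assigned to digits [0, 0, 1, 2] and per-neuron spike counts
# [2., 4., 1., 0.5] for one test image, the average rate is 3.0 for digit 0,
# 1.0 for digit 1 and 0.5 for digit 2, so digit 0 is ranked first:
#
#   ranking = get_recognized_number_ranking(np.array([0, 0, 1, 2]),
#                                           np.array([2., 4., 1., 0.5]))
#   ranking[0]   # -> 0, the predicted digit
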
def get_new_assignments(result_monitor, input_numbers):
    print result_monitor.shape
    assignments = np.ones(n_e) * -1  # initialize them as not assigned
    input_nums = np.asarray(input_numbers)
    maximum_rate = [0] * n_e
    for j in xrange(10):
        num_inputs = len(np.where(input_nums == j)[0])
        if num_inputs > 0:
            rate = np.sum(result_monitor[input_nums == j], axis = 0) / num_inputs
            for i in xrange(n_e):
                if rate[i] > maximum_rate[i]:
                    maximum_rate[i] = rate[i]
                    assignments[i] = j
    return assignments
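
# Note (interpretation, not original code): result_monitor is expected to be a
# (num_examples, n_e) array of spike counts from the training run, and
# input_numbers the matching true labels.  Each excitatory neuron is assigned
# the digit class for which its average response was highest; neurons that
# never fired keep the initial value -1.  A rough sketch:
#
#   assignments = get_new_assignments(training_result_monitor,
#                                     training_input_numbers)
#   assignments.shape   # (n_e,), one digit label (or -1) per neuron
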
MNIST_data_path = './'
data_path = './activity/'
training_ending = '10000'
testing_ending = '10000'
start_time_training = 0
end_time_training = int(training_ending)
start_time_testing = 0
end_time_testing = int(testing_ending)
n_e = 400
n_input = 784
ending = ''
print 'load MNIST'
training = get_labeled_data(MNIST_data_path + 'training')
testing = get_labeled_data(MNIST_data_path + 'testing', bTrain = False)
print 'load results'
training_result_monitor = np.load(data_path + 'resultPopVecs' + training_ending + ending + '.npy')
training_input_numbers = np.load(data_path + 'inputNumbers' + training_ending + '.npy')
testing_result_monitor = np.load(data_path + 'resultPopVecs' + testing_ending + '.npy')
testing_input_numbers = np.load(data_path + 'inputNumbers' + testing_ending + '.npy')
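
# Note (assumption about the recorded data, not stated in this file): the
# resultPopVecs*.npy files are expected to hold one row of n_e = 400 spike
# counts per presented example, i.e. shape (10000, 400) here, and the
# inputNumbers*.npy files the corresponding true digit labels, shape (10000,).
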
print training_result_monitor.shape
print 'get assignments'
test_results = np.zeros((10, end_time_testing-start_time_testing))
test_results_max = np.zeros((10, end_time_testing-start_time_testing))
test_results_top = np.zeros((10, end_time_testing-start_time_testing))
test_results_fixed = np.zeros((10, end_time_testing-start_time_testing))
assignments = get_new_assignments(training_result_monitor[start_time_training:end_time_training],
                                  training_input_numbers[start_time_training:end_time_training])
print assignments
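
# The loop below scores the test set in chunks of 10,000 examples (a single
# chunk for the default testing_ending = '10000').  For each test example the
# top-ranked digit is compared with the true label, and the chunk accuracy is
#   100 * (number of matches) / (chunk size);
# e.g. 9,150 matches out of 10,000 examples would give 91.5%.  That figure is
# only an arithmetic illustration, not a measured result.
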
counter = 0
num_tests = end_time_testing / 10000
sum_accuracy = [0] * num_tests
while counter < num_tests:
    end_time = min(end_time_testing, 10000 * (counter + 1))
    start_time = 10000 * counter
    test_results = np.zeros((10, end_time - start_time))
    print 'calculate accuracy for sum'
    for i in xrange(end_time - start_time):
        test_results[:, i] = get_recognized_number_ranking(assignments,
                                                           testing_result_monitor[i + start_time, :])
    difference = test_results[0, :] - testing_input_numbers[start_time:end_time]
    correct = len(np.where(difference == 0)[0])
    incorrect = np.where(difference != 0)[0]
    sum_accuracy[counter] = correct / float(end_time - start_time) * 100
    print 'Sum response - accuracy: ', sum_accuracy[counter], ' num incorrect: ', len(incorrect)
    counter += 1
print 'Sum response - accuracy --> mean: ', np.mean(sum_accuracy), '--> standard deviation: ', np.std(sum_accuracy)
b.show()