utils.py
import torch
import torch.nn.parallel
import torch.optim
from tqdm import tqdm
from models.layers import *


def train(model, device, train_loader, criterion, optimizer, T, dvs):
    """Run one training epoch and return (summed loss, accuracy in %)."""
    running_loss = 0
    model.train()
    total = 0
    correct = 0
    for images, labels in tqdm(train_loader):
        optimizer.zero_grad()
        labels = labels.to(device)
        images = images.to(device)
        if dvs:
            # DVS frames: move the time dimension to the front.
            images = images.transpose(0, 1)
        if T == 0:
            # T == 0: plain ANN forward pass.
            outputs = model(images)
        else:
            # T > 0: spiking forward pass; average the outputs over the T time-steps.
            outputs = model(images).mean(0)
        loss = criterion(outputs, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()
        total += float(labels.size(0))
        _, predicted = outputs.cpu().max(1)
        correct += float(predicted.eq(labels.cpu()).sum().item())
    return running_loss, 100 * correct / total
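
# Usage sketch (illustrative, not from the repo): with dvs=True the loader is assumed to
# yield frame tensors shaped [batch, T, ...]; train() moves time to the front so that
# model(images) can return a per-time-step output that is averaged over time, e.g.
#   loss_sum, acc = train(model, device, dvs_loader, criterion, optimizer, T=10, dvs=True)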

def train_poisson(model, device, train_loader, criterion, optimizer, T):
    """Training epoch for Poisson/rate-coded inputs; returns (summed loss, accuracy in %)."""
    running_loss = 0
    model.train()
    total = 0
    correct = 0
    for images, labels in train_loader:
        optimizer.zero_grad()
        labels = labels.to(device)
        images = images.to(device)
        if T > 0:
            # Spiking forward pass: average the outputs over the T time-steps.
            outputs = model(images).mean(0)
        else:
            outputs = model(images)
        loss = criterion(outputs, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()
        total += float(labels.size(0))
        _, predicted = outputs.cpu().max(1)
        correct += float(predicted.eq(labels.cpu()).sum().item())
    return running_loss, 100 * correct / total
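
# Usage sketch (illustrative): train_poisson() mirrors train() without the DVS transpose;
# the static images are presumably rate/Poisson-encoded inside the model over T steps, e.g.
#   loss_sum, acc = train_poisson(model, device, cifar_loader, criterion, optimizer, T=8)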

def val(model, test_loader, device, T, dvs, atk=None):
    """Evaluate clean (or, if atk is given, adversarial) accuracy in %."""
    correct = 0
    total = 0
    model.eval()
    for inputs, targets in tqdm(test_loader):
        inputs = inputs.to(device)
        if dvs:
            # DVS frames: move the time dimension to the front.
            inputs = inputs.transpose(0, 1)
        if atk is not None:
            # Generate adversarial examples with the attack in eval mode, then restore
            # the simulation length in case the attack changed it.
            atk.set_training_mode(model_training=False, batchnorm_training=False, dropout_training=False)
            inputs = atk(inputs, targets.to(device))
            model.set_simulation_time(T)
        with torch.no_grad():
            if T > 0:
                outputs = model(inputs).mean(0)
            else:
                outputs = model(inputs)
        _, predicted = outputs.cpu().max(1)
        total += float(targets.size(0))
        correct += float(predicted.eq(targets).sum().item())
    final_acc = 100 * correct / total
    return final_acc
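
# Usage sketch (illustrative): atk is assumed to follow a torchattacks-style interface
# (set_training_mode(...) plus atk(inputs, labels) returning adversarial inputs), e.g.
#   atk = torchattacks.FGSM(model, eps=8 / 255)
#   robust_acc = val(model, test_loader, device, T=8, dvs=False, atk=atk)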

def val_success_rate(model, test_loader, device, T, dvs, atk=None):
    """Attack success rate in %: share of originally correct samples that the attack flips."""
    correct = 0
    total = 0
    model.eval()
    for inputs, targets in test_loader:
        inputs = inputs.to(device)
        if dvs:
            inputs = inputs.transpose(0, 1)
        # Clean pass: mark which samples the model classifies correctly before the attack.
        with torch.no_grad():
            if T > 0:
                outputs = model(inputs).mean(0)
            else:
                outputs = model(inputs)
        _, predicted = outputs.cpu().max(1)
        mask = predicted.eq(targets).float()
        if atk is not None:
            atk.set_training_mode(model_training=False, batchnorm_training=False, dropout_training=False)
            inputs = atk(inputs, targets.to(device))
            model.set_simulation_time(T)
        # Adversarial pass: count originally-correct samples that are now misclassified.
        with torch.no_grad():
            if T > 0:
                outputs = model(inputs).mean(0)
            else:
                outputs = model(inputs)
        _, predicted = outputs.cpu().max(1)
        flipped = ~predicted.eq(targets)
        total += mask.sum()
        correct += (flipped.float() * mask).sum()
    final_acc = 100 * correct / total
    return final_acc.item()
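

if __name__ == "__main__":
    # Minimal smoke-test sketch (added for illustration, not part of the original file).
    # It exercises train() and val() on random data via the T=0 (plain ANN) path, with no
    # DVS input and no attack; every name below is a placeholder, not a repo component.
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).to(device)
    data = TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,)))
    loader = DataLoader(data, batch_size=16, shuffle=True)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)

    loss_sum, train_acc = train(toy_model, device, loader, criterion, optimizer, T=0, dvs=False)
    clean_acc = val(toy_model, loader, device, T=0, dvs=False, atk=None)
    print(f"loss {loss_sum:.3f} | train acc {train_acc:.2f}% | val acc {clean_acc:.2f}%")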