# train_notmaskV3.py
# Path setup (keep at the top so local imports resolve)
import sys
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
sys.path.append(os.getcwd())
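# CUDA_VISIBLE_DEVICES pins this run to physical GPU 2; inside the process
# that device then shows up as cuda:0, which is why the rest of the script
# can simply call .cuda().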
# Third-party imports
from torch.nn.modules.distance import PairwiseDistance
import torch.nn as nn
from tqdm import tqdm
import numpy as np
import torch
import time
# Project imports
# from Models.Model_for_facenet import model, optimizer_model, start_epoch, flag_train_multi_gpu
from Data_loader.Data_loader_facenet_notmask import train_dataloader, test_dataloader
from Data_loader.Data_loader_facenet_mask import LFWestMask_dataloader
from Losses.Triplet_loss import TripletLoss
from validate_on_LFW import evaluate_lfw
from config_notmask import config
from Models.CBAM_Face_attention_Resnet_notmaskV3 import resnet18_cbam, resnet50_cbam, resnet101_cbam, resnet34_cbam, \
    resnet152_cbam
pwd = os.path.abspath('./')
print("Using {} model architecture.".format(config['model']))
start_epoch = 0
# Pick the CBAM-ResNet backbone that matches the configured depth.
if config['model'] == 18:
    model = resnet18_cbam(pretrained=True, showlayer=False, num_classes=128)
elif config['model'] == 34:
    model = resnet34_cbam(pretrained=True, showlayer=False, num_classes=128)
elif config['model'] == 50:
    model = resnet50_cbam(pretrained=True, showlayer=False, num_classes=128)
elif config['model'] == 101:
    model = resnet101_cbam(pretrained=True, showlayer=False, num_classes=128)
elif config['model'] == 152:
    model = resnet152_cbam(pretrained=True, showlayer=False, num_classes=128)
model_path = os.path.join(pwd, 'Model_training_checkpoints')
# Collect the epoch numbers of all saved V3 checkpoints; filenames look like
# 'model_34_triplet_epoch_97_...', so split('_')[4] is the epoch number.
x = [int(i.split('_')[4]) for i in os.listdir(model_path) if 'V3' in i]
x.sort()
# Default to a fixed checkpoint; the loop below overrides it with the latest
# V3 checkpoint when one is found.
model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_97_rocNotMasked0.951_rocMasked0.766notmaskV3.pt')
for i in os.listdir(model_path):
    if (len(x) != 0) and ('epoch_' + str(x[-1]) in i) and ('V3' in i):
        model_pathi = os.path.join(model_path, i)
        break
if os.path.exists(model_pathi) and ('V3' in model_pathi):
    model_state = torch.load(model_pathi)
    model.load_state_dict(model_state['model_state_dict'])
    start_epoch = model_state['epoch']
    print('loaded %s' % model_pathi)
else:
    print('No pretrained model found!')
# The epoch_97 checkpoint stores a wrong epoch counter (216); reset it.
if ('epoch_97' in model_pathi) and (start_epoch == 216):
    start_epoch = 97
flag_train_gpu = torch.cuda.is_available()
flag_train_multi_gpu = False
if flag_train_gpu and torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
    model.cuda()
    flag_train_multi_gpu = True
    print('Using multi-gpu training.')
elif flag_train_gpu and torch.cuda.device_count() == 1:
    model.cuda()
    print('Using single-gpu training.')
def adjust_learning_rate(optimizer, epoch):
    """Step-decay schedule: drop the learning rate at fixed epoch milestones."""
    if epoch < 96:
        lr = 0.125
    elif (epoch >= 96) and (epoch < 160):
        lr = 0.0625
    elif (epoch >= 160) and (epoch < 210):
        lr = 0.0155
    elif (epoch >= 210) and (epoch < 250):
        lr = 0.003
    else:
        lr = 0.0001
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
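# The schedule above, by epoch range:
#   [0, 96)    -> 0.125
#   [96, 160)  -> 0.0625
#   [160, 210) -> 0.0155
#   [210, 250) -> 0.003
#   [250, ...) -> 0.0001
# It is invoked once per batch in the training loop, which is harmless since
# the rate depends only on the epoch.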
def create_optimizer(model, new_lr):
    # Set up the optimizer named in the config.
    if config['optimizer'] == "sgd":
        optimizer_model = torch.optim.SGD(model.parameters(), lr=new_lr,
                                          momentum=0.9, dampening=0.9,
                                          weight_decay=0)
    elif config['optimizer'] == "adagrad":
        optimizer_model = torch.optim.Adagrad(model.parameters(), lr=new_lr,
                                              lr_decay=1e-4,
                                              weight_decay=0)
    elif config['optimizer'] == "rmsprop":
        optimizer_model = torch.optim.RMSprop(model.parameters(), lr=new_lr)
    elif config['optimizer'] == "adam":
        optimizer_model = torch.optim.Adam(model.parameters(), lr=new_lr,
                                           weight_decay=0)
    else:
        raise ValueError("Unknown optimizer: {}".format(config['optimizer']))
    return optimizer_model
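# create_optimizer is called once below with the schedule's starting rate
# (0.125); adjust_learning_rate then rewrites param_group['lr'] during
# training, so the value passed here only matters before the first batch.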
# Random seeds for reproducibility
seed = 0
optimizer_model = create_optimizer(model, 0.125)
torch.manual_seed(seed)  # seed the CPU RNG
torch.cuda.manual_seed(seed)  # seed the current GPU
torch.cuda.manual_seed_all(seed)  # seed all GPUs
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
# Wall-clock start time and epoch range
total_time_start = time.time()
end_epoch = start_epoch + config['epochs']
# PairwiseDistance(2) computes the row-wise Euclidean distance between two
# (batch, embedding_dim) tensors, returning a (batch,) vector.
l2_distance = PairwiseDistance(2).cuda()
# Seed the best-AUC / best-accuracy trackers so the first epoch can log them.
best_roc_auc = -1
best_accuracy = -1
print('Countdown 3 seconds')
time.sleep(1)
print('Countdown 2 seconds')
time.sleep(1)
print('Countdown 1 second')
time.sleep(1)
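# Each epoch: (1) train with online hard-triplet mining, (2) evaluate on the
# unmasked LFW test pairs, (3) evaluate on the masked LFW pairs, (4) append a
# log entry, and (5) optionally checkpoint the weights.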
# Outer loop over epochs
for epoch in range(start_epoch, end_epoch):
    print("\ntraining on TrainDataset! ...")
    epoch_time_start = time.time()
    triplet_loss_sum = 0
    attention_loss_sum = 0
    num_hard = 0
    model.train()  # training mode
    # Inner loop over batches
    progress_bar = enumerate(tqdm(train_dataloader))
    for batch_idx, batch_sample in progress_bar:
        # Fetch this batch: the three face crops (batch x image)...
        anc_img = batch_sample['anc_img'].cuda()
        pos_img = batch_sample['pos_img'].cuda()
        neg_img = batch_sample['neg_img'].cuda()
        # ...and the three corresponding mask maps (batch x image)
        mask_anc = batch_sample['mask_anc'].cuda()
        mask_pos = batch_sample['mask_pos'].cuda()
        mask_neg = batch_sample['mask_neg'].cuda()
        # Model forward pass: run each of the three images through the model.
        # During training the input is (image, mask) and the output includes
        # the attention loss; during validation the input is a single image
        # and the output is only the embedding.
        anc_embedding, anc_attention_loss = model((anc_img, mask_anc))
        pos_embedding, pos_attention_loss = model((pos_img, mask_pos))
        neg_embedding, neg_attention_loss = model((neg_img, mask_neg))
        # L2-normalize each embedding row (dim=1) and scale it onto a
        # hypersphere of radius 50.
        anc_embedding = torch.div(anc_embedding, torch.norm(anc_embedding, dim=1, keepdim=True)) * 50
        pos_embedding = torch.div(pos_embedding, torch.norm(pos_embedding, dim=1, keepdim=True)) * 50
        neg_embedding = torch.div(neg_embedding, torch.norm(neg_embedding, dim=1, keepdim=True)) * 50
        # Hard-sample mining
        # L2 distances between anchor/positive and anchor/negative embeddings
        pos_dist = l2_distance.forward(anc_embedding, pos_embedding)
        neg_dist = l2_distance.forward(anc_embedding, neg_embedding)
        # A triplet counts as hard while the negative is not yet further from
        # the anchor than the positive by at least the margin.
        hard_cond = (neg_dist - pos_dist < config['margin']).cpu().numpy().flatten()
        hard_triplets = np.where(hard_cond == 1)
        if len(hard_triplets[0]) == 0:
            continue
        # Select the hard embeddings...
        anc_hard_embedding = anc_embedding[hard_triplets].cuda()
        pos_hard_embedding = pos_embedding[hard_triplets].cuda()
        neg_hard_embedding = neg_embedding[hard_triplets].cuda()
        # ...and the attention losses of the same hard samples.
        hard_anc_attention_loss = anc_attention_loss[hard_triplets]
        hard_pos_attention_loss = pos_attention_loss[hard_triplets]
        hard_neg_attention_loss = neg_attention_loss[hard_triplets]
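        # Illustration of the criterion with margin = 0.5 (hypothetical
        # numbers): pos_dist = 0.8, neg_dist = 1.0 gives 0.2 < 0.5, so the
        # triplet is hard and kept; pos_dist = 0.3, neg_dist = 1.2 gives
        # 0.9 >= 0.5, so it is easy and filtered out, since easy triplets
        # would contribute zero loss anyway.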
        # Loss computation
        # Triplet loss over this batch's hard samples
        triplet_loss = TripletLoss(margin=config['margin']).forward(
            anchor=anc_hard_embedding,
            positive=pos_hard_embedding,
            negative=neg_hard_embedding
        ).cuda()
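        # Per hard triplet this computes the standard FaceNet hinge
        #   loss = max(||a - p||_2 - ||a - n||_2 + margin, 0)
        # (assumed to match the implementation in Losses/Triplet_loss.py).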
        # Attention loss of the hard samples (already computed inside the
        # forward pass; here it is just concatenated and averaged).
        hard_attention_loss = torch.cat([hard_anc_attention_loss, hard_pos_attention_loss, hard_neg_attention_loss])
        # .float() keeps the averaged scalar on the GPU as float32, so it can
        # be added to the CUDA triplet loss below.
        hard_attention_loss = torch.mean(hard_attention_loss).cuda().float()
        # Total loss
        LOSS = triplet_loss + hard_attention_loss
        # Backward pass
        optimizer_model.zero_grad()
        LOSS.backward()
        optimizer_model.step()
        # Update the optimizer's learning rate for the current epoch
        adjust_learning_rate(optimizer_model, epoch)
        # Bookkeeping for the log:
        # hard samples seen in this batch...
        num_hard += len(anc_hard_embedding)
        # ...and the epoch's accumulated triplet and attention losses
        triplet_loss_sum += triplet_loss.item()
        attention_loss_sum += hard_attention_loss.item()
    # Per-epoch average losses, normalized by the number of hard samples
    avg_triplet_loss = 0 if (num_hard == 0) else triplet_loss_sum / num_hard
    avg_attention_loss = 0 if (num_hard == 0) else attention_loss_sum / num_hard
    avg_loss = avg_triplet_loss + avg_attention_loss
    epoch_time_end = time.time()
    # Measure accuracy on the (unmasked) test set
    print("Validating on TestDataset! ...")
    model.eval()  # evaluation mode
    with torch.no_grad():  # no gradients needed
        distances, labels = [], []
        progress_bar = enumerate(tqdm(test_dataloader))
        for batch_index, (data_a, data_b, label) in progress_bar:
            # data_a, data_b and label are per-batch tensors
            data_a = data_a.cuda()
            data_b = data_b.cuda()
            label = label.cuda()
            output_a, output_b = model(data_a), model(data_b)
            # Row-wise L2 normalization of the embeddings
            output_a = torch.div(output_a, torch.norm(output_a, dim=1, keepdim=True))
            output_b = torch.div(output_b, torch.norm(output_b, dim=1, keepdim=True))
            distance = l2_distance.forward(output_a, output_b)
            # Collect per-batch arrays...
            labels.append(label.cpu().detach().numpy())
            distances.append(distance.cpu().detach().numpy())
        # ...then flatten them into single arrays.
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array([subdist for distance in distances for subdist in distance])
        true_positive_rate, false_positive_rate, precision, recall, accuracy, roc_auc, best_distances, \
        tar, far = evaluate_lfw(
            distances=distances,
            labels=labels,
            epoch='epoch_' + str(epoch),
            tag='NOTMaskedLFW_auc',
            version='V3',
            pltshow=True
        )
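    # evaluate_lfw presumably sweeps distance thresholds over K folds, so
    # accuracy, precision and recall come back as per-fold arrays; that is
    # why the logs below report them as mean +- std.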
print("Validating on LFWMASKTestDataset! ...")
with torch.no_grad(): # 不传梯度了
distances, labels = [], []
progress_bar = enumerate(tqdm(LFWestMask_dataloader))
for batch_index, (data_a, data_b, label) in progress_bar:
# data_a, data_b, label这仨是一批的矩阵
data_a = data_a.cuda()
data_b = data_b.cuda()
label = label.cuda()
output_a, output_b = model(data_a), model(data_b)
output_a = torch.div(output_a, torch.norm(output_a))
output_b = torch.div(output_b, torch.norm(output_b))
distance = l2_distance.forward(output_a, output_b)
# 列表里套矩阵
labels.append(label.cpu().detach().numpy())
distances.append(distance.cpu().detach().numpy())
# 展平
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for distance in distances for subdist in distance])
true_positive_rate_mask, false_positive_rate_mask, precision_mask, recall_mask, \
accuracy_mask, roc_auc_mask, best_distances_mask, \
tar_mask, far_mask = evaluate_lfw(
distances=distances,
labels=labels,
epoch = 'epoch_'+str(epoch),
tag = 'MaskedLFW_auc',
version = 'V3',
pltshow=True
)
    # Print and save the log
    # Read the best AUC and accuracy so far back out of the log file so they
    # survive restarts (assumes the third-to-last line is the unmasked test
    # entry written below, with tab-separated fields in a fixed order).
    if os.path.exists('logs/lfw_{}_log_tripletnotmaskV3.txt'.format(config['model'])):
        with open('logs/lfw_{}_log_tripletnotmaskV3.txt'.format(config['model']), 'r') as f:
            lines = f.readlines()
            my_line = lines[-3]
            my_line = my_line.split('\t')
            best_roc_auc = float(my_line[3].split(':')[1])
            best_accuracy = float(my_line[5].split(':')[1])
    # Decide when to save the weights: on the last epoch, on a new best AUC,
    # and on every third epoch that sets a new best accuracy.
    save = False
    if config['save_last_model'] and epoch == end_epoch - 1:
        save = True
    if roc_auc > best_roc_auc:
        best_roc_auc = roc_auc
        save = True
    if np.mean(accuracy) > best_accuracy:
        best_accuracy = np.mean(accuracy)
        if epoch % 3 == 0:
            save = True
    print('save: ', save)
    # Print the epoch summary (unmasked test set); train_time is in hours
    print('Epoch {}:\n \
    train_log:\tLOSS: {:.3f}\ttri_loss: {:.3f}\tatt_loss: {:.3f}\thard_sample: {}\ttrain_time: {}\n \
    test_log:\tAUC: {:.3f}\tACC: {:.3f}+-{:.3f}\trecall: {:.3f}+-{:.3f}\tPrecision {:.3f}+-{:.3f}\t'.format(
        epoch + 1,
        avg_loss,
        avg_triplet_loss,
        avg_attention_loss,
        num_hard,
        (epoch_time_end - epoch_time_start) / 3600,
        roc_auc,
        np.mean(accuracy),
        np.std(accuracy),
        np.mean(recall),
        np.std(recall),
        np.mean(precision),
        np.std(precision),
    ))
    # Print the epoch summary (masked LFW test set)
    print('Epoch {}:\n \
    train_log:\tLOSS: {:.3f}\ttri_loss: {:.3f}\tatt_loss: {:.3f}\thard_sample: {}\ttrain_time: {}\n \
    MASKED_LFW_test_log:\tAUC: {:.3f}\tACC: {:.3f}+-{:.3f}\trecall: {:.3f}+-{:.3f}\tPrecision {:.3f}+-{:.3f}\t'.format(
        epoch + 1,
        avg_loss,
        avg_triplet_loss,
        avg_attention_loss,
        num_hard,
        (epoch_time_end - epoch_time_start) / 3600,
        roc_auc_mask,
        np.mean(accuracy_mask),
        np.std(accuracy_mask),
        np.mean(recall_mask),
        np.std(recall_mask),
        np.mean(precision_mask),
        np.std(precision_mask),
    ))
    # Append this epoch's entries to the log file: one train line, one
    # masked-test line, one unmasked-test line, then a config line.
    with open('logs/lfw_{}_log_tripletnotmaskV3.txt'.format(config['model']), 'a') as f:
        val_list = [
            'epoch: ' + str(epoch + 1) + '\t',
            'train:\t',
            'LOSS: ' + str('%.3f' % avg_loss) + '\t',
            'tri_loss: ' + str('%.3f' % avg_triplet_loss) + '\t',
            'att_loss: ' + str('%.3f' % avg_attention_loss) + '\t',
            'hard_sample: ' + str(num_hard) + '\t',
            'train_time: ' + str('%.3f' % ((epoch_time_end - epoch_time_start) / 3600))
        ]
        log = ''.join(str(value) for value in val_list)
        f.writelines(log + '\n')
        val_list = [
            'epoch: ' + str(epoch + 1) + '\t',
            'test:\t',
            'auc_masked: ' + str('%.3f' % roc_auc_mask) + '\t',
            'best_auc_MD: ' + str('%.3f' % best_roc_auc) + '\t',
            'acc_MD: ' + str('%.3f' % np.mean(accuracy_mask)) + '+-' + str('%.3f' % np.std(accuracy_mask)) + '\t',
            'best_acc_MD: ' + str('%.3f' % best_accuracy) + '\t',
            'recall_MD: ' + str('%.3f' % np.mean(recall_mask)) + '+-' + str('%.3f' % np.std(recall_mask)) + '\t',
            'precision_MD: ' + str('%.3f' % np.mean(precision_mask)) + '+-' + str(
                '%.3f' % np.std(precision_mask)) + '\t',
            'best_distances_MD: ' + str('%.3f' % np.mean(best_distances_mask)) + '+-' + str(
                '%.3f' % np.std(best_distances_mask)) + '\t',
            'tar_m: ' + str('%.3f' % np.mean(tar_mask)) + '\t',
        ]
        log = ''.join(str(value) for value in val_list)
        f.writelines(log + '\n')
        val_list = [
            'epoch: ' + str(epoch + 1) + '\t',
            'test:\t',
            'auc: ' + str('%.3f' % roc_auc) + '\t',
            'best_auc: ' + str('%.3f' % best_roc_auc) + '\t',
            'acc: ' + str('%.3f' % np.mean(accuracy)) + '+-' + str('%.3f' % np.std(accuracy)) + '\t',
            'best_acc: ' + str('%.3f' % best_accuracy) + '\t',
            'recall: ' + str('%.3f' % np.mean(recall)) + '+-' + str('%.3f' % np.std(recall)) + '\t',
            'precision: ' + str('%.3f' % np.mean(precision)) + '+-' + str('%.3f' % np.std(precision)) + '\t',
            'best_distances: ' + str('%.3f' % np.mean(best_distances)) + '+-' + str(
                '%.3f' % np.std(best_distances)) + '\t',
            'tar_m: ' + str('%.3f' % np.mean(tar)) + '\t',
        ]
        log = ''.join(str(value) for value in val_list)
        f.writelines(log + '\n')
        val_list = [
            'epoch: ' + str(epoch + 1) + '\t',
            'config:\t',
            'LR: ' + str(config['Learning_rate']) + '\t',
            'optimizer: ' + str(config['optimizer']) + '\t',
            'embedding_dim: ' + str(config['embedding_dim']) + '\t',
            'pretrained: ' + str(config['pretrained']) + '\t',
            'image_size: ' + str(config['image_size'])
        ]
        log = ''.join(str(value) for value in val_list)
        f.writelines(log + '\n' + '\n')
    # Save the model weights
    if save:
        state = {
            'epoch': epoch + 1,
            'embedding_dimension': config['embedding_dim'],
            'batch_size_training': config['train_batch_size'],
            'model_state_dict': model.state_dict(),
            'model_architecture': config['model'],
            'optimizer_model_state_dict': optimizer_model.state_dict()
        }
        # Under DataParallel the underlying model lives in model.module.
        if flag_train_multi_gpu:
            state['model_state_dict'] = model.module.state_dict()
        # For storing best euclidean distance threshold during LFW validation
        # if flag_validate_lfw:
        #     state['best_distance_threshold'] = np.mean(best_distances)
        torch.save(state, 'Model_training_checkpoints/model_{}_triplet_epoch_{}_rocNotMasked{:.3f}_rocMasked{:.3f}notmaskV3.pt'.format(
            config['model'],
            epoch + 1,
            roc_auc, roc_auc_mask))
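        # Resuming from such a checkpoint elsewhere (a minimal sketch; the
        # field names match the `state` dict above):
        #   state = torch.load(path)
        #   model.load_state_dict(state['model_state_dict'])
        #   optimizer_model.load_state_dict(state['optimizer_model_state_dict'])
        #   start_epoch = state['epoch']
        # Note that the loader at the top of this script restores only the
        # model weights and epoch, not the optimizer state.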
# Training loop end
total_time_end = time.time()
total_time_elapsed = total_time_end - total_time_start
print("\nTraining finished: total time elapsed: {:.2f} hours.".format(total_time_elapsed / 3600))