# narcissus_function.py
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import Optimizer
import torch.backends.cudnn as cudnn
import tqdm
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import TensorDataset, DataLoader, Subset
import torchvision.models as models
from models import *
import os
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
from util import *
random_seed = 0
np.random.seed(random_seed)
random.seed(random_seed)
torch.manual_seed(random_seed)
torch.cuda.set_device(2)
device = 'cuda'
'''
Paths for the target dataset and the public out-of-distribution (POOD) dataset. The setting
used here is CIFAR-10 as the target dataset and Tiny-ImageNet as the POOD dataset. Their
directory structure is as follows:
dataset_path/
|-- cifar-10-batches-py
|-- tiny-imagenet-200
'''
dataset_path = '/home/minzhou/data/'
# The target class label
lab = 2
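
# Pipeline overview (summarizing the function below): train a surrogate model on the
# POOD data plus the target-class samples, warm up a copy of it on the target class
# only, then optimize an L-inf-bounded noise pattern (the trigger) that the warmed-up
# model strongly associates with the target class.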
def narcissus_gen(dataset_path=dataset_path, lab=lab):
    # Noise size, default is full image size
    noise_size = 32
    # Radius of the L-inf ball (in [0, 1] pixel scale; the clamp below is doubled
    # because inputs are normalized to [-1, 1])
    l_inf_r = 16 / 255
    # Models for surrogate training and trigger generation
    surrogate_model = ResNet18_201().cuda()
    generating_model = ResNet18_201().cuda()
    # Surrogate model training epochs
    surrogate_epochs = 200
    # Learning rate for poison warm-up
    generating_lr_warmup = 0.1
    warmup_round = 5
    # Learning rate for trigger generation
    generating_lr_tri = 0.01
    gen_round = 1000
    # Training batch size
    train_batch_size = 350
    # The mode used when applying the noise to images
    patch_mode = 'add'

    # Augmentation used for the surrogate model training stage
    transform_surrogate_train = transforms.Compose([
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Augmentation used for the training set
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Transform used for the test set
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Target dataset (CIFAR-10) and outer/POOD dataset (Tiny-ImageNet)
    ori_train = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=False, transform=transform_train)
    ori_test = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=False, transform=transform_test)
    outter_trainset = torchvision.datasets.ImageFolder(root=dataset_path + 'tiny-imagenet-200/train/', transform=transform_surrogate_train)

    # Labels of the target dataset
    train_label = [get_labels(ori_train)[x] for x in range(len(get_labels(ori_train)))]
    test_label = [get_labels(ori_test)[x] for x in range(len(get_labels(ori_test)))]

    # Inner train dataset: only the samples of the target class
    train_target_list = list(np.where(np.array(train_label) == lab)[0])
    train_target = Subset(ori_train, train_target_list)

    # Surrogate training set: target-class samples combined with the POOD data
    concoct_train_dataset = concoct_dataset(train_target, outter_trainset)

    surrogate_loader = torch.utils.data.DataLoader(concoct_train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=16)
    poi_warm_up_loader = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    trigger_gen_loaders = torch.utils.data.DataLoader(train_target, batch_size=train_batch_size, shuffle=True, num_workers=16)
    # Initial noise (the trigger to be optimized)
    noise = torch.zeros((1, 3, noise_size, noise_size), device=device)

    criterion = torch.nn.CrossEntropyLoss()
    # outer_opt = torch.optim.RAdam(params=base_model.parameters(), lr=generating_lr_outer)
    surrogate_opt = torch.optim.SGD(params=surrogate_model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    surrogate_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(surrogate_opt, T_max=surrogate_epochs)

    # Training the surrogate model
    print('Training the surrogate model')
    for epoch in range(0, surrogate_epochs):
        surrogate_model.train()
        loss_list = []
        for images, labels in surrogate_loader:
            images, labels = images.cuda(), labels.cuda()
            surrogate_opt.zero_grad()
            outputs = surrogate_model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            loss_list.append(loss.item())
            surrogate_opt.step()
        surrogate_scheduler.step()
        ave_loss = np.average(np.array(loss_list))
        print('Epoch:%d, Loss: %.03f' % (epoch, ave_loss))

    # Save the surrogate model (create the checkpoint directory if it does not exist)
    os.makedirs('./checkpoint', exist_ok=True)
    save_path = './checkpoint/surrogate_pretrain_' + str(surrogate_epochs) + '.pth'
    torch.save(surrogate_model.state_dict(), save_path)
    # Prepare the model and optimizer for poi_warm_up training
    poi_warm_up_model = generating_model
    poi_warm_up_model.load_state_dict(surrogate_model.state_dict())
    poi_warm_up_opt = torch.optim.RAdam(params=poi_warm_up_model.parameters(), lr=generating_lr_warmup)

    # Poi_warm_up stage: fine-tune the copied surrogate on target-class samples only
    poi_warm_up_model.train()
    for param in poi_warm_up_model.parameters():
        param.requires_grad = True
    for epoch in range(0, warmup_round):
        poi_warm_up_model.train()
        loss_list = []
        for images, labels in poi_warm_up_loader:
            images, labels = images.cuda(), labels.cuda()
            poi_warm_up_model.zero_grad()
            poi_warm_up_opt.zero_grad()
            outputs = poi_warm_up_model(images)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            loss_list.append(loss.item())
            poi_warm_up_opt.step()
        ave_loss = np.average(np.array(loss_list))
        print('Epoch:%d, Loss: %e' % (epoch, ave_loss))
    # Trigger generating stage: freeze the warmed-up model and optimize only the noise
    for param in poi_warm_up_model.parameters():
        param.requires_grad = False
    batch_pert = noise.clone().detach().requires_grad_(True)
    batch_opt = torch.optim.RAdam(params=[batch_pert], lr=generating_lr_tri)
    for minmin in tqdm.tqdm(range(gen_round)):
        loss_list = []
        for images, labels in trigger_gen_loaders:
            images, labels = images.cuda(), labels.cuda()
            new_images = torch.clone(images)
            # Project the perturbation onto the L-inf ball (doubled for [-1, 1] inputs)
            clamp_batch_pert = torch.clamp(batch_pert, -l_inf_r * 2, l_inf_r * 2)
            # Apply the perturbation and keep images in the valid normalized range
            new_images = torch.clamp(apply_noise_patch(clamp_batch_pert, new_images.clone(), mode=patch_mode), -1, 1)
            per_logits = poi_warm_up_model.forward(new_images)
            loss = criterion(per_logits, labels)
            loss_regu = torch.mean(loss)
            batch_opt.zero_grad()
            loss_list.append(float(loss_regu.data))
            loss_regu.backward(retain_graph=True)
            batch_opt.step()
        ave_loss = np.average(np.array(loss_list))
        ave_grad = np.sum(abs(batch_pert.grad).detach().cpu().numpy())
        print('Gradient:', ave_grad, 'Loss:', ave_loss)
        if ave_grad == 0:
            break

    noise = torch.clamp(batch_pert, -l_inf_r * 2, l_inf_r * 2)
    best_noise = noise.clone().detach().cpu()
    plt.imshow(np.transpose(noise[0].detach().cpu(), (1, 2, 0)))
    plt.show()
    print('Noise max val:', noise.max())
    return best_noise
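

# A minimal usage sketch (an assumption, not part of the original file): call
# narcissus_gen() after pointing dataset_path at a directory containing both
# cifar-10-batches-py and tiny-imagenet-200, then persist the returned trigger.
# The output filename below is hypothetical.
if __name__ == '__main__':
    best_noise = narcissus_gen(dataset_path=dataset_path, lab=lab)
    # Save the optimized trigger for later poisoning/evaluation steps
    np.save('./checkpoint/best_noise_lab' + str(lab) + '.npy', best_noise.numpy())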