#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the second distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
##################################################################
#This code adapted from https://github.com/mseitzer/pytorch-fid #
##################################################################
See --help to see further details.
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import contextlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from inception_score import InceptionV3
from torch.nn.functional import adaptive_avg_pool2d
try:
    from tqdm import tqdm
except ImportError:
    # If tqdm is not available, fall back to a no-op pass-through
    # (accept and ignore tqdm's keyword arguments such as `desc`)
    def tqdm(x, *args, **kwargs):
        return x
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
help=('Path to the generated images or '
'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='1', type=str,
help='GPU to use (leave blank for CPU only)')
def _get_no_grad_ctx_mgr(require_grad):
    """Returns the `torch.no_grad` context manager for PyTorch version >=
    0.4, or a no-op context manager otherwise.
    """
if not require_grad and float(torch.__version__[0:3]) >= 0.4:
return torch.no_grad()
return contextlib.suppress()
# Pytorch implementation of matrix sqrt, from Tsung-Yu Lin, and Subhransu Maji
# https://github.com/msubhransu/matrix-sqrt
def sqrt_newton_schulz(A, numIters, dtype=None):
if dtype is None:
dtype = A.type()
batchSize = A.shape[0]
dim = A.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
Y = A.div(normA.view(batchSize, 1, 1).expand_as(A)).to("cuda:0")
I = torch.eye(dim, dim).view(1, dim, dim).repeat(batchSize, 1, 1).type(dtype).to("cuda:0")
Z = torch.eye(dim, dim).view(1, dim, dim).repeat(batchSize, 1, 1).type(dtype).to("cuda:0")
for i in range(numIters):
T = 0.5 * (3.0 * I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y * torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
return sA
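# A hedged sanity check for the iteration above (this helper is hypothetical,
# not part of the original file; it assumes a CUDA device, since
# sqrt_newton_schulz moves its operands to "cuda:0"): squaring the returned
# matrix should approximately recover the input for a symmetric
# positive-definite matrix.
def _check_sqrt_newton_schulz(dim=64, num_iters=50):
    x = torch.randn(dim, 4 * dim)
    # Build a well-conditioned SPD matrix with a batch dimension of 1
    spd = (x.mm(x.t()) / (4 * dim) + torch.eye(dim)).unsqueeze(0).to("cuda:0")
    root = sqrt_newton_schulz(spd, num_iters)
    residual = (root.bmm(root) - spd).abs().max()
    print('max |sqrt(A)^2 - A| =', float(residual))  # expect a small value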
# A pytorch implementation of cov, from Modar M. Alfadly
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze()
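# A hedged sanity check (hypothetical helper, not part of the original file):
# torch_cov with rowvar=False should agree with np.cov(..., rowvar=False).
# Note that torch_cov centers its argument in place, hence the clone.
def _check_torch_cov(n_obs=200, n_vars=5):
    x = torch.randn(n_obs, n_vars)
    ours = torch_cov(x.clone(), rowvar=False)
    ref = torch.from_numpy(np.cov(x.numpy(), rowvar=False)).float()
    assert torch.allclose(ours, ref, atol=1e-4), 'covariance mismatch'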
def get_activations(gen_imgs, model, batch_size=50, dims=2048,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
    -- gen_imgs    : Tensor of generated images, values in [-1, 1]
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
    -- verbose     : If set to True, the number of processed batches is
                     reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if gen_imgs.shape[0] % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > gen_imgs.shape[0]:
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = gen_imgs.shape[0]
n_batches = gen_imgs.shape[0] // batch_size
    # Map generator output from [-1, 1] (the tanh range) to [0, 1]
    gen_imgs = (gen_imgs + 1.0) / 2.0
pred_arr = []
    model.to("cuda:0")
    for i in tqdm(range(n_batches)):
        if verbose:
            print('\rPropagating batch %d/%d' % (i + 1, n_batches),
                  end='', flush=True)
        start = i * batch_size
        end = start + batch_size
        images = gen_imgs[start: end]
        pred = model(images.to("cuda:0"))[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr += [pred.view(batch_size, -1)]
if verbose:
print('done')
return torch.cat(pred_arr, dim=0)
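# Hedged usage sketch (shapes are illustrative, not from the original file;
# requires the InceptionV3 wrapper imported above and a CUDA device):
#
#   model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]])
#   acts = get_activations(torch.tanh(torch.randn(100, 3, 64, 64)), model)
#   # acts.shape == (100, 2048)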
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
    Params:
    -- mu1   : Tensor containing the sample mean over activations of the
               pool_3 layer of the inception net for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
    # Run 50 iterations of Newton-Schulz to get the matrix sqrt of sigma1 @ sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out
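# A hedged sanity check (hypothetical helper, not part of the original file;
# assumes a CUDA device, since sqrt_newton_schulz runs on "cuda:0"): the
# Frechet distance between a Gaussian and itself should be close to zero.
def _check_frechet_identity(dim=16):
    mu = torch.randn(dim, device='cuda:0')
    a = torch.randn(dim, dim, device='cuda:0')
    sigma = a.mm(a.t()) + dim * torch.eye(dim, device='cuda:0')  # SPD
    d = torch_calculate_frechet_distance(mu, sigma, mu, sigma)
    print(float(d))  # expect a value near 0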
def calculate_activation_statistics(gen_imgs, model, batch_size=50,
dims=2048, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
    -- gen_imgs    : Tensor of generated images, values in [-1, 1]
    -- model       : Instance of inception model
    -- batch_size  : The images tensor is split into batches of size
                     batch_size. A reasonable batch size depends on the
                     hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True, the number of processed batches is
                     reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(gen_imgs, model, batch_size, dims, cuda, verbose)
mu = torch.mean(act, dim=0)
sigma = torch_cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if isinstance(path, str):
assert path.endswith('.npz')
f = np.load(path)
if 'mean' in f:
m, s = f['mean'][:], f['cov'][:]
else:
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
# a tensor
gen_imgs = path
m, s = calculate_activation_statistics(gen_imgs, model, batch_size,
dims, cuda)
return m, s
def calculate_fid_given_paths_torch(gen_imgs, path, require_grad=False, batch_size=50, cuda=True, dims=2048):
"""
Calculates the FID of two paths
:param gen_imgs: The value range of gen_imgs should be (-1, 1). Just the output of tanh.
:param path: fid file path. *.npz.
:param batch_size:
:param cuda:
:param dims:
:return:
"""
if not os.path.exists(path):
raise RuntimeError('Invalid path: %s' % path)
    # Fewer samples than feature dimensions makes the covariance estimate
    # singular, which yields NaNs in the FID.
    assert gen_imgs.shape[0] >= dims, f'gen_imgs size: {gen_imgs.shape}'
with _get_no_grad_ctx_mgr(require_grad=require_grad):
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(gen_imgs, model, batch_size,
dims, cuda)
# print(f'generated stat: {m1}, {s1}')
m2, s2 = _compute_statistics_of_path(path, model, batch_size,
dims, cuda)
# print(f'GT stat: {m2}, {s2}')
        fid_value = torch_calculate_frechet_distance(
            m1.to("cuda:0"), s1.to("cuda:0"),
            torch.tensor(m2).float().to("cuda:0"),
            torch.tensor(s2).float().to("cuda:0"))
return fid_value
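# Hedged usage sketch (the stats file name and tensor shape are illustrative,
# not from the original file):
#
#   fake = torch.tanh(torch.randn(2048, 3, 32, 32))  # needs >= dims samples
#   fid = calculate_fid_given_paths_torch(fake, 'fid_stats_cifar10.npz')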
def get_fid(fid_stat, epoch, generator, num_img, val_batch_size, latent_dim, writer_dict=None, cls_idx=None):
generator.eval()
    with torch.no_grad():
        eval_iter = num_img // val_batch_size
img_list = []
for _ in tqdm(range(eval_iter), desc='sample images'):
noise = torch.cuda.FloatTensor(np.random.normal(0, 1, (val_batch_size, latent_dim)))
# Generate a batch of images
#if args.n_classes > 0:
# if cls_idx is not None:
# label = torch.ones(noise.shape[0]) * cls_idx
# label = label.type(torch.cuda.LongTensor)
# else:
# label = torch.randint(low=0, high=args.n_classes, size=(noise.shape[0],), device='cuda')
# gen_imgs = generator(noise)
#else:
gen_imgs = generator(noise)
#if isinstance(gen_imgs, tuple):
# gen_imgs = gen_imgs[0]
img_list += [gen_imgs]
img_list = torch.cat(img_list, 0)
fid_score = calculate_fid_given_paths_torch(img_list, fid_stat)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('FID_score', fid_score, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return fid_score
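# Hedged usage sketch (the stats file, generator and sizes are illustrative,
# not from the original file):
#
#   fid = get_fid('fid_stats_cifar10.npz', epoch=0, generator=netG,
#                 num_img=5000, val_batch_size=50, latent_dim=128)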