gaussianlosses.py
import numpy as np
import tensorflow as tf


def MSE(y, y_, reduce=True):
    """Mean squared error, averaged over all non-batch axes."""
    ax = list(range(1, len(y.shape)))
    mse = tf.reduce_mean((y - y_) ** 2, axis=ax)
    return tf.reduce_mean(mse) if reduce else mse


def RMSE(y, y_):
    """Root mean squared error over all elements."""
    return tf.sqrt(tf.reduce_mean((y - y_) ** 2))


def Gaussian_NLL(y, mu, sigma, reduce=True):
    """Negative log-likelihood of y under N(mu, sigma^2), parameterized by sigma."""
    ax = list(range(1, len(y.shape)))
    logprob = -tf.math.log(sigma) - 0.5 * tf.math.log(2 * np.pi) - 0.5 * ((y - mu) / sigma) ** 2
    loss = tf.reduce_mean(-logprob, axis=ax)
    return tf.reduce_mean(loss) if reduce else loss


def Gaussian_NLL_logvar(y, mu, logvar, reduce=True):
    """Negative log-likelihood of y under N(mu, exp(logvar)), parameterized by log-variance."""
    ax = list(range(1, len(y.shape)))
    log_likelihood = 0.5 * (
        -tf.exp(-logvar) * (mu - y) ** 2
        - tf.math.log(2 * tf.constant(np.pi, dtype=logvar.dtype))
        - logvar
    )
    loss = tf.reduce_mean(-log_likelihood, axis=ax)
    return tf.reduce_mean(loss) if reduce else loss


def NIG_NLL(y, gamma, v, alpha, beta, reduce=True):
    """Negative log-likelihood of y under the Student-t predictive distribution
    implied by a Normal-Inverse-Gamma (NIG) evidential parameterization."""
    twoBlambda = 2 * beta * (1 + v)
    nll = 0.5 * tf.math.log(np.pi / v) \
        - alpha * tf.math.log(twoBlambda) \
        + (alpha + 0.5) * tf.math.log(v * (y - gamma) ** 2 + twoBlambda) \
        + tf.math.lgamma(alpha) \
        - tf.math.lgamma(alpha + 0.5)
    return tf.reduce_mean(nll) if reduce else nll


def KL_NIG(mu1, v1, a1, b1, mu2, v2, a2, b2):
    """KL divergence between two NIG distributions in (mu, v, a, b) parameterization."""
    KL = 0.5 * (a1 - 1) / b1 * (v2 * tf.square(mu2 - mu1)) \
        + 0.5 * v2 / v1 \
        - 0.5 * tf.math.log(tf.abs(v2) / tf.abs(v1)) \
        - 0.5 \
        + a2 * tf.math.log(b1 / b2) \
        - (tf.math.lgamma(a1) - tf.math.lgamma(a2)) \
        + (a1 - a2) * tf.math.digamma(a1) \
        - (b1 - b2) * a1 / b1
    return KL


def NIG_Reg(y, gamma, v, alpha, beta, omega=0.01, reduce=True, kl=False):
    """Evidential regularizer: absolute error scaled by the total evidence,
    or (when kl=True) by a KL penalty toward a low-evidence NIG prior."""
    # error = tf.stop_gradient(tf.abs(y - gamma))
    error = tf.abs(y - gamma)
    if kl:
        kl = KL_NIG(gamma, v, alpha, beta, gamma, omega, 1 + omega, beta)
        reg = error * kl
    else:
        evi = 2 * v + alpha
        reg = error * evi
    return tf.reduce_mean(reg) if reduce else reg


def EvidentialRegression(y_true, evidential_output, coeff=1e-2):
    """Combined evidential regression loss: NIG NLL plus coeff times the evidence regularizer."""
    gamma, v, alpha, beta = tf.split(evidential_output, 4, axis=-1)
    loss_nll = NIG_NLL(y_true, gamma, v, alpha, beta)
    loss_reg = NIG_Reg(y_true, gamma, v, alpha, beta)
    return loss_nll + coeff * loss_reg
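
A minimal usage sketch follows, not part of the original file: it assumes a Keras model whose head (here a hypothetical `evidential_head`) emits the four NIG parameters per 1-D target, with the usual positivity constraints applied via softplus. It only illustrates how `EvidentialRegression` could be plugged in as a Keras loss.

# --- Usage sketch (assumption, not in the original file) ---
if __name__ == "__main__":
    def evidential_head(x):
        # Hypothetical output head: split raw outputs into (mu, v, alpha, beta)
        # and enforce v > 0, alpha > 1, beta > 0 with softplus.
        mu, logv, logalpha, logbeta = tf.split(x, 4, axis=-1)
        v = tf.nn.softplus(logv)
        alpha = tf.nn.softplus(logalpha) + 1.0
        beta = tf.nn.softplus(logbeta)
        return tf.concat([mu, v, alpha, beta], axis=-1)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(4),  # 4 evidential parameters per 1-D target
        tf.keras.layers.Lambda(evidential_head),
    ])
    model.compile(
        optimizer="adam",
        loss=lambda y_true, y_pred: EvidentialRegression(y_true, y_pred, coeff=1e-2),
    )
    # x_train, y_train = ...  (data loading left to the caller)
    # model.fit(x_train, y_train, epochs=10)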