## Kernelized Adversarial Ordinal Regression
include("types.jl")
include("kernel.jl")
function train_adv_or_kernel(X::Matrix, y::Vector, C::Real=1.0,
kernel::Symbol=:linear, kernel_params::Vector=[], feature::Symbol=:mc;
perturb::Real=0.0, tol::Real=1e-6, psdtol::Real=1e-6,
log::Real=0, n_thread::Int=0, verbose::Bool=true)
n = length(y)
# prepend a constant bias feature of ones
X1 = [ones(n) X]' # transpose
m = size(X1, 1)
# number of classes (labels are assumed to be 1..nc)
nc = maximum(y)
# dual parameters
nd = n * nc * 2 # number of dual parameters: one per (example i, class j, block k in {1,2})
alpha = zeros(nd)
if verbose
println("Start >> prepare variables")
tic()
end
# kernel
kernel_func = linear_kernel
if kernel == :gaussian
kernel_func = gaussian_kernel
elseif kernel == :polynomial
kernel_func = polynomial_kernel
end
# precompute the symmetric training Gram matrix
K = zeros(n,n)
for i=1:n, j=i:n
K[i,j] = kernel_func(X1[:,i], X1[:,j], kernel_params...)
if (i != j); K[j,i] = K[i,j] end
end
# params for QP
Q = zeros(nd,nd)
nu = zeros(nd)
A = spzeros(2n,nd) # sparse matrix
b = ones(2n) * (C/2)
# params layout
# [i1j1a1, i2j1a1, ..., inj1a1, i1j2a1, ..., injka1, i1j1a2, ..., injka2]
ly = Vector{Tuple{Int64,Int64,Int64}}(nd)
l = 1
for k=1:2, j=1:nc, i=1:n
ly[l] = (i,j,k)
l += 1
end
# prepare nu (linear coefficients): +j for the k == 1 block, -j for the k == 2 block
l = 1
for k=1:2, j=1:nc
s = (k == 1) ? 1 : -1
nu[l:l+n-1] = s * j * ones(n)
l += n
end
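# The pieces above and below assemble the dual QP
#   minimize (1/2) * alpha' * Q * alpha - nu' * alpha
#   subject to A * alpha = b, alpha >= 0
# which is handed to Gurobi further down.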
if feature == :th
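# Thresholded features: the difference between the joint features of (x_i, c_i) and
# (x_i, y_i) has a regression part (c_i - y_i) * x_i, contributing
# (c_i - y_i)(c_j - y_j) * K[ii,jj] to Q, plus a vector th of +/-1 threshold
# indicators spanning the labels between c_i and y_i.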
# Quadratic coefficient
for i=1:nd, j=i:nd
ii = ly[i][1]; c_ii = ly[i][2]
jj = ly[j][1]; c_jj = ly[j][2]
Q[i,j] = (c_ii - y[ii]) * (c_jj - y[jj]) * K[ii,jj]
th_ii = zeros(nc-1)
if c_ii < y[ii]
th_ii[c_ii:y[ii]-1] = 1.
elseif c_ii > y[ii]
th_ii[y[ii]:c_ii-1] = -1.
end
th_jj = zeros(nc-1)
if c_jj < y[jj]
th_jj[c_jj:y[jj]-1] = 1.
elseif c_jj > y[jj]
th_jj[y[jj]:c_jj-1] = -1.
end
Q[i,j] += dot(th_ii, th_jj)
if (i != j); Q[j,i] = Q[i,j] end
end
# println("sym : ", issymmetric(Q))
else
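# Multiclass features: the joint-feature difference pairs the class-indicator
# difference (e_{c} - e_{y}) with x, so
# Q[i,j] = <e_{c_i} - e_{y_i}, e_{c_j} - e_{y_j}> * K[ii,jj]; the cases below
# enumerate the nonzero values {2, -2, 1, -1} of that inner product.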
# Quadratic coefficient
for i=1:nd, j=i:nd
ii = ly[i][1]; c_ii = ly[i][2]
jj = ly[j][1]; c_jj = ly[j][2]
# use kernel
if (c_ii == c_jj) && (y[ii] == y[jj]) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = 2K[ii,jj]
elseif (c_ii == y[jj]) && (y[ii] == c_jj) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = -2K[ii,jj]
elseif (c_ii == c_jj) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = K[ii,jj]
elseif (y[ii] == y[jj]) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = K[ii,jj]
elseif (c_ii == y[jj]) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = -K[ii,jj]
elseif (y[ii] == c_jj) && !(c_ii == y[ii]) && !(c_jj == y[jj])
Q[i,j] = -K[ii,jj]
end
if (i != j); Q[j,i] = Q[i,j] end
end
end
## add a diagonal perturbation (helps keep Q numerically positive semidefinite)
for i=1:nd
Q[i,i] = Q[i,i] + perturb
end
# prepare A: row ii sums the k == 1 duals of example ii, row ii+n its k == 2 duals; each must equal C/2
for i=1:nd
ii = ly[i][1]
if ly[i][3] == 1
A[ii, i] = 1.
elseif ly[i][3] == 2
A[ii+n, i] = 1.
end
end
if verbose
toc()
tic()
end
if verbose println(">> Optim :: Gurobi") end
# set up the Gurobi solver environment
env = Gurobi.Env()
# Method : 0=primal simplex, 1=dual simplex, 2=barrier ; default for QP: barrier
# Threads : default = 0 (use all threads)
setparams!(env, PSDTol=psdtol, LogToConsole=log, Method=2, Threads=n_thread)
## init model
model = gurobi_model(env,
sense = :minimize,
H = Q,
f = -nu,
Aeq = A,
beq = b,
lb = zeros(nd)
)
# Print the model to check correctness
# print(model)
# Solve with Gurobi
Gurobi.optimize(model)
if verbose
toc()
println("<< End QP")
end
# get solution
alpha = get_solution(model)
return KernelORAdvModel(kernel, kernel_params, feature, alpha, nc, ly)
end
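"""
    predict_or_adv_kernel(model, X_test, X_train, y_train)

Predict an ordinal label for each row of `X_test`: class potentials are accumulated
from the dual variables and the train/test kernel values, and the class with the
largest potential is returned.
"""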
function predict_or_adv_kernel(model::KernelORAdvModel, X_test::Matrix, X_train::Matrix, y_train::Vector)
alpha = model.alpha
nc = model.n_class
ly = model.layout
feature = model.feature
n = size(X_test, 1)
nd = length(alpha)
X1 = [ones(n) X_test]' # transpose
m = size(X1, 1)
# training data
n_tr = size(X_train, 1)
X1_tr = [ones(n_tr) X_train]' # transpose
# kernel
kernel = model.kernel
kernel_params = model.kernel_params
# kernel function
kernel_func = linear_kernel
if kernel == :gaussian
kernel_func = gaussian_kernel
elseif kernel == :polynomial
kernel_func = polynomial_kernel
end
# compute the train-by-test kernel matrix (n_tr x n)
K = [ kernel_func(X1_tr[:,i], X1[:,j], kernel_params...)::Float64 for i=1:n_tr, j=1:n]
pred = zeros(n)
for i=1:n
fs = zeros(nc)
if feature == :th
for j = 1:nd
ii = ly[j][1]
c_ii = ly[j][2]
th_ii = zeros(nc-1)
if c_ii < y_train[ii]
th_ii[c_ii : y_train[ii]-1] = 1.
elseif c_ii > y_train[ii]
th_ii[y_train[ii] : c_ii-1] = -1.
end
for l = 1:nc
fs[l] -= alpha[j] * ( (c_ii - y_train[ii]) * l * K[ii,i] + sum(th_ii[l:end]) )
end
end
else
for j = 1:nd
mult = zeros(nc)
ii = ly[j][1]
c_ii = ly[j][2]
mult[c_ii] += 1.
mult[y_train[ii]] -= 1.
fs -= alpha[j] * (mult * K[ii,i])
end
end
pred[i] = indmax(fs) # predict the class with the largest potential
end
return pred::Vector{Float64}
end
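"""
    test_or_adv_kernel(model, X_test, y_test, X_train, y_train)

Return the mean absolute error (MAE) of the model's predictions on a labeled test set.
"""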
function test_or_adv_kernel(model::KernelORAdvModel, X_test::Matrix, y_test::Vector, X_train::Matrix, y_train::Vector)
n = size(X_test, 1)
pred = predict_or_adv_kernel(model, X_test, X_train, y_train)
mae = mean(abs.(pred - y_test)) # mean absolute error
return mae::Float64
end
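# Example usage (a minimal sketch with synthetic data; assumes kernel.jl defines
# gaussian_kernel(x, x2, gamma) with a single width parameter -- adjust
# kernel_params to whatever kernel.jl actually expects):
#
#   X_tr = randn(100, 4); y_tr = rand(1:5, 100)
#   X_te = randn(20, 4);  y_te = rand(1:5, 20)
#   model = train_adv_or_kernel(X_tr, y_tr, 1.0, :gaussian, [1.0], :mc)
#   mae = test_or_adv_kernel(model, X_te, y_te, X_tr, y_tr)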