-
Notifications
You must be signed in to change notification settings - Fork 16
/
Copy pathcompute_rank2.m
149 lines (139 loc) · 5.29 KB
/
compute_rank2.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
% Compute the CMC ranking of a learned metric on a test set.
% By Fei Xiong,
% ECE Dept,
% Northeastern University
% 2013-11-04
% Input:
%    Method: cell array of distance-learning algorithm structs, one per
%            feature channel. Two fields are used in this function:
%            P is the projection matrix. d'-by-Ntr (assumes the kernel
%            trick is used.)
%            kernel is the name of the kernel function.
%    train:  cell array; the data used to learn the projection matrix.
%            Each row is a sample vector. Ntr-by-d
%    test:   cell array; the data used to test and calculate the CMC for
%            the algorithm. Each row is a sample vector. Nts-by-d
%    ix_partition: the randomly generated partitions of the test set.
%            Each row is one partition: 1 marks a reference (gallery)
%            sample, while 0 (or -1 when the probe set differs from the
%            gallery set) marks a probe sample. Nit-by-Nts
%    IDs:    the identity of the samples in the test set. Nts-by-1
% Output:
%    R:       ranking matrix; R(k,i) is the rank of the correct gallery
%             match for probe i under partition k. Nit-by-Nprobe
%    Alldist: cell array of probe-by-gallery distance matrices, one per
%             partition.
%    ixx:     sorted gallery indices per probe (from the last partition).
function [R ,Alldist , ixx] = compute_rank2(Method, train, test, ix_partition, IDs)
useGPU = gpuDeviceCount > 0; % hoisted: loop-invariant device query
for k =1:size(ix_partition,1) % calculate the CMC for each random partition.
    % Split the k-th partition into reference (gallery) and probe sets.
    ix_ref = ix_partition(k,:) ==1;
    % When the probe set is not the same as the test gallery set, probe
    % samples are labeled "-1" instead of "0".
    if min(min(double(ix_partition))) < 0
        ix_prob = ix_partition(k,:) ==-1;
    else
        ix_prob = ix_partition(k,:) ==0;
    end
    ref_ID = IDs(ix_ref);
    prob_ID = IDs(ix_prob);
    if useGPU
        dis = gpuArray.zeros(sum(ix_prob),sum(ix_ref));
    else
        dis = zeros(sum(ix_prob),sum(ix_ref));
    end
    % Accumulate squared projected distances over all feature channels.
    for c = 1:numel(test)
        A = Method{c}.P; % projection matrix
        if useGPU % use gpu to speed up
            train{c} = gpuArray(train{c});
            test{c} = gpuArray(test{c});
            A = gpuArray(A);
        end
        if strcmp(Method{c}.name,'oLFDA')
            % oLFDA projects the raw features directly; no kernel matrix.
            K_test = test{c}';
        else
            K_test = ComputeKernelTest(train{c}, test{c}, Method{c}); % compute the kernel matrix
        end
        K_ref = K_test(:, ix_ref);
        K_prob = K_test(:, ix_prob);
        for i =1: size(K_prob,2)
            diff = bsxfun(@minus, K_ref, K_prob(:,i));
            diff = A*diff;
            dis(i, :) = dis(i, :) + sum(diff.^2,1);
        end
    end
    % Rank the gallery for each probe sample.
    % Preallocate r fresh each partition: the original grew r inside the
    % loop, leaving stale entries if a later partition had fewer probes.
    r = zeros(1, sum(ix_prob));
    for i =1:sum(ix_prob)
        [~, ix] = sort(dis(i, :));
        % Rank of the first gallery sample with the matching identity.
        % find(...,1) guards against duplicated gallery IDs, which would
        % otherwise return a vector and break the scalar assignment.
        r(i) = find(ref_ID(ix) == prob_ID(i), 1);
        ixx(i,:)=ix;
    end
    % Returned ranking matrix: each row is the ranking for one
    % reference/probe partition. Copy data back to the CPU if needed.
    if useGPU
        R(k, :) = gather(r);
        Alldist{k} = gather(dis); % distance matrix
    else
        R(k, :) = r;
        Alldist{k} = dis; % distance matrix
    end
end
% release GPU memory if it was used
if useGPU
    reset(gpuDevice());
end
return;
% Calculate the kernel matrix between the train and test sets.
% TODO: Replace the ComputeKernel function in ComputeKernel.m
% Input:
%    Method: the distance-learning algorithm struct. Only the "kernel"
%            field (the name of the kernel function) and, for
%            'chi2-rbf', the "rbf_sigma" field are used.
%    train:  the data used to learn the projection matrix. Each row is a
%            sample vector. Ntr-by-d
%    test:   the data used to test and calculate the CMC for the
%            algorithm. Each row is a sample vector. Nts-by-d
% Output:
%    K_test: kernel matrix between train and test samples. Ntr-by-Nts
function [K_test] = ComputeKernelTest(train, test, Method)
% NOTE: the original code wrapped two byte-identical copies of this switch
% in `if size(train,2) > 2e4 ...` — the only intended difference (a
% parpool/delete pair for the Parallel Computing Toolbox) was commented
% out, so both branches behaved the same. The duplication is removed
% here; behavior is unchanged.
switch Method.kernel
    case {'linear'}
        K_test = train * test';
    case {'chi2'}
        % chi-square kernel: 2*sum(x.*y ./ (x+y)); 1e-10 avoids 0/0.
        % Preallocate 'like' train so gpuArray inputs stay on the GPU.
        K_test = zeros(size(train,1), size(test,1), 'like', train);
        for i =1:size(test,1)
            dotp = bsxfun(@times, test(i,:), train);
            sump = bsxfun(@plus, test(i,:), train);
            K_test(:,i) = 2* sum(dotp./(sump+1e-10),2);
        end
    case {'chi2-rbf'}
        % RBF kernel over the chi-square distance.
        sigma = Method.rbf_sigma;
        K_test = zeros(size(train,1), size(test,1), 'like', train);
        for i =1:size(test,1)
            subp = bsxfun(@minus, test(i,:), train);
            subp = subp.^2;
            sump = bsxfun(@plus, test(i,:), train);
            K_test(:,i) = sum(subp./(sump+1e-10),2);
        end
        K_test =exp(-K_test./sigma);
end
return;