looest.m
% LOOEST - Computes an approximate leave-one-out estimate of the error rate
% for a two-class SVM using the margin sensitivity method.
%
% Syntax: [loo_est,conf_matrix,g_loo] = looest
% (evaluates the current SVM in memory)
%
% [loo_est,conf_matrix,g_loo] = looest(X,y,a,b,g,ind,type,scale,Rs,Q)
% (evaluates the given SVM)
%
% loo_est: number of leave-one-out errors
% conf_matrix: confusion matrix
% g_loo: estimates of g for each example after the example is unlearned
% X: matrix of training vectors stored columnwise
% y: column vector of class labels (-1/+1) for training vectors
% a: alpha coefficients
% b: bias
% g: partial derivatives of cost function w.r.t. alpha coefficients
% ind: cell array containing indices of margin, error and reserve vectors
% ind{1}: indices of margin vectors
% ind{2}: indices of error vectors
% ind{3}: indices of reserve vectors
% type: kernel type
% 1: linear kernel X'*Y
% 2-4: polynomial kernel (scale*X'*Y + 1)^type
% 5: Gaussian kernel with variance 1/(2*scale)
% scale: kernel scale
% Rs: inverse of extended kernel matrix for margin vectors
% Q: extended kernel matrix for all vectors
%
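% Example (illustrative only; assumes an SVM has already been trained with this
% toolbox so that a, b, g, ind, Q, Rs, scale, type, X and y are in memory as globals):
%
%    [loo_est,conf_matrix,g_loo] = looest;
%    loo_rate = loo_est/size(X,2);   % X holds one training vector per column
%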
% Version 3.22e -- Comments to [email protected]
%
function [loo_est,conf_matrix,g_loo] = looest(varargin)
% flags for example state
MARGIN = 1;
ERROR = 2;
RESERVE = 3;
UNLEARNED = 4;
if (nargin == 0)
   % define global variables
   global a;
   global b;
   global g;
   global ind;
   global Q;
   global Rs;
   global scale;
   global type;
   global X;
   global y;
else
   % define arguments
   X = varargin{1};
   y = varargin{2};
   a = varargin{3};
   b = varargin{4};
   g = varargin{5};
   ind = varargin{6};
   type = varargin{7};
   scale = varargin{8};
   Rs = varargin{9};
   Q = varargin{10};
end;
% if the user wants g_loo, make sure to compute g_loo for the error vectors with initial g < -1.
% if we only care about the error rate, we don't need to unlearn these examples because they are
% guaranteed to be classified incorrectly.
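% (g is the partial derivative y*f(x) - 1 in this formulation, so g < -1 corresponds
% to y*f(x) < 0, i.e. a misclassification)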
if (nargout == 3)
   g_flag = 1;
else
   g_flag = 0;
end;
% initialize variables
loo_est = 0;
g_loo = g;
% initialize confusion matrix
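% rows give the true class (+1, -1); columns give the assigned label:
% column 1 = classified as +1, column 2 = classified as -1, column 3 = on the decision boundary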
conf_matrix = zeros(2,3);
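% reserve vectors are never left out below: their alpha coefficients are zero, so removing one
% leaves the solution unchanged and the vector stays correctly classified (g >= 0);
% count them as correct up front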
if (length(ind{RESERVE}) > 0)
   conf_matrix(1,1) = sum(y(ind{RESERVE}) == 1);
   conf_matrix(2,2) = sum(y(ind{RESERVE}) == -1);
end;
% begin approximate leave-one-out estimation
ind_loo = [ind{MARGIN} ind{ERROR}];
num_margin_vectors = length(ind{MARGIN});
disp('Beginning margin estimation.');
for i = 1:length(ind_loo)
   % select example
   indc = ind_loo(i);
   % instead of unlearning the example, estimate the resulting g
   % based on the margin sensitivity for the example
   if ((g(indc) >= -1) | (g_flag))
      g_est = gestloo(indc);
      g_loo(indc) = g_est;
   else
      g_est = g(indc);
   end;
   % check to see if the example would be misclassified and record results
   loo_est = loo_est + (g_est < -1);
   % map the estimated margin to a confusion matrix column:
   % column 1 = classified as +1, column 2 = classified as -1, column 3 = on the decision boundary
   if (y(indc) == 1)
      j = 1 + (g_est <= -1) + (g_est == -1);
      conf_matrix(1,j) = conf_matrix(1,j) + 1;
   else
      j = 1 + (g_est >= -1) + (g_est == -1);
      conf_matrix(2,j) = conf_matrix(2,j) + 1;
   end;
   if (mod(i,50) == 0)
      s = sprintf('Estimated margins for %d examples.',i);
      disp(s);
   end;
end;
if (mod(i,50) ~= 0)
   s = sprintf('Estimated margins for %d examples.',i);
   disp(s);
end;
s = sprintf('Process complete!\n');
disp(s);