// -*- C++ -*-

// ThresholdedKernel.cc
//
// Copyright (C) 2005 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: ThresholdedKernel.cc 6508 2006-12-15 02:35:49Z lamblin $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "ThresholdedKernel.h"
#include <plearn/math/PRandom.h>

namespace PLearn {
using namespace std;

///////////////////////
// ThresholdedKernel //
///////////////////////
ThresholdedKernel::ThresholdedKernel():
    knn(2),
    knn_approximation(0),
    max_size_for_full_gram(5000),
    method("knn"),
    threshold(0)
{
}

PLEARN_IMPLEMENT_OBJECT(ThresholdedKernel,
    "Thresholds an underlying kernel.",
    ""
);

////////////////////
// declareOptions //
////////////////////
void ThresholdedKernel::declareOptions(OptionList& ol)
{
    declareOption(ol, "method", &ThresholdedKernel::method, OptionBase::buildoption,
        "Which method is used to threshold the underlying kernel:\n"
        " - 'knn' : if y is such that K(x,y) is strictly less than K(x,n_k(x)) where n_k(x)\n"
        "           is the k-th neighbor of x as given by K, and K(x,y) < K(n_k(y), y), then\n"
        "           K(x,y) is thresholded\n");

    declareOption(ol, "threshold", &ThresholdedKernel::threshold, OptionBase::buildoption,
        "The value returned when K(x,y) is thresholded.");

    declareOption(ol, "knn", &ThresholdedKernel::knn, OptionBase::buildoption,
        "When 'method' is 'knn', this is 'k' in n_k(x) (x will be counted if in data matrix).");

    declareOption(ol, "knn_approximation", &ThresholdedKernel::knn_approximation,
                  OptionBase::buildoption,
        "When 'method' is 'knn', this option can take several values:\n"
        " - 0          : it is ignored, the 'knn' nearest neighbors are computed normally\n"
        " - p > 1      : the value of K(x,n_k(x)) is estimated by computing K(x,y) for p\n"
        "                values of y taken randomly in the dataset (the same value of y\n"
        "                may be taken more than once)\n"
        " - 0 < f <= 1 : same as above, with p = f * n (n = dataset size)");

    declareOption(ol, "max_size_for_full_gram", &ThresholdedKernel::max_size_for_full_gram,
                  OptionBase::buildoption,
        "When the dataset has more than 'max_size_for_full_gram' samples, the full Gram\n"
        "matrix will not be computed in memory (less efficient, but scales better).");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}
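
// A minimal usage sketch (illustrative comment only, not used by the library):
// the PLearn script fragment below shows how a ThresholdedKernel might be
// configured. The 'source_kernel' option is assumed to be declared by the
// parent class; the wrapped kernel class ('GaussianKernel' with a 'sigma'
// option) and all option values are assumptions chosen for the example.
//
//     ThresholdedKernel(
//         source_kernel = GaussianKernel( sigma = 1 );
//         method = "knn";   # keep K(x,y) when y is among the knn neighbors of x, or x among those of y
//         knn = 5;
//         threshold = 0;    # value substituted for thresholded entries
//     )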

///////////
// build //
///////////
void ThresholdedKernel::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void ThresholdedKernel::build_()
{
    if (source_kernel && !source_kernel->is_symmetric)
        PLERROR("In ThresholdedKernel::build_ - The source kernel must currently "
                "be symmetric");
    PLASSERT( knn_approximation >= 0 );
    knn_approx = !fast_exact_is_equal(knn_approximation, 0);
    if (knn_approx) {
        if (knn_approximation > 1)
            n_approx = int(round(knn_approximation));
        // Otherwise, n_approx is set in setDataForKernelMatrix.
    } else
        n_approx = -1;
}

///////////////////////
// computeGramMatrix //
///////////////////////
void ThresholdedKernel::computeGramMatrix(Mat K) const {
    if (cache_gram_matrix && gram_matrix_is_cached) {
        K << gram_matrix;
        return;
    }
    source_kernel->computeGramMatrix(K);
    thresholdGramMatrix(K);
    if (cache_gram_matrix) {
        int l = K.length();
        gram_matrix.resize(l, l);
        gram_matrix << K;
        gram_matrix_is_cached = true;
    }
}
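
// Note on the 'knn' thresholding logic used below: a kernel value K(x1,x2) is
// kept as soon as it is at least as large as the kernel value between x1 and
// its knn_sub-th nearest neighbor (according to the source kernel), or at
// least as large as the kernel value between x2 and its knn_sub-th nearest
// neighbor; otherwise 'threshold' is returned. Because partialSortRows sorts
// in increasing order, kernel values are negated before sorting so that the
// entry at index knn_sub - 1 corresponds to the knn_sub-th largest value.
// For data points, evaluate_i_j() relies on the values precomputed in
// setDataForKernelMatrix() (knn_kernel_values) instead of recomputing them.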

//////////////
// evaluate //
//////////////
real ThresholdedKernel::evaluate(const Vec& x1, const Vec& x2) const {
    real k_x1_x2 = source_kernel->evaluate(x1, x2);
    if (method == "knn") {
        if (knn_approx)
            evaluate_random_k_x_i(x1, k_x_xi);
        else
            source_kernel->evaluate_all_x_i(x1, k_x_xi);
        negateElements(k_x_xi);
        partialSortRows(k_x_xi_mat, knn_sub);
        if (k_x1_x2 >= - k_x_xi[knn_sub - 1])
            return k_x1_x2;
        if (knn_approx)
            evaluate_random_k_x_i(x2, k_x_xi);
        else
            source_kernel->evaluate_all_i_x(x2, k_x_xi);
        negateElements(k_x_xi);
        partialSortRows(k_x_xi_mat, knn_sub);
        if (k_x1_x2 >= - k_x_xi[knn_sub - 1])
            return k_x1_x2;
        return threshold;
    }
    PLERROR("ThresholdedKernel::evaluate: unsupported method '%s'", method.c_str());
    return MISSING_VALUE;
}

//////////////////
// evaluate_i_j //
//////////////////
real ThresholdedKernel::evaluate_i_j(int i, int j) const {
    real k_i_j = source_kernel->evaluate_i_j(i, j);
    if (method == "knn") {
        if (k_i_j >= knn_kernel_values[i] || k_i_j >= knn_kernel_values[j])
            return k_i_j;
        else
            return threshold;
    }
    PLERROR("ThresholdedKernel::evaluate_i_j: unsupported method '%s'", method.c_str());
    return MISSING_VALUE;
}

//////////////////
// evaluate_i_x //
//////////////////
real ThresholdedKernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const {
    // Default = uses the Kernel implementation.
    // Alternative = return source_kernel->evaluate_i_x(i, x, squared_norm_of_x);
    return Kernel::evaluate_i_x(i, x, squared_norm_of_x);
}

////////////////////////
// evaluate_i_x_again //
////////////////////////
real ThresholdedKernel::evaluate_i_x_again(int i, const Vec& x, real squared_norm_of_x, bool first_time) const {
    if (method == "knn") {
        if (first_time) {
            if (knn_approx)
                evaluate_random_k_x_i(x, k_x_xi);
            else
                source_kernel->evaluate_all_i_x(x, k_x_xi);
            negateElements(k_x_xi);
            partialSortRows(k_x_xi_mat, knn_sub);
            k_x_threshold = - k_x_xi[knn_sub - 1];
        }
        real k_i_x = source_kernel->evaluate_i_x_again(i, x, squared_norm_of_x, first_time);
        if (k_i_x >= k_x_threshold || k_i_x >= knn_kernel_values[i])
            return k_i_x;
        else
            return threshold;
    }
    PLERROR("ThresholdedKernel::evaluate_i_x_again: unsupported method '%s'", method.c_str());
    return MISSING_VALUE;
}
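
// Note on the knn approximation implemented below: K(x, n_k(x)) is estimated
// from kernel evaluations against dataset indices drawn uniformly at random,
// with replacement (the same index may be drawn several times). The PRandom
// generator is the common one with a fixed seed, so the sampling is
// reproducible from one run to the next.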

///////////////////////////
// evaluate_random_k_x_i //
///////////////////////////
void ThresholdedKernel::evaluate_random_k_x_i(const Vec& x, const Vec& k_x_xi)
    const
{
    PP<PRandom> random = PRandom::common(false); // PRandom with fixed seed.
    int k = k_x_xi.length();
    for (int j = 0; j < k; j++) {
        int i = random->uniform_multinomial_sample(n_examples);
        k_x_xi[j] = source_kernel->evaluate_x_i(x, i);
    }
}

//////////////////
// evaluate_x_i //
//////////////////
real ThresholdedKernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const {
    return Kernel::evaluate_x_i(x, i, squared_norm_of_x);
}

////////////////////////
// evaluate_x_i_again //
////////////////////////
real ThresholdedKernel::evaluate_x_i_again(const Vec& x, int i, real squared_norm_of_x, bool first_time) const {
    if (method == "knn") {
        if (first_time) {
            if (knn_approx)
                evaluate_random_k_x_i(x, k_x_xi);
            else
                source_kernel->evaluate_all_x_i(x, k_x_xi);
            negateElements(k_x_xi);
            partialSortRows(k_x_xi_mat, knn_sub);
            k_x_threshold = - k_x_xi[knn_sub - 1];
        }
        real k_x_i = source_kernel->evaluate_x_i_again(x, i, squared_norm_of_x, first_time);
        if (k_x_i >= k_x_threshold || k_x_i >= knn_kernel_values[i])
            return k_x_i;
        else
            return threshold;
    }
    return MISSING_VALUE;
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void ThresholdedKernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(knn_kernel_values, copies);
    deepCopyField(k_x_xi, copies);
    deepCopyField(k_x_xi_mat, copies);
}
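
// setDataForKernelMatrix() precomputes knn_kernel_values[i], the source
// kernel value between data point i and its knn_sub-th nearest neighbor.
// For small datasets (at most 'max_size_for_full_gram' points, and no knn
// approximation) this is read off the full source Gram matrix, which can
// also be cached in thresholded form; otherwise kernel values are computed
// one row at a time (or on a random subset when the approximation is
// enabled), and only a sparse Gram matrix of the retained entries can be
// cached.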

////////////////////////////
// setDataForKernelMatrix //
////////////////////////////
void ThresholdedKernel::setDataForKernelMatrix(VMat the_data) {
    inherited::setDataForKernelMatrix(the_data);
    int n = the_data->length();
    PP<ProgressBar> pb;
    knn_sub = knn_approx ? int(round(knn * real(n_approx) / real(n)))
                         : knn;
    if (knn_sub <= 0)
        PLERROR("In ThresholdedKernel::setDataForKernelMatrix - Not "
                "enough neighbors considered");
    if (method == "knn") {
        knn_kernel_values.resize(n);
        if (knn_approx) {
            if (knn_approximation <= 1) {
                n_approx = int(round(knn_approximation * n));
                PLASSERT( n_approx >= 1 );
            } else {
                int k_int = int(round(knn_approximation));
                if (k_int > n)
                    PLERROR("In ThresholdedKernel::setDataForKernelMatrix - "
                            "'knn_approximation' (%d) cannot be more than the "
                            "number of data points (%d)", k_int, n);
            }
        }
        if (knn_sub > n)
            PLERROR("In ThresholdedKernel::setDataForKernelMatrix - The number"
                    " of nearest neighbors to compute (%d) must be less than "
                    "the length of the dataset (%d)", knn_sub, n);
        if (n <= max_size_for_full_gram && !knn_approx) {
            // Can afford to store the Gram matrix in memory.
            gram_matrix.resize(n, n);
            source_kernel->computeGramMatrix(gram_matrix);
            if (report_progress)
                pb = new ProgressBar("Finding nearest neighbors", n);
            Mat sorted_k_i(n, 1);
            for (int i = 0; i < n; i++) {
                sorted_k_i << gram_matrix(i);
                negateElements(sorted_k_i); // For sorting.
                partialSortRows(sorted_k_i, knn);
                knn_kernel_values[i] = - sorted_k_i(knn - 1, 0);
                if (report_progress)
                    pb->update(i + 1);
            }
            if (cache_gram_matrix) {
                // Since we have the Gram matrix at hand, we may cache it now.
                thresholdGramMatrix(gram_matrix);
                gram_matrix_is_cached = true;
            } else
                // Free memory.
                gram_matrix = Mat();
        } else {
            // The whole Gram matrix will probably not fit in memory,
            // or we do not even want / cannot afford to compute it.
            if (cache_gram_matrix) {
                // We will cache the sparse Gram matrix.
                sparse_gram_matrix.resize(n);
                for (int i = 0; i < n; i++)
                    sparse_gram_matrix[i].resize(0, 2);
            }
            if (report_progress)
                pb = new ProgressBar("Computing Gram matrix of source kernel and "
                                     "finding nearest neighbors", n);
            int n_used = knn_approx ? n_approx : n;
            Mat k_i_mat(n_used, 1);
            Vec k_i(n_used);
            Vec row(2);
            TVec<int> neighb_i, neighb_j;
            PP<PRandom> random = PRandom::common(false); // Has fixed seed.
            for (int i = 0; i < n; i++) {
                if (knn_approx) {
                    for (int j = 0; j < n_approx; j++) {
                        int k = random->uniform_multinomial_sample(n);
                        k_i[j] = source_kernel->evaluate_i_j(i, k);
                    }
                } else {
                    for (int j = 0; j < n; j++)
                        k_i[j] = source_kernel->evaluate_i_j(i, j);
                }
                k_i_mat << k_i;
                negateElements(k_i_mat); // For sorting.
                partialSortRows(k_i_mat, knn_sub);
                knn_kernel_values[i] = - k_i_mat(knn_sub - 1, 0);
                if (report_progress)
                    pb->update(i + 1);
                if (cache_gram_matrix) {
                    if (knn_approx)
                        PLERROR("In ThresholdedKernel::setDataForKernelMatrix "
                                "- Cannot currently cache the Gram matrix when"
                                " using the knn approximation");
                    PLASSERT( !knn_approx );
                    // Let us cache the sparse Gram matrix.
                    if (!fast_exact_is_equal(threshold, 0))
                        PLWARNING("In ThresholdedKernel::setDataForKernelMatrix - The sparse "
                                  "Gram matrix will be cached based on a non-zero threshold");
                    real k_min = knn_kernel_values[i];
                    Mat& g_i = sparse_gram_matrix[i];
                    int ki = g_i.length();
                    neighb_i.resize(ki);
                    for (int j = 0; j < ki; j++)
                        neighb_i[j] = int(g_i(j, 0));
                    for (int j = 0; j < n; j++)
                        if (k_i[j] >= k_min && neighb_i.find(j) == -1) {
                            row[0] = j;
                            row[1] = k_i[j];
                            g_i.appendRow(row);
                            Mat& g_j = sparse_gram_matrix[j];
                            int kj = g_j.length();
                            bool already_there = false;
                            for (int l = 0; l < kj; l++)
                                if (i == int(g_j(l, 0))) {
                                    already_there = true;
                                    break;
                                }
                            if (!already_there) {
                                row[0] = i;
                                g_j.appendRow(row);
                            }
                        }
                    sparse_gram_matrix_is_cached = true;
                }
            }
        }
    }
    k_x_xi.resize(knn_approx ? n_approx : n);
    k_x_xi_mat = k_x_xi.toMat(k_x_xi.length(), 1);
    PLASSERT( !knn_kernel_values.hasMissing() );
}

//////////////////////////
// thresholdGramMatrix //
//////////////////////////
void ThresholdedKernel::thresholdGramMatrix(const Mat& K) const {
    PP<ProgressBar> pb;
    int n = K.length();
    if (K.width() != n)
        PLERROR("In ThresholdedKernel::thresholdGramMatrix - A square matrix is expected");
    if (report_progress)
        pb = new ProgressBar("Thresholding Gram matrix", n);
    if (method == "knn") {
        for (int i = 0; i < n; i++) {
            real* K_i = K[i];
            real knn_kernel_values_i = knn_kernel_values[i];
            for (int j = 0; j < n; j++, K_i++)
                if (*K_i < knn_kernel_values_i && *K_i < knn_kernel_values[j])
                    *K_i = threshold;
            if (report_progress)
                pb->update(i + 1);
        }
    }
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :