Kernel.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: Kernel.cc 10083 2009-04-04 20:33:18Z chapados $
 * This file is part of the PLearn library.
 ******************************************************* */

#define PL_LOG_MODULE_NAME "Kernel"

#include "Kernel.h"
#include <plearn/io/pl_log.h>
#include <plearn/base/lexical_cast.h>
#include <plearn/base/tostring.h>
#include <plearn/base/ProgressBar.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/base/RemoteDeclareMethod.h>

namespace PLearn {
using namespace std;


PLEARN_IMPLEMENT_ABSTRACT_OBJECT(Kernel,
                                 "A Kernel is a real-valued function K(x,y).",
                                 ""
    );
Kernel::~Kernel() {}

////////////
// Kernel //
////////////
Kernel::Kernel(bool is__symmetric, bool call_build_):
    inherited(call_build_),
    lock_xi(false),
    lock_xj(false),
    lock_k_xi_x(false),
    data_inputsize(-1),
    gram_matrix_is_cached(false),
    sparse_gram_matrix_is_cached(false),
    n_examples(-1),
    cache_gram_matrix(false),
    is_symmetric(is__symmetric),
    report_progress(0)
{
    if (call_build_)
        build_();
}

////////////////////
// declareOptions //
////////////////////
void Kernel::declareOptions(OptionList &ol)
{

    // Build options.

    declareOption(ol, "is_symmetric", &Kernel::is_symmetric, OptionBase::buildoption,
                  "Whether this kernel is symmetric or not.");

    declareOption(ol, "report_progress", &Kernel::report_progress, OptionBase::buildoption,
                  "If set to 1, a progress bar will be displayed when computing the Gram matrix,\n"
                  "or for other possibly costly operations.");

    declareOption(ol, "specify_dataset", &Kernel::specify_dataset, OptionBase::buildoption,
                  "If set, setDataForKernelMatrix will be called with this dataset at build time.");

    declareOption(ol, "cache_gram_matrix", &Kernel::cache_gram_matrix, OptionBase::buildoption,
                  "If set to 1, the Gram matrix will be cached in memory to avoid multiple computations.");

    // Learnt options.

    declareOption(ol, "data_inputsize", &Kernel::data_inputsize, OptionBase::learntoption,
                  "The inputsize of 'data' (if -1, it is set to data.width()).");

    declareOption(ol, "n_examples", &Kernel::n_examples, OptionBase::learntoption,
                  "The number of examples in 'data'.");

    inherited::declareOptions(ol);
}

////////////////////
// declareMethods //
////////////////////
void Kernel::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is
    // different from declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "returnComputedGramMatrix", &Kernel::returnComputedGramMatrix,
        (BodyDoc("Computes and returns the Gram matrix for the currently set data.\n"),
         RetDoc ("The Gram matrix")));

    declareMethod(
        rmm, "evaluate", &Kernel::evaluate,
        (BodyDoc("Evaluate the kernel on two vectors.\n"),
         ArgDoc("x1","first vector"),
         ArgDoc("x2","second vector"),
         RetDoc ("K(x1,x2)")));

    declareMethod(
        rmm, "setDataForKernelMatrix", &Kernel::setDataForKernelMatrix,
        (BodyDoc("This method sets the data VMat that will be used to define the kernel\n"
                 "matrix. It may precompute values from this data that can later accelerate\n"
                 "the evaluation of a kernel matrix element.\n"),
         ArgDoc("data", "The data matrix to set into the kernel")));
}

///////////
// build //
///////////
void Kernel::build() {
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void Kernel::build_() {
    if (specify_dataset) {
        this->setDataForKernelMatrix(specify_dataset);
    }
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void Kernel::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(evaluate_xi, copies);
    deepCopyField(evaluate_xj, copies);
    deepCopyField(k_xi_x, copies);
    deepCopyField(data, copies);
    deepCopyField(gram_matrix, copies);
    deepCopyField(sparse_gram_matrix, copies);
    deepCopyField(specify_dataset, copies);
}

///////////
// train //
///////////
void Kernel::train(VMat data)
{}

////////////////////////////
// setDataForKernelMatrix //
////////////////////////////
void Kernel::setDataForKernelMatrix(VMat the_data)
{
    data = the_data;
    if (data) {
        data_inputsize = data->inputsize();
        if (data_inputsize == -1) {
            // Default value when no inputsize is specified.
            data_inputsize = data->width();
        }
        n_examples = data->length();
    } else {
        data_inputsize = 0;
        n_examples = 0;
    }
    gram_matrix_is_cached = false;
    sparse_gram_matrix_is_cached = false;
}

////////////////////////////
// addDataForKernelMatrix //
////////////////////////////
void Kernel::addDataForKernelMatrix(const Vec& newRow)
{
    try {
        data->appendRow(newRow);
    }
    catch(const PLearnError&) {
        PLERROR("Kernel::addDataForKernelMatrix: this method requires a data\n"
                "matrix whose appendRow method is implemented.");
    }
}

//////////////////
// evaluate_i_j //
//////////////////
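// Implementation note: evaluate_xi and evaluate_xj are shared buffers used
// to avoid reallocating temporary vectors on every call, and the lock_*
// flags guard against re-entrant use (e.g. a kernel whose evaluate() ends
// up calling back into evaluate_i_j()); when a buffer is locked, a fresh
// local vector is used instead. Together with the 'static' result variable,
// this makes these evaluation methods non-reentrant across threads.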
real Kernel::evaluate_i_j(int i, int j) const {
    static real result;
    if (lock_xi || lock_xj) {
        // This should not happen often, but you never know...
        Vec xi(data_inputsize);
        Vec xj(data_inputsize);
        data->getSubRow(i, 0, xi);
        data->getSubRow(j, 0, xj);
        return evaluate(xi, xj);
    } else {
        lock_xi = true;
        lock_xj = true;
        evaluate_xi.resize(data_inputsize);
        evaluate_xj.resize(data_inputsize);
        data->getSubRow(i, 0, evaluate_xi);
        data->getSubRow(j, 0, evaluate_xj);
        result = evaluate(evaluate_xi, evaluate_xj);
        lock_xi = false;
        lock_xj = false;
        return result;
    }
}


//////////////////
// evaluate_i_x //
//////////////////
real Kernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const {
    static real result;
    if (lock_xi) {
        Vec xi(data_inputsize);
        data->getSubRow(i, 0, xi);
        return evaluate(xi, x);
    } else {
        lock_xi = true;
        evaluate_xi.resize(data_inputsize);
        data->getSubRow(i, 0, evaluate_xi);
        result = evaluate(evaluate_xi, x);
        lock_xi = false;
        return result;
    }
}

//////////////////
// evaluate_x_i //
//////////////////
real Kernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const {
    static real result;
    if(is_symmetric)
        return evaluate_i_x(i,x,squared_norm_of_x);
    else {
        if (lock_xi) {
            Vec xi(data_inputsize);
            data->getSubRow(i, 0, xi);
            return evaluate(x, xi);
        } else {
            lock_xi = true;
            evaluate_xi.resize(data_inputsize);
            data->getSubRow(i, 0, evaluate_xi);
            result = evaluate(x, evaluate_xi);
            lock_xi = false;
            return result;
        }
    }
}

////////////////////////
// evaluate_i_x_again //
////////////////////////
real Kernel::evaluate_i_x_again(int i, const Vec& x, real squared_norm_of_x, bool first_time) const {
    return evaluate_i_x(i, x, squared_norm_of_x);
}

////////////////////////
// evaluate_x_i_again //
////////////////////////
real Kernel::evaluate_x_i_again(const Vec& x, int i, real squared_norm_of_x, bool first_time) const {
    return evaluate_x_i(x, i, squared_norm_of_x);
}

//////////////////////
// evaluate_all_i_x //
//////////////////////
void Kernel::evaluate_all_i_x(const Vec& x, const Vec& k_xi_x, real squared_norm_of_x, int istart) const {
    k_xi_x[0] = evaluate_i_x_again(istart, x, squared_norm_of_x, true);
    int i_max = istart + k_xi_x.length();
    for (int i = istart + 1; i < i_max; i++) {
        // Store relative to istart so that all of k_xi_x gets filled even
        // when istart > 0.
        k_xi_x[i - istart] = evaluate_i_x_again(i, x, squared_norm_of_x);
    }
}

//////////////////////
// evaluate_all_x_i //
//////////////////////
void Kernel::evaluate_all_x_i(const Vec& x, const Vec& k_x_xi, real squared_norm_of_x, int istart) const {
    k_x_xi[0] = evaluate_x_i_again(x, istart, squared_norm_of_x, true);
    int i_max = istart + k_x_xi.length();
    for (int i = istart + 1; i < i_max; i++) {
        // Store relative to istart, as in evaluate_all_i_x.
        k_x_xi[i - istart] = evaluate_x_i_again(x, i, squared_norm_of_x);
    }
}

//////////////
// isInData //
//////////////
bool Kernel::isInData(const Vec& x, int* i) const {
    return data->find(x, 1e-8, i);
}

/////////////////////////////
// computeNearestNeighbors //
/////////////////////////////
void Kernel::computeNearestNeighbors(const Vec& x, Mat& k_xi_x_sorted, int knn) const {
    Vec k_val;
    bool unlock = true;
    if (lock_k_xi_x) {
        k_val.resize(n_examples);
        unlock = false;
    }
    else {
        lock_k_xi_x = true;
        k_xi_x.resize(n_examples);
        k_val = k_xi_x;
    }
    k_xi_x_sorted.resize(n_examples, 2);
    // Compute the distance from x to all training points.
    evaluate_all_i_x(x, k_val);
    // Find the knn nearest neighbors.
    for (int i = 0; i < n_examples; i++) {
        k_xi_x_sorted(i,0) = k_val[i];
        k_xi_x_sorted(i,1) = real(i);
    }
    partialSortRows(k_xi_x_sorted, knn);
    if (unlock)
        lock_k_xi_x = false;
}
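
// Usage sketch (illustrative only; assumes setDataForKernelMatrix was
// already called and that the kernel is distance-like, so smaller values
// mean closer points):
//
//     Mat sorted;
//     k->computeNearestNeighbors(x, sorted, 5);
//     // The first 5 rows are sorted: sorted(j,0) is the kernel value of
//     // the j-th nearest training point and sorted(j,1) its index.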

///////////////////////
// computeGramMatrix //
///////////////////////
void Kernel::computeGramMatrix(Mat K) const
{
    if (!data)
        PLERROR("Kernel::computeGramMatrix should be called only after setDataForKernelMatrix");
    if (!is_symmetric)
        PLERROR("In Kernel::computeGramMatrix - Currently not implemented for non-symmetric kernels");
    if (cache_gram_matrix && gram_matrix_is_cached) {
        K << gram_matrix;
        return;
    }
    if (K.length() != data.length() || K.width() != data.length())
        PLERROR("Kernel::computeGramMatrix: the argument matrix K should be\n"
                "of size %d x %d (currently of size %d x %d)",
                data.length(), data.length(), K.length(), K.width());
    int l=data->length();
    int m=K.mod();
    PP<ProgressBar> pb;
    int count = 0;
    if (report_progress)
        pb = new ProgressBar("Computing Gram matrix for " + classname(), (l * (l + 1)) / 2);
    real Kij;
    real* Ki;
    real* Kji_;
    for (int i=0;i<l;i++)
    {
        Ki = K[i];
        Kji_ = &K[0][i];
        // Fill the lower triangle and mirror it into the upper triangle,
        // since the kernel is symmetric.
        for (int j=0; j<=i; j++,Kji_+=m)
        {
            Kij = evaluate_i_j(i,j);
            *Ki++ = Kij;
            if (j<i)
                *Kji_ = Kij;
        }
        if (report_progress) {
            count += i + 1;
            PLASSERT( pb.isNotNull() );
            pb->update(count);
        }
    }
    if (cache_gram_matrix) {
        gram_matrix.resize(l,l);
        gram_matrix << K;
        gram_matrix_is_cached = true;
    }
}
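
// Usage sketch (illustrative only; 'SomeKernel' stands for any concrete
// Kernel subclass and 'trainset' for a VMat of training examples):
//
//     Ker k = new SomeKernel();
//     k->setDataForKernelMatrix(trainset);
//     Mat K(k->nExamples(), k->nExamples());
//     k->computeGramMatrix(K);   // now K(i,j) == k->evaluate_i_j(i,j)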

Mat Kernel::returnComputedGramMatrix() const
{
    if (!data)
        PLERROR("Kernel::returnComputedGramMatrix should be called only after setDataForKernelMatrix");
    int l=data.length();
    Mat K(l,l);
    computeGramMatrix(K);
    return K;
}

//////////////////////////////
// computePartialGramMatrix //
//////////////////////////////
void Kernel::computePartialGramMatrix(const TVec<int>& subset_indices, Mat K) const
{
    if (!data)
        PLERROR("Kernel::computePartialGramMatrix should be called only after setDataForKernelMatrix");
    if (!is_symmetric)
        PLERROR("In Kernel::computePartialGramMatrix - Currently not implemented for non-symmetric kernels");
    if (K.length() != subset_indices.length() || K.width() != subset_indices.length())
        PLERROR("Kernel::computePartialGramMatrix: the argument matrix K should be\n"
                "of size %d x %d (currently of size %d x %d)",
                subset_indices.length(), subset_indices.length(), K.length(), K.width());

    int l=subset_indices.size();
    int m=K.mod();
    PP<ProgressBar> pb;
    int count = 0;
    if (report_progress)
        pb = new ProgressBar("Computing Partial Gram matrix for " + classname(),
                             (l * (l + 1)) / 2);
    real Kij;
    real* Ki;
    real* Kji_;
    for (int i=0;i<l;i++)
    {
        int index_i = subset_indices[i];
        Ki = K[i];
        Kji_ = &K[0][i];
        for (int j=0; j<=i; j++,Kji_+=m)
        {
            int index_j = subset_indices[j];
            Kij = evaluate_i_j(index_i, index_j);
            *Ki++ = Kij;
            if (j<i)
                *Kji_ = Kij;
        }
        if (report_progress) {
            count += i + 1;
            PLASSERT( pb.isNotNull() );
            pb->update(count);
        }
    }
}

///////////////////////////
// computeTestGramMatrix //
///////////////////////////
void Kernel::computeTestGramMatrix(Mat test_elements, Mat K, Vec self_cov) const
{
    if (!data)
        PLERROR("Kernel::computeTestGramMatrix should be called only after setDataForKernelMatrix");

    if (test_elements.width() != data.width())
        PLERROR("Kernel::computeTestGramMatrix: the input matrix test_elements "
                "should be of width %d (currently of width %d)",
                data.width(), test_elements.width());

    if (K.length() != test_elements.length() || K.width() != data.length())
        PLERROR("Kernel::computeTestGramMatrix: the output matrix K should be\n"
                "of size %d x %d (currently of size %d x %d)",
                test_elements.length(), data.length(), K.length(), K.width());

    if (self_cov.size() != test_elements.length())
        PLERROR("Kernel::computeTestGramMatrix: the output vector self_cov should be\n"
                "of length %d (currently of length %d)",
                test_elements.length(), self_cov.size());

    int n=test_elements.length();
    PP<ProgressBar> pb = report_progress?
        new ProgressBar("Computing Test Gram matrix for " + classname(), n)
        : 0;

    for (int i=0 ; i<n ; ++i)
    {
        Vec cur_test_elem = test_elements(i);
        evaluate_all_i_x(cur_test_elem, K(i));
        self_cov[i] = evaluate(cur_test_elem, cur_test_elem);

        if (pb)
            pb->update(i);
    }
}
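
// Usage sketch (illustrative only; 'test_set' is assumed to be a Mat of
// test inputs with the same width as the training data):
//
//     Mat Ktest(test_set.length(), k->nExamples());
//     Vec self_cov(test_set.length());
//     k->computeTestGramMatrix(test_set, Ktest, self_cov);
//     // Ktest(i,j) = K(test_i, data_j), self_cov[i] = K(test_i, test_i)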


/////////////////////////////
// computeSparseGramMatrix //
/////////////////////////////
void Kernel::computeSparseGramMatrix(TVec<Mat> K) const
{
    if (!data) PLERROR("Kernel::computeSparseGramMatrix should be called only after setDataForKernelMatrix");
    if (!is_symmetric)
        PLERROR("In Kernel::computeSparseGramMatrix - Currently not implemented for non-symmetric kernels");
    if (cache_gram_matrix && sparse_gram_matrix_is_cached) {
        for (int i = 0; i < sparse_gram_matrix.length(); i++) {
            K[i].resize(sparse_gram_matrix[i].length(), 2);
            K[i] << sparse_gram_matrix[i];
        }
        return;
    }
    if (cache_gram_matrix && gram_matrix_is_cached) {
        // We can obtain the sparse Gram matrix from the full one.
        int n = K.length();
        Vec row(2);
        for (int i = 0; i < n; i++) {
            Mat& K_i = K[i];
            K_i.resize(0,2);
            real* gram_ij = gram_matrix[i];
            for (int j = 0; j < n; j++, gram_ij++)
                if (!fast_exact_is_equal(*gram_ij, 0)) {
                    row[0] = j;
                    row[1] = *gram_ij;
                    K_i.appendRow(row);
                }
        }
        // TODO Use a method to avoid code duplication below.
        sparse_gram_matrix.resize(n);
        for (int i = 0; i < n; i++) {
            sparse_gram_matrix[i].resize(K[i].length(), 2);
            sparse_gram_matrix[i] << K[i];
        }
        sparse_gram_matrix_is_cached = true;
        return;
    }
    int l=data->length();
    PP<ProgressBar> pb;
    int count = 0;
    if (report_progress) {
        pb = new ProgressBar("Computing sparse Gram matrix for " + classname(), (l * (l + 1)) / 2);
    }
    Vec j_and_Kij(2);
    for (int i = 0; i < l; i++)
        K[i].resize(0,2);
    for (int i=0;i<l;i++)
    {
        for (int j=0; j<=i; j++)
        {
            j_and_Kij[1] = evaluate_i_j(i,j);
            if (!fast_exact_is_equal(j_and_Kij[1], 0)) {
                j_and_Kij[0] = j;
                K[i].appendRow(j_and_Kij);
                if (j < i) {
                    j_and_Kij[0] = i;
                    K[j].appendRow(j_and_Kij);
                }
            }
        }
        if (pb) {
            count += i + 1;
            pb->update(count);
        }
    }
    if (cache_gram_matrix) {
        sparse_gram_matrix.resize(l);
        for (int i = 0; i < l; i++) {
            sparse_gram_matrix[i].resize(K[i].length(), 2);
            sparse_gram_matrix[i] << K[i];
        }
        sparse_gram_matrix_is_cached = true;
    }
}


/////////////////////////////////
// computeGramMatrixDerivative //
/////////////////////////////////
void Kernel::computeGramMatrixDerivative(Mat& KD, const string& kernel_param,
                                         real epsilon) const
{
    MODULE_LOG << "Computing Gram matrix derivative by finite differences "
               << "for hyper-parameter '" << kernel_param << "'"
               << endl;

    // This function is conceptually const, but the generic evaluation by
    // finite differences requires temporarily changing options, which
    // formally requires casting away constness.
    Kernel* This = const_cast<Kernel*>(this);
    bool old_cache = cache_gram_matrix;
    This->cache_gram_matrix = false;

    if (!data)
        PLERROR("Kernel::computeGramMatrixDerivative should be called only after "
                "setDataForKernelMatrix");

    int W = nExamples();
    KD.resize(W,W);
    Mat KDminus(W,W);

    string cur_param_str = getOption(kernel_param);
    real cur_param = lexical_cast<real>(cur_param_str);

    // Compute the positive part of the finite difference
    This->changeOption(kernel_param, tostring(cur_param+epsilon));
    This->build();
    computeGramMatrix(KD);

    // Compute the negative part of the finite difference
    This->changeOption(kernel_param, tostring(cur_param-epsilon));
    This->build();
    computeGramMatrix(KDminus);

    // Finalize computation
    KD -= KDminus;
    KD /= real(2.*epsilon);

    // Restore the original parameter value and caching behaviour.
    This->changeOption(kernel_param, cur_param_str);
    This->build();
    This->cache_gram_matrix = old_cache;
}
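
// The derivative above is the elementwise central finite difference on the
// hyper-parameter p:
//
//     dK/dp  ~=  ( K(p + epsilon) - K(p - epsilon) ) / (2 * epsilon)
//
// Usage sketch (illustrative; assumes the kernel exposes an option named
// "sigma"):
//
//     Mat KD;
//     k->computeGramMatrixDerivative(KD, "sigma", 1e-6);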


///////////////////
// setParameters //
///////////////////
void Kernel::setParameters(Vec paramvec)
{ PLERROR("setParameters(Vec paramvec) not implemented for this kernel"); }

///////////////////
// getParameters //
///////////////////
Vec Kernel::getParameters() const
{ return Vec(); }

/////////////
// hasData //
/////////////
bool Kernel::hasData() {
    return data.isNotNull();
}

///////////
// apply //
///////////
void Kernel::apply(VMat m1, VMat m2, Mat& result) const
{
    result.resize(m1->length(), m2->length());
    int m1w = m1->inputsize();
    if (m1w == -1) { // No inputsize specified: using width instead.
        m1w = m1->width();
    }
    int m2w = m2->inputsize();
    if (m2w == -1) {
        m2w = m2->width();
    }
    Vec m1_i(m1w);
    Vec m2_j(m2w);
    PP<ProgressBar> pb;
    bool easy_case = (is_symmetric && m1 == m2);
    int l1 = m1->length();
    int l2 = m2->length();
    if (report_progress) {
        int nb_steps;
        if (easy_case) {
            nb_steps = (l1 * (l1 + 1)) / 2;
        } else {
            nb_steps = l1 * l2;
        }
        pb = new ProgressBar("Applying " + classname() + " to two matrices", nb_steps);
    }
    int count = 0;
    if(easy_case)
    {
        for(int i=0; i<m1->length(); i++)
        {
            m1->getSubRow(i,0,m1_i);
            for(int j=0; j<=i; j++)
            {
                m2->getSubRow(j,0,m2_j);
                real val = evaluate(m1_i,m2_j);
                result(i,j) = val;
                result(j,i) = val;
            }
            if (pb) {
                count += i + 1;
                pb->update(count);
            }
        }
    }
    else
    {
        for(int i=0; i<m1->length(); i++)
        {
            m1->getSubRow(i,0,m1_i);
            for(int j=0; j<m2->length(); j++)
            {
                m2->getSubRow(j,0,m2_j);
                result(i,j) = evaluate(m1_i,m2_j);
            }
            if (pb) {
                count += l2;
                pb->update(count);
            }
        }
    }
}

void Kernel::apply(VMat m, const Vec& x, Vec& result) const
{
    result.resize(m->length());
    int mw = m->inputsize();
    if (mw == -1) { // No inputsize specified: using width instead.
        mw = m->width();
    }
    Vec m_i(mw);
    for(int i=0; i<m->length(); i++)
    {
        m->getSubRow(i,0,m_i);
        result[i] = evaluate(m_i,x);
    }
}


void Kernel::apply(Vec x, VMat m, Vec& result) const
{
    result.resize(m->length());
    int mw = m->inputsize();
    if (mw == -1) { // No inputsize specified: using width instead.
        mw = m->width();
    }
    Vec m_i(mw);
    for(int i=0; i<m->length(); i++)
    {
        m->getSubRow(i,0,m_i);
        result[i] = evaluate(x,m_i);
    }
}


Mat Kernel::apply(VMat m1, VMat m2) const
{
    Mat result;
    apply(m1,m2,result);
    return result;
}

//////////
// test //
//////////
real Kernel::test(VMat d, real threshold, real sameness_below_threshold, real sameness_above_threshold) const
{
    int nerrors = 0;
    int inputsize = (d->width()-1)/2;
    for(int i=0; i<d->length(); i++)
    {
        Vec inputs = d(i);
        Vec input1 = inputs.subVec(0,inputsize);
        Vec input2 = inputs.subVec(inputsize,inputsize);
        real sameness = inputs[inputs.length()-1];
        real kernelvalue = evaluate(input1,input2);
        cerr << "[" << kernelvalue << " " << sameness << "]\n";
        if(kernelvalue<threshold)
        {
            if(fast_exact_is_equal(sameness, sameness_above_threshold))
                nerrors++;
        }
        else // kernelvalue>=threshold
        {
            if(fast_exact_is_equal(sameness, sameness_below_threshold))
                nerrors++;
        }
    }
    return real(nerrors)/d->length();
}


////////////////////////////////////////////////
// computeKNNeighbourMatrixFromDistanceMatrix //
////////////////////////////////////////////////
TMat<int> Kernel::computeKNNeighbourMatrixFromDistanceMatrix(const Mat& D, int knn, bool insure_self_first_neighbour, bool report_progress)
{
    int npoints = D.length();
    TMat<int> neighbours(npoints, knn);
    Mat tmpsort(npoints,2);

    PP<ProgressBar> pb;
    if (report_progress) {
        pb = new ProgressBar("Computing neighbour matrix", npoints);
    }

    Mat indices;
    for(int i=0; i<npoints; i++)
    {
        for(int j=0; j<npoints; j++)
        {
            tmpsort(j,0) = D(i,j);
            tmpsort(j,1) = j;
        }
        if(insure_self_first_neighbour)
            tmpsort(i,0) = -FLT_MAX;

        partialSortRows(tmpsort, knn);
        indices = tmpsort.column(1).subMatRows(0,knn);
        for (int j = 0; j < knn; j++) {
            neighbours(i,j) = int(indices(j,0));
        }
        if (pb)
            pb->update(i);
    }
    return neighbours;
}
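
// Usage sketch (illustrative only; 'D' is assumed to be a full pairwise
// distance matrix, e.g. obtained with computeGramMatrix() and a
// distance-like kernel):
//
//     TMat<int> nn = k->computeKNNeighbourMatrixFromDistanceMatrix(D, 10, true, false);
//     // nn(i,j) is the index of the j-th nearest neighbour of point i;
//     // with insure_self_first_neighbour=true, nn(i,0) == i.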

//////////////////////////////////////////////
// computeNeighbourMatrixFromDistanceMatrix //
//////////////////////////////////////////////
// You should use computeKNNeighbourMatrixFromDistanceMatrix instead.
Mat Kernel::computeNeighbourMatrixFromDistanceMatrix(const Mat& D, bool insure_self_first_neighbour, bool report_progress)
{
    int npoints = D.length();
    Mat neighbours(npoints, npoints);
    Mat tmpsort(npoints,2);

    PP<ProgressBar> pb;
    if (report_progress) {
        pb = new ProgressBar("Computing neighbour matrix", npoints);
    }

    for(int i=0; i<npoints; i++)
    {
        for(int j=0; j<npoints; j++)
        {
            tmpsort(j,0) = D(i,j);
            tmpsort(j,1) = j;
        }
        if(insure_self_first_neighbour)
            tmpsort(i,0) = -FLT_MAX;

        sortRows(tmpsort);
        neighbours(i) << tmpsort.column(1);
        if (pb)
            pb->update(i);
    }
    return neighbours;
}

////////////////////////
// estimateHistograms //
////////////////////////
Mat Kernel::estimateHistograms(VMat d, real sameness_threshold, real minval, real maxval, int nbins) const
{
    real binwidth = (maxval-minval)/nbins;
    int inputsize = (d->width()-1)/2;
    Mat histo(2,nbins);
    Vec histo_below = histo(0);
    Vec histo_above = histo(1);
    int nbelow=0;
    int nabove=0;
    for(int i=0; i<d->length(); i++)
    {
        Vec inputs = d(i);
        Vec input1 = inputs.subVec(0,inputsize);
        Vec input2 = inputs.subVec(inputsize,inputsize);
        real sameness = inputs[inputs.length()-1];
        real kernelvalue = evaluate(input1,input2);
        if(kernelvalue>=minval && kernelvalue<maxval)
        {
            int binindex = int((kernelvalue-minval)/binwidth);
            if(sameness<sameness_threshold)
            {
                histo_below[binindex]++;
                nbelow++;
            }
            else
            {
                histo_above[binindex]++;
                nabove++;
            }
        }
    }
    histo_below /= real(nbelow);
    histo_above /= real(nabove);
    return histo;
}


Mat Kernel::estimateHistograms(Mat input_and_class, real minval, real maxval, int nbins) const
{
    real binwidth = (maxval-minval)/nbins;
    int inputsize = input_and_class.width()-1;
    Mat inputs = input_and_class.subMatColumns(0,inputsize);
    Mat classes = input_and_class.column(inputsize);
    Mat histo(4,nbins);
    Vec histo_mean_same = histo(0);
    Vec histo_mean_other = histo(1);
    Vec histo_min_same = histo(2);
    Vec histo_min_other = histo(3);

    for(int i=0; i<inputs.length(); i++)
    {
        Vec input = inputs(i);
        real input_class = classes(i,0);
        real sameclass_meandist = 0.0;
        real otherclass_meandist = 0.0;
        real sameclass_mindist = FLT_MAX;
        real otherclass_mindist = FLT_MAX;
        for(int j=0; j<inputs.length(); j++)
            if(j!=i)
            {
                real dist = evaluate(input, inputs(j));
                if(fast_exact_is_equal(classes(j,0), input_class))
                {
                    sameclass_meandist += dist;
                    if(dist<sameclass_mindist)
                        sameclass_mindist = dist;
                }
                else
                {
                    otherclass_meandist += dist;
                    if(dist<otherclass_mindist)
                        otherclass_mindist = dist;
                }
            }
        sameclass_meandist /= (inputs.length()-1);
        otherclass_meandist /= (inputs.length()-1);
        if(sameclass_meandist>=minval && sameclass_meandist<maxval)
            histo_mean_same[int((sameclass_meandist-minval)/binwidth)]++;
        if(sameclass_mindist>=minval && sameclass_mindist<maxval)
            histo_min_same[int((sameclass_mindist-minval)/binwidth)]++;
        if(otherclass_meandist>=minval && otherclass_meandist<maxval)
            histo_mean_other[int((otherclass_meandist-minval)/binwidth)]++;
        if(otherclass_mindist>=minval && otherclass_mindist<maxval)
            histo_min_other[int((otherclass_mindist-minval)/binwidth)]++;
    }
    histo_mean_same /= sum(histo_mean_same, false);
    histo_min_same /= sum(histo_min_same, false);
    histo_mean_other /= sum(histo_mean_other, false);
    histo_min_other /= sum(histo_min_other, false);
    return histo;
}

/*
  void
  Kernel::oldwrite(ostream& out) const
  {
  writeHeader(out,"Kernel");
  writeField(out,"is_symmetric",is_symmetric);
  writeFooter(out,"Kernel");
  }


  void
  Kernel::oldread(istream& in)
  {
  readHeader(in,"Kernel");
  readField(in,"is_symmetric",is_symmetric);
  readFooter(in,"Kernel");
  }
*/

//////////////////////////////////////
// findClosestPairsOfDifferentClass //
//////////////////////////////////////
// The last column of 'data' is supposed to be a class number.
// Returns a matrix of (index1, index2, distance) rows.
Mat findClosestPairsOfDifferentClass(int k, VMat data, Ker dist)
{
    Mat result(k,3);
    real maxdistinlist = -FLT_MAX;
    int posofmaxdistinlist = -1;
    int kk=0; // number of pairs already in list
    Vec rowi(data.width());
    Vec inputi = rowi.subVec(0,rowi.length()-1);
    real& targeti = rowi[rowi.length()-1];
    Vec rowj(data.width());
    Vec inputj = rowj.subVec(0,rowj.length()-1);
    real& targetj = rowj[rowj.length()-1];
    for(int i=0; i<data.length(); i++)
    {
        data->getRow(i,rowi);
        for(int j=0; j<data.length(); j++)
        {
            data->getRow(j,rowj);
            if(!fast_exact_is_equal(targeti, targetj))
            {
                real d = dist(inputi,inputj);
                if(kk<k)
                {
                    result(kk,0) = i;
                    result(kk,1) = j;
                    result(kk,2) = d;
                    if(d>maxdistinlist)
                    {
                        maxdistinlist = d;
                        posofmaxdistinlist = kk;
                    }
                    kk++;
                }
                else if(d<maxdistinlist)
                {
                    result(posofmaxdistinlist,0) = i;
                    result(posofmaxdistinlist,1) = j;
                    result(posofmaxdistinlist,2) = d;
                    posofmaxdistinlist = argmax(result.column(2));
                    maxdistinlist = result(posofmaxdistinlist,2);
                }
            }
        }
    }
    sortRows(result, 2); // TODO: use partialSortRows instead
    return result;
}
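
// Usage sketch (illustrative only; finds the 20 closest cross-class pairs
// under a distance kernel 'dist' on a dataset 'ds' whose last column holds
// the class number):
//
//     Mat pairs = findClosestPairsOfDifferentClass(20, ds, dist);
//     // each row: (index of first example, index of second, distance),
//     // sorted by increasing distance.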

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :