// -*- C++ -*-

// KernelProjection.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: KernelProjection.cc 8431 2008-01-30 16:25:33Z tihocan $
 ******************************************************* */

// Authors: Olivier Delalleau
#include "KernelProjection.h"
#include <time.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

//////////////////////
// KernelProjection //
//////////////////////
KernelProjection::KernelProjection()
    : n_comp_kept(-1),
      n_examples(-1),
      first_output(true),
      compute_costs(false),
      free_extra_components(true),
      ignore_n_first(0),
      min_eigenvalue(-REAL_MAX),
      n_comp(1),
      n_comp_for_cost(-1),
      normalize("none")
{
}

PLEARN_IMPLEMENT_OBJECT(KernelProjection,
                        "Performs dimensionality reduction by learning eigenfunctions of a kernel.",
                        ""
    );
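
// A minimal usage sketch (the GaussianKernel choice and option values are
// illustrative assumptions, not prescribed by this file):
//
//     KernelProjection proj;
//     proj.kernel = new GaussianKernel(); // any PLearn Kernel
//     proj.n_comp = 2;                    // number of components to keep
//     proj.build();
//     proj.setTrainingSet(train_set);     // train_set is a VMat
//     proj.train();                       // eigendecomposition of the Gram matrix
//     Vec x, embedding(proj.outputsize());
//     proj.computeOutput(x, embedding);   // project a (new) input point x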

////////////////////
// declareOptions //
////////////////////
void KernelProjection::declareOptions(OptionList& ol)
{

    // Build options.

    declareOption(ol, "kernel", &KernelProjection::kernel, OptionBase::buildoption,
                  "The kernel used to compute the Gram matrix.");

    declareOption(ol, "n_comp", &KernelProjection::n_comp, OptionBase::buildoption,
                  "Number of components computed.");

    declareOption(ol, "normalize", &KernelProjection::normalize, OptionBase::buildoption,
                  "The kind of normalization performed when computing the output\n"
                  " - 'none'      : classical projection on the eigenvectors\n"
                  " - 'unit_var'  : normalization to get unit variance on each coordinate\n"
                  " - 'unit_eigen': ignore the eigenvalues and do as if they were all 1\n"
                  " - 'unit_coord': coordinates are normalized so that they have norm 1\n");

    declareOption(ol, "min_eigenvalue", &KernelProjection::min_eigenvalue, OptionBase::buildoption,
                  "Any component associated with an eigenvalue <= min_eigenvalue will be discarded.");

    declareOption(ol, "compute_costs", &KernelProjection::compute_costs, OptionBase::buildoption,
                  "Whether we should compute costs or not.");

    declareOption(ol, "n_comp_for_cost", &KernelProjection::n_comp_for_cost, OptionBase::buildoption,
                  "The number of components considered when computing a cost (default = -1 means n_comp).");

    declareOption(ol, "free_extra_components", &KernelProjection::free_extra_components, OptionBase::buildoption,
                  "If set to 1, components computed but not kept won't be available after training.");

    declareOption(ol, "ignore_n_first", &KernelProjection::ignore_n_first, OptionBase::buildoption,
                  "Will ignore the first 'ignore_n_first' eigenvectors, if this option is > 0.");

    // Learnt options.

    declareOption(ol, "eigenvalues", &KernelProjection::eigenvalues, OptionBase::learntoption,
                  "The eigenvalues of the Gram matrix.");

    declareOption(ol, "eigenvectors", &KernelProjection::eigenvectors, OptionBase::learntoption,
                  "The eigenvectors of the Gram matrix.");
    declareOption(ol, "n_comp_kept", &KernelProjection::n_comp_kept, OptionBase::learntoption,
                  "The number of components actually kept in the output (some may be\n"
                  "discarded because of low eigenvalues).");

    declareOption(ol, "n_examples", &KernelProjection::n_examples, OptionBase::learntoption,
                  "The number of points in the training set.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    // Hide unused options.

    redeclareOption(ol, "seed", &KernelProjection::seed_, OptionBase::nosave,
                    "No seed used here.");

}

///////////
// build //
///////////
void KernelProjection::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void KernelProjection::build_()
{
    if (n_comp_kept == -1) {
        n_comp_kept = n_comp;
    }
    first_output = true;  // Safer.
    last_input.resize(0);
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void KernelProjection::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                               const Vec& target, Vec& costs) const
{
    if (!compute_costs)
        return;
    // fs_squared_norm_reconstruction_error (see getTestCostNames).
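    // For a positive (semi-)definite kernel, K(x,x) is the squared norm of the
    // image of x in the kernel's feature space, so |K(x,x) - ||output||^2|
    // measures how much of that squared norm the kept components reconstruct.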
    real k_x_x = kernel->evaluate(input, input);
    real fs_norm;
    if (n_comp_for_cost > 0) {
        // Only take the 'n_comp_for_cost' first components.
        fs_norm = pownorm(output.subVec(0, n_comp_for_cost));
    } else {
        fs_norm = pownorm(output);
    }
    costs.resize(2);
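    // The dot-product cost needs two points, so test examples are handled in
    // consecutive pairs: the first point of a pair is buffered in 'last_input'
    // and 'last_output' and gets a MISSING_VALUE cost; the second one completes
    // the pair, and the buffer is then cleared.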
    if (last_input.length() == 0) {
        last_input.resize(input.length());
        last_output.resize(output.length());
        last_input << input;
        last_output << output;
        costs[1] = MISSING_VALUE;
    } else {
        real k_x_y = kernel->evaluate(input, last_input);
        real fs_dotp;
        if (n_comp_for_cost > 0) {
            // Only take the 'n_comp_for_cost' first components.
            fs_dotp = dot(output.subVec(0, n_comp_for_cost), last_output.subVec(0, n_comp_for_cost));
        } else {
            fs_dotp = dot(output, last_output);
        }
        last_input.resize(0);
        real diff = k_x_y - fs_dotp;
        costs[1] = diff * diff;
    }
    costs[0] = abs(k_x_x - fs_norm);
    if (k_x_x - fs_norm < -1e-5) {
        // TODO Remove this later after making sure it didn't happen.
        perr << "Negative error: " << k_x_x - fs_norm << " (k_x_x = " << k_x_x << ", fs_norm = " << fs_norm << ")" << endl;
    }
}

///////////////////
// computeOutput //
///////////////////
void KernelProjection::computeOutput(const Vec& input, Vec& output) const
{
    PLASSERT( outputsize() > 0 );
    real* result_ptr;
    if (first_output) {
        // Initialize k_x_xi, used_eigenvectors and result correctly.
        k_x_xi.resize(n_examples);
        used_eigenvectors = eigenvectors.subMatRows(0, n_comp_kept);
        result.resize(n_comp_kept, 1);
        first_output = false;
    }
    // Compute the K(x,x_i).
    kernel->evaluate_all_i_x(input, k_x_xi);
    // Compute the output.
    rowSum(used_eigenvectors * k_x_xi, result);
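    // At this point result[i] = sum_j v_ij K(x, x_j), where v_i (row i of
    // 'used_eigenvectors') is the i-th eigenvector of the Gram matrix. The
    // branches below only rescale this raw projection; e.g. with 'none' the
    // i-th coordinate becomes sqrt(n_examples) / eigenvalues[i] * result[i].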
    output.resize(n_comp_kept);
    result_ptr = result[0];
    if (normalize == "none") {
        real norm_coeff = sqrt(real(n_examples));
        for (int i = 0; i < n_comp_kept; i++) {
            output[i] = *(result_ptr++) / eigenvalues[i] * norm_coeff;
        }
    } else if (normalize == "unit_var") {
        for (int i = 0; i < n_comp_kept; i++) {
            output[i] = *(result_ptr++) / sqrt(eigenvalues[i]);
        }
    } else if (normalize == "unit_eigen") {
        output << result;
        output *= sqrt(real(n_examples));
    } else if (normalize == "unit_coord") {
        output << result;
        real norm = PLearn::norm(output, 2);
        if (!fast_exact_is_equal(norm, 0))
            output /= norm;
    } else {
        PLERROR("In KernelProjection::computeOutput - Wrong value for 'normalize'");
    }
}

////////////
// forget //
////////////
void KernelProjection::forget()
{
    stage = 0;
    if (verbosity > 1)
        pout << "forget: n_comp_kept (before reset) = " << n_comp_kept << endl;
    n_comp_kept = n_comp;
    if (verbosity > 1)
        pout << "forget: n_comp_kept (after reset) = " << n_comp_kept << endl;
    n_examples = 0;
    first_output = true;
    last_input.resize(0);
    // Free memory.
    eigenvectors = Mat();
    eigenvalues = Vec();
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> KernelProjection::getTestCostNames() const
{
    TVec<string> t;
    if (!compute_costs)
        return t;
    // Feature space squared norm reconstruction error:
    // | K(x,x) - ||output||^2 |
    t.append("fs_squared_norm_reconstruction_error");
    // Feature space dot product reconstruction squared error:
    // ( K(x,y) - <output_x,output_y> )^2
    t.append("fs_dotp_reconstruction_squared_error");
    return t;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> KernelProjection::getTrainCostNames() const
{
    return getTestCostNames();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void KernelProjection::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(k_x_xi, copies);
    deepCopyField(result, copies);
    deepCopyField(used_eigenvectors, copies);
    deepCopyField(last_input, copies);
    deepCopyField(last_output, copies);
    deepCopyField(kernel, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(eigenvectors, copies);
}

////////////////
// outputsize //
////////////////
int KernelProjection::outputsize() const
{
    return n_comp_kept;
}

////////////////////
// setTrainingSet //
////////////////////
void KernelProjection::setTrainingSet(VMat training_set, bool call_forget) {
    inherited::setTrainingSet(training_set, call_forget);
    n_examples = training_set->length();
    // Save the dataset in the kernel, because it may be needed after we reload
    // the learner.
    if (kernel)
    {
        kernel->specify_dataset = training_set;
        kernel->build();
    }
    else
        PLERROR("KernelProjection::setTrainingSet: You cannot use setTrainingSet without a kernel set");
}

///////////
// train //
///////////
void KernelProjection::train()
{
    if (stage == 1) {
        PLWARNING("In KernelProjection::train - Learner has already been trained");
        return;
    }
    Mat gram(n_examples, n_examples);
    // (1) Compute the Gram matrix.
    if (report_progress) {
        kernel->report_progress = true;
    }
    clock_t time_for_gram = clock();
    kernel->computeGramMatrix(gram);
    time_for_gram = clock() - time_for_gram;
    if (verbosity >= 3) {
        // Report the time spent on the Gram matrix (it was previously
        // measured but never displayed).
        pout << "Gram matrix computed in "
             << real(time_for_gram) / CLOCKS_PER_SEC << " seconds." << endl;
    }
    // (2) Compute its eigenvectors and eigenvalues.
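    // Note: eigenVecOfSymmMat is expected to return the eigenvalues in
    // decreasing order, with one eigenvector per row of 'eigenvectors';
    // the discarding loop in step (3) below relies on this ordering.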
    eigenVecOfSymmMat(gram, n_comp + ignore_n_first, eigenvalues, eigenvectors);
    if (ignore_n_first > 0) {
        eigenvalues = eigenvalues.subVec(ignore_n_first, eigenvalues.length() - ignore_n_first);
        eigenvectors = eigenvectors.subMatRows(ignore_n_first, eigenvectors.length() - ignore_n_first);
    }

    n_comp_kept = eigenvalues.length(); // May differ from n_comp.
    // (3) Discard low eigenvalues.
    int p = 0;
    while (p < n_comp_kept && eigenvalues[p] > min_eigenvalue)
        p++;
    n_comp_kept = p;

    // (4) Optionally remove the discarded components.
    if (free_extra_components) {
        eigenvalues.resize(n_comp_kept);
        eigenvectors.resize(n_comp_kept, eigenvectors.width());
    }
    // All done!
    first_output = true;
    stage = 1;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :