// -*- C++ -*-

// LLC.cc
//
// Copyright (C) 2005 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: LLC.cc 7407 2007-05-29 14:28:19Z tihocan $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "LLC.h"
#include <plearn/io/openString.h>
#include <plearn/ker/ReconstructionWeightsKernel.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

// LLC //
LLC::LLC()
    : sum_of_dim(-1),
      knn(5),
      n_comp(1),
      regularization(0),
      train_mixture(true)
{}

PLEARN_IMPLEMENT_OBJECT(LLC,
                        "Locally Linear Coordination.",
                        "This is the algorithm described in 'Automatic alignment of local representations'\n"
                        "by Teh and Roweis (2003).\n"
    );
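
// Overview (a brief sketch, based on the train() method below and on the Teh
// & Roweis paper): each expert of the 'mixture' maps an input x to local
// low-dimensional coordinates, and the mixture's output u(x) concatenates
// these coordinates weighted by the experts' responsibilities. LLC learns a
// single linear map L that aligns all local charts into one global embedding
// y = L u(x), by minimizing the LLE reconstruction cost of the embedded
// training points; this reduces to a generalized eigenvalue problem.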

// declareOptions //
void LLC::declareOptions(OptionList& ol)
{
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    // Build options.

    declareOption(ol, "knn", &LLC::knn, OptionBase::buildoption,
                  "Number of neighbors used to compute local reconstruction weights.");

    declareOption(ol, "mixture", &LLC::mixture, OptionBase::buildoption,
                  "A mixture of local dimensionality reducers.");

    declareOption(ol, "n_comp", &LLC::n_comp, OptionBase::buildoption,
                  "Number of components computed.");

    declareOption(ol, "regularization", &LLC::regularization, OptionBase::buildoption,
                  "A regularization coefficient, to use if the eigensystem solver crashes (be aware of the consequences).");

    declareOption(ol, "train_mixture", &LLC::train_mixture, OptionBase::buildoption,
                  "Whether the mixture should be trained or not.");

    // Learnt options.

    declareOption(ol, "L", &LLC::L, OptionBase::learntoption,
                  "The matrix of factors (bias and linear transformation for each neighborhood).");

    declareOption(ol, "sum_of_dim", &LLC::sum_of_dim, OptionBase::learntoption,
                  "Must be equal to mixture->outputsize().");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}

// build //
void LLC::build()
{
    inherited::build();
    build_();
}

// build_ //
void LLC::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.
    if (sum_of_dim > 0)
        mixture_output.resize(sum_of_dim);
}

// computeCostsFromOutputs //
void LLC::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                  const Vec& target, Vec& costs) const
{
    // No cost to compute.
}

// computeOutput //
void LLC::computeOutput(const Vec& input, Vec& output) const
{
    output.resize(n_comp);
    // As in the train method, we assume the mixture's output is exactly the
    // (responsibility-weighted) concatenated local coordinates we need.
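    // The global embedding is then the linear map y = L u(x).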
    mixture->computeOutput(input, mixture_output);
    product(output, L, mixture_output);
}

// forget //
void LLC::forget()
{
    stage = 0;
    sum_of_dim = -1;
    L.resize(0,0);
}

// getTestCostNames //
TVec<string> LLC::getTestCostNames() const
{
    static TVec<string> noCost;
    return noCost;
}

// getTrainCostNames //
TVec<string> LLC::getTrainCostNames() const
{
    static TVec<string> noCost;
    return noCost;
}

// makeDeepCopyFromShallowCopy //
void LLC::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
    PLERROR("LLC::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

// outputsize //
int LLC::outputsize() const
{
    return n_comp;
}

// train //
void LLC::train()
{
    PLASSERT( mixture );
    if (stage >= nstages) {
        PLWARNING("In LLC::train - Learner has already been trained, skipping training");
        return;
    }
    if (verbosity >= 2)
        pout << "Computing local reconstruction weights" << endl;
    PP<ReconstructionWeightsKernel> reconstruct = new ReconstructionWeightsKernel();
    reconstruct->knn = knn + 1; // +1 because it includes the point itself.
    reconstruct->build();
    reconstruct->setDataForKernelMatrix(train_set);
    int n = train_set->length();
    Mat lle_mat(n,n);
    reconstruct->computeLLEMatrix(lle_mat); // Fill lle_mat with W + W' - W' W.
    for (int i = 0; i < n; i++)
        lle_mat(i,i) = lle_mat(i,i) - 1;    // lle_mat = - (I - W') * (I - W)
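    // Indeed, (I - W') (I - W) = I - W - W' + W' W, so subtracting the
    // identity from (W + W' - W' W) gives exactly - (I - W') (I - W).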
    if (train_mixture) {
        if (verbosity >= 2)
            pout << "Training mixture" << endl;
        mixture->setTrainingSet(train_set);
        mixture->train();
    }
    // Obtain the number of components in the mixture (= the number of 'experts').
    // We assume here the mixture has a 'n_components' option.
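    // (getOption returns the option's value serialized as a string, hence the
    // parsing through an in-memory PStream.)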
    int n_comp_mixture;
    string mixture_n_components = mixture->getOption("n_components");
    openString(mixture_n_components, PStream::plearn_ascii) >> n_comp_mixture;
    // Obtain the dimension of each expert in the mixture.
    // We assume here the mixture has an 'outputsizes' option which is a TVec<int>
    // containing the outputsize of each expert.
    TVec<int> dimension;
    string mixture_outputsizes = mixture->getOption("outputsizes");
    PStream in = openString(mixture_outputsizes, PStream::plearn_ascii);
    in >> dimension;
    sum_of_dim = n_comp_mixture;
    for (int k = 0; k < dimension.length(); k++)
        sum_of_dim += dimension[k];
    mixture_output.resize(sum_of_dim);
    // Compute the output of the mixture for all elements in the training set.
    // The output must be a vector of size 'sum_of_dim' which is the concatenation
    // of the output of each expert in the mixture, each weighted by its
    // responsibility r_k (that can depend on x, and such that sum_k r_k = 1),
    // and with a bias (= r_k) added as the first dimension of each expert.
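    // For instance, with two experts of output sizes d_1 and d_2, this vector
    // is [ r_1, r_1 z_1(x), r_2, r_2 z_2(x) ] where z_k(x) denotes the k-th
    // expert's local coordinates of x, so sum_of_dim = 2 + d_1 + d_2.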
    if (verbosity >= 2)
        pout << "Computing mixture outputs" << endl;
    Mat U(n, sum_of_dim);
    mixture->useOnTrain(U);
    if (verbosity >= 2)
        pout << "Building the generalized eigenvector system" << endl;
    Mat B(sum_of_dim, sum_of_dim);
    transposeProduct(B, U, U);
    B /= real(n);                 // B = 1/n U' U
    Mat A(sum_of_dim, sum_of_dim);
    Mat tmp(n, sum_of_dim);
    product(tmp, lle_mat, U);
    // A = - U' (I - W') (I - W) U (because we want the smallest eigenvalues).
    transposeProduct(A, U, tmp);
    tmp = Mat(); // Free memory.
    fillItSymmetric(A); // A and B should already be symmetric, but it is
    fillItSymmetric(B); // safer to ensure it.
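    // We then solve the generalized eigenproblem A v = lambda B v: this
    // minimizes the LLE cost v' U' (I - W') (I - W) U v under the scaling
    // constraint (1/n) v' U' U v = 1 (which is where B comes from). A is
    // negated so that the solver below returns the smallest-cost directions.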
    if (verbosity >= 2)
        pout << "Solving the generalized eigensystem" << endl;
    Vec eigen_val;
    Mat eigen_vec;
    if (regularization > 0)
        regularizeMatrix(B, regularization);
    generalizedEigenVecOfSymmMat(A, B, 1, n_comp + 1, eigen_val, eigen_vec);
    // Ignore the smallest eigenvalue (should be 0).
    if (verbosity >= 5)
        pout << "Smallest eigenvalue: " << eigen_val[0] << endl;
    L = eigen_vec.subMatRows(1, eigen_vec.length() - 1);
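    // The smallest eigenvalue corresponds to the trivial (constant) solution,
    // reachable through the bias dimensions; the n_comp remaining eigenvectors
    // form the rows of L, which is thus an n_comp x sum_of_dim matrix, as
    // expected by computeOutput.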
    if (verbosity >= 2)
        pout << "Training is over" << endl;
    stage = 1;
}
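
// A minimal usage sketch (hypothetical option values; 'my_mixture' stands for
// any PLearner exposing the 'n_components' and 'outputsizes' options assumed
// by train() above):
//
//     PP<LLC> llc = new LLC();
//     llc->knn     = 10;          // neighbors for reconstruction weights
//     llc->n_comp  = 2;           // dimension of the global embedding
//     llc->mixture = my_mixture;
//     llc->build();
//     llc->setTrainingSet(train_set);
//     llc->train();
//     Vec embedding(llc->outputsize());
//     llc->computeOutput(input, embedding); // embedding = L * u(input)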

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :