// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 2002 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GaussianDistribution.cc 9418 2008-09-02 15:33:46Z nouiz $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "GaussianDistribution.h"
#include <plearn/vmat/VMat_basic_stats.h>
#include <plearn/math/plapack.h>
#include <plearn/math/distr_maths.h>

namespace PLearn {
using namespace std;

#define ZEROGAMMA

PLEARN_IMPLEMENT_OBJECT(GaussianDistribution,
                        "A Gaussian distribution represented compactly by the k leading eigenvalues and eigenvectors of its covariance matrix.",
                        "This class can be used either to fit a Gaussian to data \n"
                        "or to explicitly represent a Gaussian with a covariance matrix \n"
                        "of the form C = VDV' (possibly regularized by adding gamma.I).\n"
                        "When fitting to data, an eigendecomposition of the empirical \n"
                        "covariance matrix is performed, and the top k eigenvalues \n"
                        "and associated eigenvectors V are kept.\n"
                        "The actual variances used for the principal directions in D are obtained \n"
                        "from the empirical or specified eigenvalues in the following way: \n"
                        "  var_i = max(eigenvalue_i+gamma, min_eig) \n"
                        "In addition, a variance for the remaining directions \n"
                        "in the null space of VDV' (directions orthogonal to the \n"
                        "eigenvectors in V) is obtained by: \n"
                        "  remaining_var = use_last_eig ? max(last_eigenvalue+gamma, min_eig) \n"
                        "                               : max(gamma, min_eig) \n"
                        "So the full expression of the actual covariance matrix used is: \n"
                        "  C = VDV' + remaining_var.I \n"
                        "with D_ii = max(eigenvalue_i+gamma, min_eig) - remaining_var \n"
                        "Note that with min_eig=0 and use_last_eig=false, we get: \n"
                        "  C = V.diag(eigenvalues).V' + gamma.I \n");
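
// Worked example of the variance rule above (illustrative numbers, not taken
// from the library): with empirical eigenvalues (4, 1), gamma=0.1,
// min_eig=0.5 and use_last_eig=false:
//   var_1 = max(4 + 0.1, 0.5) = 4.1
//   var_2 = max(1 + 0.1, 0.5) = 1.1
//   remaining_var = max(gamma, min_eig) = 0.5
//   D_11 = 4.1 - 0.5 = 3.6,   D_22 = 1.1 - 0.5 = 0.6
// so C = V.diag(3.6, 0.6).V' + 0.5*I, and the variance along eigenvector v_1
// is indeed 3.6 + 0.5 = 4.1, as prescribed by var_1.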


void GaussianDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(covarmat, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(eigenvectors, copies);
    deepCopyField(given_mu, copies);
    deepCopyField(given_covarmat, copies); // was missing: given_covarmat is a Mat option and must be deep-copied too
}


GaussianDistribution::GaussianDistribution()
    :k(1000),
     gamma(0),
     min_eig(0),
     use_last_eig(false),
     ignore_weights_below(0)
{
}


void GaussianDistribution::declareOptions(OptionList& ol)
{
    // Build options
    declareOption(ol, "k", &GaussianDistribution::k, OptionBase::buildoption,
                  "Number of eigenvectors to keep when training.");

    declareOption(ol, "gamma", &GaussianDistribution::gamma, OptionBase::buildoption,
                  "Value to add to the empirical eigenvalues to obtain the actual variance.\n");
    declareOption(ol, "min_eig", &GaussianDistribution::min_eig, OptionBase::buildoption,
                  "Imposes a minimum over the actual variances to be used.\n"
                  "Actual variance used in the principal directions is max(min_eig, eigenvalue_i+gamma).\n");
    declareOption(ol, "use_last_eig", &GaussianDistribution::use_last_eig, OptionBase::buildoption,
                  "If true, the actual variance used for directions in the nullspace of VDV' \n"
                  "(i.e. orthogonal to the kept eigenvectors) will be the same as the \n"
                  "actual variance used for the last principal direction. \n"
                  "If false, the actual variance used for directions in the nullspace \n"
                  "will be max(min_eig, gamma).\n");

    declareOption(ol, "ignore_weights_below", &GaussianDistribution::ignore_weights_below, OptionBase::buildoption | OptionBase::nosave,
                  "DEPRECATED: When doing a weighted fitting (weightsize==1), points with a weight below this value will be ignored.");

    declareOption(ol, "given_mu", &GaussianDistribution::given_mu, OptionBase::buildoption,
                  "If this is set (i.e. not an empty vec), then train will not learn mu from the data, but will simply copy the value given here.");

    declareOption(ol, "given_covarmat", &GaussianDistribution::given_covarmat, OptionBase::buildoption,
                  "If this is set (i.e. not an empty mat), then train will not learn the covariance matrix from the data, but will simply copy the value given here.");

    // Learnt options
    declareOption(ol, "mu", &GaussianDistribution::mu, OptionBase::learntoption, "");
    declareOption(ol, "covarmat", &GaussianDistribution::covarmat, OptionBase::learntoption, "");
    declareOption(ol, "eigenvalues", &GaussianDistribution::eigenvalues, OptionBase::learntoption, "");
    declareOption(ol, "eigenvectors", &GaussianDistribution::eigenvectors, OptionBase::learntoption, "");

    inherited::declareOptions(ol);
}
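
// Usage sketch (illustrative only, not part of the library; assumes the
// standard PLearner workflow, and omits the setup of inputsize/targetsize
// options on the learner and the training VMat):
//
//   Mat data(100, 2);                       // 100 samples in dimension 2
//   // ... fill 'data' with training samples ...
//   PP<GaussianDistribution> g = new GaussianDistribution();
//   g->k = 2;                               // keep the 2 leading eigen-directions
//   g->gamma = 1e-6;                        // small ridge added to the eigenvalues
//   g->build();
//   g->setTrainingSet(VMat(data));
//   g->train();                             // fits mu, covarmat and the eigendecomposition
//   real lp = g->log_density(data(0));      // log-density of the first sample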

////////////////////
// declareMethods //
////////////////////
void GaussianDistribution::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is
    // different from what is done in declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "computeEigenDecomposition", &GaussianDistribution::computeEigenDecomposition,
        (BodyDoc("Compute eigenvectors and corresponding eigenvalues.\n")));
}

///////////
// build //
///////////
void GaussianDistribution::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void GaussianDistribution::build_()
{
    if (!fast_exact_is_equal(ignore_weights_below, 0))
        PLERROR("In GaussianDistribution::build_ - For the sake of simplicity, the "
                "option 'ignore_weights_below' in GaussianDistribution has been "
                "removed. If you were using it, please feel free to complain.");
    if (mu.length()>0 && predicted_size<=0)
    {
        predicted_size = mu.length();
        inherited::build();
    }
}

void GaussianDistribution::forget()
{ }

void GaussianDistribution::train()
{
    VMat training_set = getTrainingSet();
    int d = training_set.width();
    int ws = training_set->weightsize();

    if(d != inputsize()+ws)
        PLERROR("In GaussianDistribution::train - width of training_set should be equal to inputsize()+weightsize()");

    // First get mean and covariance
    if(given_mu.length()>0)
    { // we have a fixed given_mu
        PLASSERT(given_covarmat.length()==0);
        d = given_mu.length();
        mu.resize(d);
        mu << given_mu;
        if(ws==0)
            computeCovar(training_set, mu, covarmat);
        else if(ws==1)
            computeInputCovar(training_set, mu, covarmat);
        else
            PLERROR("In GaussianDistribution, weightsize can only be 0 or 1");
    }
    else if(given_covarmat.length()>0)
    { // we have a fixed given_covarmat
        d = given_covarmat.length();
        PLASSERT(d==given_covarmat.width());
        covarmat.resize(d,d);
        covarmat << given_covarmat;
        if(ws==0)
            computeMean(training_set, mu);
        else if(ws==1)
            computeInputMean(training_set, mu);
        else
            PLERROR("In GaussianDistribution, weightsize can only be 0 or 1");
    }
    else
    {
        if(ws==0)
            computeMeanAndCovar(training_set, mu, covarmat);
        else if(ws==1)
            computeInputMeanAndCovar(training_set, mu, covarmat);
        else
            PLERROR("In GaussianDistribution, weightsize can only be 0 or 1");
    }

    computeEigenDecomposition();
}

void GaussianDistribution::computeEigenDecomposition()
{
    VMat training_set = getTrainingSet();
    int l = training_set.length();
    int d = training_set.width();
    int maxneigval = min(k, min(l,d));  // The maximum number of eigenvalues we want.

    // Compute eigendecomposition only if there is a training set...
    // Otherwise, just empty the eigen-* matrices
    static Mat covarmat_tmp;
    if (l>0 && maxneigval>0)
    {
        // Copy covarmat, because this matrix is destroyed by eigenVecOfSymmMat
        covarmat_tmp = covarmat.copy();
        eigenVecOfSymmMat(covarmat_tmp, maxneigval, eigenvalues, eigenvectors, (verbosity>=4));
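        // (Assumption, consistent with the truncation loop below:
        // eigenVecOfSymmMat returns the eigenvalues in decreasing order,
        // with the matching eigenvectors in the rows of 'eigenvectors',
        // so we keep only the leading strictly positive eigenvalues.)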
        int neig = 0;
        while(neig<eigenvalues.length() && eigenvalues[neig]>0.)
            neig++;
        eigenvalues.resize(neig);
        eigenvectors.resize(neig,mu.length());
    }
    else
    {
        eigenvalues.resize(0);
        eigenvectors.resize(0, mu.length());
    }
}

real GaussianDistribution::log_density(const Vec& x) const
{
    static Vec actual_eigenvalues;

    if(min_eig<=0 && !use_last_eig)
        return logOfCompactGaussian(x, mu, eigenvalues, eigenvectors, gamma, true);
    else
    {
        int neig = eigenvalues.length();
        real remaining_eig = 0; // variance for directions in null space
        actual_eigenvalues.resize(neig);
        for(int j=0; j<neig; j++)
            actual_eigenvalues[j] = max(eigenvalues[j]+gamma, min_eig);
        if(use_last_eig)
            remaining_eig = actual_eigenvalues[neig-1];
        else
            remaining_eig = max(gamma, min_eig);
        return logOfCompactGaussian(x, mu, actual_eigenvalues, eigenvectors, remaining_eig);
    }
}
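
// For reference, the density that logOfCompactGaussian is expected to
// evaluate (a sketch written from the class documentation above, not from
// the implementation of logOfCompactGaussian itself): with kept eigen-pairs
// (var_i, v_i), i=1..k, remaining variance s2, dimension d, and
// q_i = v_i.(x-mu),
//
//   log p(x) = -0.5 * ( sum_i (q_i^2/var_i + log var_i)
//                       + (|x-mu|^2 - sum_i q_i^2)/s2
//                       + (d-k)*log(s2) + d*log(2*pi) )
//
// i.e. a full Gaussian along the k kept directions and an isotropic Gaussian
// of variance s2 on their orthogonal complement.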

void GaussianDistribution::generate(Vec& x) const
{
    static Vec r;
    int neig = eigenvalues.length();
    int m = mu.length();
    r.resize(neig);

    real remaining_eig = 0;
    if(use_last_eig)
        remaining_eig = max(eigenvalues[neig-1]+gamma, min_eig);
    else
        remaining_eig = max(gamma, min_eig);

    random_gen->fill_random_normal(r);
    for(int i=0; i<neig; i++)
    {
        real neweig = max(eigenvalues[i]+gamma, min_eig)-remaining_eig;
        r[i] *= sqrt(neweig);
    }
    x.resize(m);
    transposeProduct(x,eigenvectors,r);
    if(remaining_eig>0.)
    {
        r.resize(m);
        random_gen->fill_random_normal(r,0,sqrt(remaining_eig));
        x += r;
    }
    x += mu;
}
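
// Sanity check on the sampler above: writing V for the matrix whose columns
// are the kept eigenvectors (the rows of 'eigenvectors'), the generated
// sample is x = mu + V.diag(sqrt(D_ii)).z1 + sqrt(remaining_var).z2 with
// z1, z2 independent standard normal vectors and
// D_ii = max(eigenvalue_i+gamma, min_eig) - remaining_var, so that
//   Cov(x) = V.D.V' + remaining_var.I
// which matches the covariance documented at the top of this file.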

///////////////
// inputsize //
///////////////
int GaussianDistribution::inputsize() const {
    if (train_set || mu.length() == 0)
        return inherited::inputsize();
    return mu.length();
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :