// -*- C++ -*-

// PCA.cc
//
// Copyright (C) 2003  Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: PCA.cc 8184 2007-10-15 20:09:46Z nouiz $
 ******************************************************* */

#define PL_LOG_MODULE_NAME "PCA"

#include "PCA.h"
#include <plearn/io/pl_log.h>
#include <plearn/vmat/CenteredVMatrix.h>
#include <plearn/vmat/GetInputVMatrix.h>
#include <plearn/math/plapack.h>
#include <plearn/math/random.h>
#include <plearn/vmat/VMat_basic_stats.h>

namespace PLearn {
using namespace std;

PCA::PCA()
    : _oldest_observation(-1),
      algo("classical"),
      _horizon(-1),
      ncomponents(2),
      sigmasq(0),
      normalize(false),
      normalize_warning(true),
      impute_missing(false)
{ }

PLEARN_IMPLEMENT_OBJECT(
    PCA,
    "Performs a Principal Component Analysis preprocessing (projecting on the principal directions).",
    "This learner finds the empirical covariance matrix of the input part of\n"
    "the training data, and learns to project its input vectors along the\n"
    "principal eigenvectors of that matrix, optionally scaling by the inverse\n"
    "of the square root of the eigenvalues (to obtain 'sphered', i.e.\n"
    "Normal(0,I) data).\n"
    "\n"
    "Alternative EM algorithms are provided, which may be useful when there is\n"
    "a lot of data or the dimension is very high.\n"
    "\n"
    "Note that for the 'classical' algorithm, it is no longer an error to\n"
    "specify a number of components larger than the training set's inputsize;\n"
    "if this happens, the number of components is simply set to the inputsize,\n"
    "and a warning message is output to the PCA named log.\n"
    );
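
// For reference, the projection this learner computes (see computeOutput()
// and reconstruct() below) is, in math terms:
//
//     output[i] = (x - mu) . v_i                    when !normalize
//     output[i] = (x - mu) . v_i / sqrt(lambda_i)   when normalize
//
// where v_i is the i-th principal eigenvector (row i of 'eigenvecs') and
// lambda_i the corresponding eigenvalue. With 'normalize' set, each kept
// component has unit variance on the training data, which is the 'sphered'
// Normal(0,I) behaviour described above.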

void PCA::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "ncomponents", &PCA::ncomponents, OptionBase::buildoption,
        "The number of principal components to keep (that's also the outputsize).");

    declareOption(
        ol, "sigmasq", &PCA::sigmasq, OptionBase::buildoption,
        "This gets added to the diagonal of the covariance matrix prior to\n"
        "eigen-decomposition (classical algorithm only)");

    declareOption(
        ol, "normalize", &PCA::normalize, OptionBase::buildoption,
        "If true, we divide by sqrt(eigenval) after projecting on the eigenvec.");

    declareOption(
        ol, "algo", &PCA::algo, OptionBase::buildoption,
        "The algorithm used to perform the Principal Component Analysis:\n"
        "- 'classical'   : compute the eigenvectors of the covariance matrix\n"
        "  \n"
        "- 'incremental' : uses the classical algorithm but computes the\n"
        "                  covariance matrix in an incremental manner. When\n"
        "                  'incremental' is used, a new training set is\n"
        "                  assumed to be a superset of the old training set,\n"
        "                  i.e. beginning with the rows of the old training\n"
        "                  set but ending with some new rows.\n"
        "\n"
        "- 'em'          : EM algorithm from \"EM algorithms for PCA and\n"
        "                  SPCA\" by S. Roweis\n"
        "\n"
        "- 'em_orth'     : a variant of 'em', where orthogonal components\n"
        "                  are directly computed\n");

    declareOption(
        ol, "horizon", &PCA::_horizon, OptionBase::buildoption,
        "Incremental algorithm option: this option specifies a window over\n"
        "which the PCA should be done. That is, if the length of the training\n"
        "set is greater than 'horizon', only the last 'horizon' observations\n"
        "will effectively contribute to the covariance matrix. Any negative\n"
        "value is interpreted as 'keep all observations'.\n"
        "\n"
        "Default: -1 (all observations are kept)" );

    // TODO Option added October 26th, 2004. Should be removed in a few months.
    declareOption(
        ol, "normalize_warning", &PCA::normalize_warning, OptionBase::buildoption,
        "(Temp. option). If true, display a warning about the 'normalize' option.");

    declareOption(
        ol, "impute_missing", &PCA::impute_missing,
        OptionBase::buildoption,
        "If true, when a missing value is encountered on an input variable\n"
        "during a computeOutput, it is replaced by the estimated mu for that\n"
        "variable before projecting on the principal components\n");

    // learnt options
    declareOption(
        ol, "mu", &PCA::mu, OptionBase::learntoption,
        "The (weighted) mean of the samples");

    declareOption(
        ol, "eigenvals", &PCA::eigenvals, OptionBase::learntoption,
        "The ncomponents eigenvalues corresponding to the principal directions kept");

    declareOption(
        ol, "eigenvecs", &PCA::eigenvecs, OptionBase::learntoption,
        "A ncomponents x inputsize matrix containing the principal eigenvectors");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    declareOption(
        ol, "oldest_observation", &PCA::_oldest_observation,
        OptionBase::learntoption,
        "Incremental algo:\n"
        "The first time values are fed to _incremental_stats, we must remember\n"
        "the first observation in order not to remove observations that never\n"
        "contributed to the covariance matrix.\n"
        "\n"
        "Initialized to -1." );
}
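
// A minimal usage sketch from C++ code (not part of the class; it simply
// mirrors the PLearner calls that em_algo() below performs on its internal
// 'true_pca' object -- 'trainset' and 'input' are hypothetical variables):
//
//     PP<PCA> pca = new PCA();
//     pca->ncomponents = 10;        // keep 10 principal directions
//     pca->algo        = "classical";
//     pca->normalize   = true;      // produce 'sphered' outputs
//     pca->build();
//     pca->setTrainingSet(trainset);
//     pca->train();
//
//     Vec output(pca->outputsize());
//     pca->computeOutput(input, output);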

///////////
// build //
///////////
void PCA::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void PCA::build_()
{
    if (normalize_warning)
        PLWARNING("In PCA - The default value for option 'normalize' is now 0 instead of 1. "
                  "Make sure you did not rely on this default value, "
                  "and set the 'normalize_warning' option to 0 to remove this warning.");

    if ( algo == "incremental" )
    {
        _incremental_stats.compute_covariance  = true;
        _incremental_stats.no_removal_warnings = true;
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void PCA::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                  const Vec& target, Vec& costs) const
{
    static Vec reconstructed_input;
    reconstruct(output, reconstructed_input);
    costs.resize(1);
    costs[0] = powdistance(input, reconstructed_input);
}

///////////////////
// computeOutput //
///////////////////
void PCA::computeOutput(const Vec& input, Vec& output) const
{
    static Vec x;
    x.resize(input.length());
    x << input;

    // Perform missing-value imputation if requested
    if (impute_missing)
        for (int i=0, n=x.size() ; i<n ; ++i)
            if (is_missing(x[i]))
                x[i] = mu[i];

    // Project on eigenvectors
    x -= mu;
    output.resize(ncomponents);

    if(normalize)
    {
        for(int i=0; i<ncomponents; i++)
            output[i] = dot(x,eigenvecs(i)) / sqrt(eigenvals[i]);
    }
    else
    {
        for(int i=0; i<ncomponents; i++)
            output[i] = dot(x,eigenvecs(i));
    }
}

////////////////////
// setTrainingSet //
////////////////////

void PCA::setTrainingSet( VMat training_set, bool call_forget )
{
    inherited::setTrainingSet( training_set, call_forget );

    // Even if call_forget is false, the classical PCA algorithm must start
    // from scratch if the dataset changed. If call_forget is true, forget()
    // was already called by inherited::setTrainingSet.
    if ( !call_forget && algo == "classical" )
        forget();

    if ( algo == "incremental" )
        nstages = training_set.length();
}


////////////
// forget //
////////////
void PCA::forget()
{
    stage = 0;

    if ( algo == "incremental" )
    {
        _incremental_stats.forget();
        _oldest_observation = -1;
    }
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> PCA::getTestCostNames() const
{
    return TVec<string>(1,"squared_reconstruction_error");
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> PCA::getTrainCostNames() const
{
    return TVec<string>();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void PCA::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(eigenvals, copies);
    deepCopyField(eigenvecs, copies);
}


////////////////
// outputsize //
////////////////
int PCA::outputsize() const
{
    return ncomponents;
}

///////////
// train //
///////////

void PCA::classical_algo( )
{
    if ( ncomponents > train_set->inputsize() ) {
        // Log the requested number of components before clamping it, so the
        // message actually reports what the user asked for.
        IMP_MODULE_LOG
            << "PCA::train: You asked for " << ncomponents
            << " components, but the training set inputsize is only "
            << train_set->inputsize()
            << "; using " << train_set->inputsize() << " components"
            << endl;
        ncomponents = train_set->inputsize();
    }

    PP<ProgressBar> pb;
    if (report_progress)
        pb = new ProgressBar("Training PCA", 2);

    Mat covarmat;
    computeInputMeanAndCovar(train_set, mu, covarmat, sigmasq);
    if (mu.hasMissing() || covarmat.hasMissing())
        PLERROR("PCA::classical_algo: missing values encountered in training set\n");
    if (pb)
        pb->update(1);

    eigenVecOfSymmMat(covarmat, ncomponents, eigenvals, eigenvecs);
    if (pb)
        pb->update(2);

    stage += 1;
}
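
// Note on 'sigmasq' (passed to computeInputMeanAndCovar above): as stated in
// declareOptions, it is added to the diagonal of the covariance matrix before
// the eigen-decomposition, i.e. the decomposition is effectively done on
// (C + sigmasq * I). Since (C + sigmasq*I) v = (lambda + sigmasq) v for any
// eigenpair (lambda, v) of C, this shifts every eigenvalue up by sigmasq
// while leaving the principal directions themselves unchanged.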

void PCA::incremental_algo()
{
    PP<ProgressBar> pb;
    if (report_progress)
        pb = new ProgressBar("Incremental PCA", 2);

    int start = stage;
    if ( stage == 0 && _horizon > 0 )
    {
        int window_start = train_set.length() - _horizon;
        start = window_start > 0 ? window_start : 0;
    }

    /*
      The first time values are fed to _incremental_stats, we must remember
      the first observation in order not to remove observations that never
      contributed to the covariance matrix.

      See the 'if ( _horizon > 0 && ... )' statement below.
    */
    if ( _oldest_observation == -1 )
        _oldest_observation = start;
    PLASSERT( _horizon <= 0 || (start-_horizon) <= _oldest_observation );

    Vec observation( train_set.width() );
    for ( int obs=start; obs < train_set.length(); obs++ )
    {
        // Store the new observation
        observation << train_set( obs );
        if (observation.hasMissing())
            PLERROR("PCA::incremental_algo: missing values encountered in training set\n");

        // This adds the contribution of the new observation
        _incremental_stats.update( observation );

        if ( _horizon > 0 &&
             (obs - _horizon) == _oldest_observation )
        {
            // Store the old observation
            observation << train_set( _oldest_observation );

            // This removes the contribution of the old observation
            _incremental_stats.remove_observation( observation );
            _oldest_observation++;
        }
    }

    if (pb)
        pb->update(1);

    // Recompute the eigenvals and eigenvecs from the updated
    // incremental statistics
    mu           =  _incremental_stats.getMean();
    Mat covarmat =  _incremental_stats.getCovariance();
    eigenVecOfSymmMat( covarmat, ncomponents, eigenvals, eigenvecs );

    if (pb)
        pb->update(2);

    // Remember the number of observations
    stage = train_set.length();
}
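
// Worked example of the sliding window (hypothetical numbers): on a first
// call with _horizon == 100 and train_set.length() == 250, we get
// start = 250 - 100 = 150 and _oldest_observation = 150; rows 150..249 are
// accumulated and the removal condition (obs - 100 == 150) is never met, so
// exactly the last 100 observations contribute to the covariance. On a later
// call with 50 new rows, each new observation that is added pushes
// obs - _horizon past _oldest_observation, so one old row is removed per new
// row and the window stays at 100 observations.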

// Note: this is essentially the former body of the 'if ( algo == "em" )'
// branch of train(); some clean-up remains to be done.
void PCA::em_algo()
{
    PP<ProgressBar> pb;

    int n = train_set->length();
    int p = train_set->inputsize();
    int k = ncomponents;

    // Fill the matrix C with random data.
    Mat C(k,p);
    fill_random_normal(C);
    // Center the data.
    VMat centered_data = new CenteredVMatrix(new GetInputVMatrix(train_set));
    Vec sample_mean = static_cast<CenteredVMatrix*>((VMatrix*) centered_data)->getMu();
    mu.resize(sample_mean.length());
    mu << sample_mean;
    Mat Y = centered_data.toMat();
    Mat X(n,k);
    Mat tmp_k_k(k,k);
    Mat tmp_k_k_2(k,k);
    Mat tmp_p_k(p,k);
    Mat tmp_k_n(k,n);
    // Iterate through EM.
    if (report_progress)
        pb = new ProgressBar("Training EM PCA", nstages - stage);
    int init_stage = stage;
    while (stage < nstages) {
        // E-step: X <- Y C' (C C')^-1
        productTranspose(tmp_k_k, C, C);
        matInvert(tmp_k_k, tmp_k_k_2);
        transposeProduct(tmp_p_k, C, tmp_k_k_2);
        product(X, Y, tmp_p_k);
        // M-step: C <- (X' X)^-1 X' Y
        transposeProduct(tmp_k_k, X, X);
        matInvert(tmp_k_k, tmp_k_k_2);
        productTranspose(tmp_k_n, tmp_k_k_2, X);
        product(C, tmp_k_n, Y);
        stage++;
        if (report_progress)
            pb->update(stage - init_stage);
    }
    // Compute the orthonormal projection matrix.
    int n_base = GramSchmidtOrthogonalization(C);
    if (n_base != k) {
        PLWARNING("In PCA::train - The rows of C are not linearly independent");
    }
    // Compute the projected data.
    productTranspose(X, Y, C);
    // And do a PCA to get the eigenvectors and eigenvalues.
    PCA true_pca;
    VMat proj_data(X);
    true_pca.ncomponents = k;
    true_pca.normalize = 0;
    true_pca.setTrainingSet(proj_data);
    true_pca.train();
    // Transform back eigenvectors to input space.
    eigenvecs.resize(k, p);
    product(eigenvecs, true_pca.eigenvecs, C);
    eigenvals.resize(k);
    eigenvals << true_pca.eigenvals;
}
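
// Why EM here at all? As argued in the Roweis paper cited in declareOptions,
// each iteration above costs O(n*p*k) time and never forms the p x p
// covariance matrix, so when k << p this can be much cheaper than the
// classical O(n*p^2) covariance accumulation. At convergence, C spans the
// principal subspace but its rows are neither orthogonal nor sorted, which
// is why the code finishes with a Gram-Schmidt step and a small 'true_pca'
// run in the k-dimensional projected space to recover the individual
// eigenvectors and eigenvalues.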

// Note: like em_algo(), this is essentially the former body of the
// 'if ( algo == "em_orth" )' branch of train(); some clean-up remains
// to be done.
void PCA::em_orth_algo()
{
    PP<ProgressBar> pb;

    int n = train_set->length();
    int p = train_set->inputsize();
    int k = ncomponents;
    // Fill the matrix C with random data.
    Mat C(k,p);
    fill_random_normal(C);
    // Ensure it is orthonormal.
    GramSchmidtOrthogonalization(C);
    // Center the data.
    VMat centered_data = new CenteredVMatrix(new GetInputVMatrix(train_set));
    Vec sample_mean = static_cast<CenteredVMatrix*>((VMatrix*) centered_data)->getMu();
    mu.resize(sample_mean.length());
    mu << sample_mean;
    Mat Y = centered_data.toMat();
    Mat Y_copy(n,p);
    Mat X(n,k);
    Mat tmp_k_k(k,k);
    Mat tmp_k_k_2(k,k);
    Mat tmp_p_k(p,k);
    Mat tmp_k_n(k,n);
    Mat tmp_n_1(n,1);
    Mat tmp_n_p(n,p);
    Mat X_j, C_j;
    Mat x_j_prime_x_j(1,1);
    // Iterate through EM.
    if (report_progress)
        pb = new ProgressBar("Training EM PCA", nstages - stage);
    int init_stage = stage;
    Y_copy << Y;
    while (stage < nstages) {
        Y << Y_copy;
        for (int j = 0; j < k; j++) {
            C_j = C.subMatRows(j, 1);
            X_j = X.subMatColumns(j,1);
            // E-step: X_j <- Y C_j'
            productTranspose(X_j, Y, C_j);
            // M-step: C_j <- (X_j' X_j)^-1 X_j' Y
            transposeProduct(x_j_prime_x_j, X_j, X_j);
            transposeProduct(C_j, X_j, Y);
            C_j /= x_j_prime_x_j(0,0);
            // Normalize the new direction.
            PLearn::normalize(C_j, 2.0);
            // Subtract the component along this new direction, so as to
            // obtain orthogonal directions.
            productTranspose(tmp_n_1, Y, C_j);
            negateElements(Y);
            productAcc(Y, tmp_n_1, C_j);
            negateElements(Y);
        }
        stage++;
        if (report_progress)
            pb->update(stage - init_stage);
    }
    // Check orthonormality of C.
    for (int i = 0; i < k; i++) {
        for (int j = i; j < k; j++) {
            real dot_i_j = dot(C(i), C(j));
            if (i != j) {
                if (abs(dot_i_j) > 1e-6) {
                    PLWARNING("In PCA::train - It looks like some vectors are not orthogonal");
                }
            } else {
                if (abs(dot_i_j - 1) > 1e-6) {
                    PLWARNING("In PCA::train - It looks like a vector is not normalized");
                }
            }
        }
    }
    // Compute the projected data.
    Y << Y_copy;
    productTranspose(X, Y, C);
    // Compute the empirical variance on each projected axis, in order
    // to obtain the eigenvalues.
    VMat X_vm(X);
    Vec mean_proj, var_proj;
    computeMeanAndVariance(X_vm, mean_proj, var_proj);
    eigenvals.resize(k);
    eigenvals << var_proj;
    // Copy the eigenvectors.
    eigenvecs.resize(k, p);
    eigenvecs << C;
}

void PCA::train()
{
    if ( stage < nstages )
    {
        if ( algo == "classical" )
            classical_algo( );

        else if( algo == "incremental" )
            incremental_algo();

        else if ( algo == "em" )
            em_algo();

        else if ( algo == "em_orth" )
            em_orth_algo( );

        else
            PLERROR("In PCA::train - Unknown value for 'algo'");
    }

    else
        PLWARNING("In PCA::train - The learner has already been trained, skipping training");
}


/////////////////
// reconstruct //
/////////////////
void PCA::reconstruct(const Vec& output, Vec& input) const
{
    input.resize(mu.length());
    input << mu;

    int n = output.length();
    if(normalize)
    {
        for(int i=0; i<n; i++)
            multiplyAcc(input, eigenvecs(i), output[i]*sqrt(eigenvals[i]));
    }
    else
    {
        for(int i=0; i<n; i++)
            multiplyAcc(input, eigenvecs(i), output[i]);
    }
}
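
// Sanity check: reconstruct() is the inverse of computeOutput() on the span
// of the kept eigenvectors -- the sqrt(eigenvals[i]) factor here undoes the
// division applied in computeOutput() when 'normalize' is true. A
// hypothetical round trip ('pca' and 'input' as in the usage sketch above):
//
//     Vec out, rec;
//     pca->computeOutput(input, out);
//     pca->reconstruct(out, rec);
//     // powdistance(input, rec) is the 'squared_reconstruction_error'
//     // test cost; it is 0 whenever input - mu lies in the kept subspace.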

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :