// -*- C++ -*-

// WPLS.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: WPLS.cc 5370 2006-04-12 15:27:55Z tihocan $
 ******************************************************* */

// Authors: Olivier Delalleau, Charles Dugas

#include "WPLS.h"
#include <plearn/math/plapack.h>
#include <plearn/vmat/ShiftAndRescaleVMatrix.h>
#include <plearn/vmat/SubVMatrix.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/math/pl_math.h>       //!< for isnan
#include <plearn/vmat/VMat_linalg.h>
#include <plearn/io/load_and_save.h>
#include <plearn_learners/generic/VPLCombinedLearner.h>
#include <plearn_learners/generic/VPLPreprocessedLearner2.h>

namespace PLearn {
using namespace std;

WPLS::WPLS()
    : m(-1),
      p(-1),
      w(0),
      method("kernel"),
      precision(1e-6),
      output_the_score(0),
      output_the_target(1),
      parent_filename("tmp"),
      parent_sub(0)
{}

PLEARN_IMPLEMENT_OBJECT(WPLS,
                        "Weighted Partial Least Squares Regression (WPLSR).",
                        "You can use this learner to perform regression, and / or dimensionality\n"
                        "reduction.\n"
                        "WPLS regression assumes the target Y and the data X are linked through:\n"
                        " Y = T.Q' + E\n"
                        " X = T.P' + F\n"
                        "The underlying coefficients T (the 'scores') and the loading matrices\n"
                        "Q and P are sought. It is then possible to compute the prediction y for\n"
                        "a new input x, as well as its score vector t (its representation in\n"
                        "lower-dimensional coordinates).\n"
                        "The available algorithms to perform WPLS (chosen by the 'method' option) are:\n"
                        "\n"
                        " ====  WPLS1  ====\n"
                        "The classical WPLS algorithm, suitable only for a 1-dimensional target. The\n"
                        "following algorithm is taken from 'Factor Analysis in Chemistry', with an\n"
                        "additional loop that (I believe) was missing:\n"
                        " (1) Let X (n x p) = the centered and normalized input data\n"
                        "     Let y (n x 1) = the centered and normalized target data\n"
                        "     Let k be the number of components extracted\n"
                        " (2) s = y\n"
                        " (3) lx' = s' X, s = X lx (normalized)\n"
                        " (4) If s has changed by more than 'precision', loop to (3)\n"
                        " (5) ly = s' y\n"
                        " (6) lx' = s' X\n"
                        " (7) Store s, lx and ly in the columns of respectively T, P and Q\n"
                        " (8) X = X - s lx', y = y - s ly, loop to (2) k times\n"
                        " (9) Set W = (T P')^(+) T, where the ^(+) is the right pseudoinverse\n"
                        "\n"
                        " ==== Kernel ====\n"
                        "The code implements a NIPALS-WPLS-like algorithm, which is a so-called\n"
                        "'kernel' algorithm (faster than more classical implementations).\n"
                        "The algorithm, inspired from 'Factor Analysis in Chemistry' and above all\n"
                        "www.statsoftinc.com/textbook/stwpls.html, is the following:\n"
                        " (1) Let X (n x p) = the centered and normalized input data\n"
                        "     Let Y (n x m) = the centered and normalized target data\n"
                        "     Let k be the number of components extracted\n"
                        " (2) Initialize A_0 = X'Y, M_0 = X'X, C_0 = Identity(p), and h = 0\n"
                        " (3) q_h = largest eigenvector of B_h = A_h' A_h, found by the NIPALS method:\n"
                        "       (3.a) q_h = a (normalized) random column of B_h\n"
                        "       (3.b) q_h = B_h q_h\n"
                        "       (3.c) normalize q_h\n"
                        "       (3.d) if q_h has changed by more than 'precision', go to (b)\n"
                        " (4) w_h = C_h A_h q_h, normalize w_h and store it in a column of W (p x k)\n"
                        " (5) p_h = M_h w_h, c_h = w_h' p_h, p_h = p_h / c_h and store it in a column\n"
                        "     of P (p x k)\n"
                        " (6) q_h = A_h' w_h / c_h, and store it in a column of Q (m x k)\n"
                        " (7) A_h+1 = A_h - c_h p_h q_h'\n"
                        "     M_h+1 = M_h - c_h p_h p_h',\n"
                        "     C_h+1 = C_h - w_h p_h'\n"
                        " (8) h = h+1, and if h < k, go to (3)\n"
                        "\n"
                        "The result is then given by:\n"
                        " - Y = X B, with B (p x m) = W Q'\n"
                        " - T = X W, where T is the score (reduced coordinates)\n"
                        "\n"
                        "You can choose to have the score (T) and / or the target (Y) in the output\n"
                        "of the learner (default is target only, i.e. regression)."
    );
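
// Illustrative usage sketch (not taken from this file): in a PLearn script,
// a WPLS learner might be specified along these lines, assuming the usual
// PLearn object-specification syntax; the option values are arbitrary
// examples, not defaults.
//
//   WPLS(
//       method = "wpls1";
//       nstages = 5;            # number of components k to extract
//       precision = 1e-6;
//       output_the_score = 0;
//       output_the_target = 1;  # output the regression prediction only
//   )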

////////////////////
// declareOptions //
////////////////////
void WPLS::declareOptions(OptionList& ol)
{
    // Build options.

    declareOption(ol, "method", &WPLS::method, OptionBase::buildoption,
                  "The WPLS algorithm used ('wpls1' or 'kernel', see help for more details).\n");

    declareOption(ol, "output_the_score", &WPLS::output_the_score, OptionBase::buildoption,
                  "If set to 1, then the score (the low-dimensional representation of the input)\n"
                  "will be included in the output (before the target).");

    declareOption(ol, "output_the_target", &WPLS::output_the_target, OptionBase::buildoption,
                  "If set to 1, then (the prediction of) the target will be included in the\n"
                  "output (after the score).");

    declareOption(ol, "parent_filename", &WPLS::parent_filename, OptionBase::buildoption,
                  "For hyper-parameter selection purposes: filename of a previously trained\n"
                  "(parent) learner, used for incremental learning to speed up the process.");

    declareOption(ol, "parent_sub", &WPLS::parent_sub, OptionBase::buildoption,
                  "Tells which of the sublearners (of the combined learner) should be used.");

    declareOption(ol, "precision", &WPLS::precision, OptionBase::buildoption,
                  "The precision to which we compute the eigenvectors.");

    // Learnt options.

    declareOption(ol, "B", &WPLS::B, OptionBase::learntoption,
                  "The regression matrix in Y = X.B + E.");

    declareOption(ol, "m", &WPLS::m, OptionBase::learntoption,
                  "Used to store the target size.");

    declareOption(ol, "mean_input", &WPLS::mean_input, OptionBase::learntoption,
                  "The mean of the input data X.");

    declareOption(ol, "mean_target", &WPLS::mean_target, OptionBase::learntoption,
                  "The mean of the target data Y.");

    declareOption(ol, "p", &WPLS::p, OptionBase::learntoption,
                  "Used to store the input size.");

    declareOption(ol, "P", &WPLS::P, OptionBase::learntoption,
                  "Matrix that maps features to observed inputs: X = T.P' + F.");

    declareOption(ol, "Q", &WPLS::Q, OptionBase::learntoption,
                  "Matrix that maps features to observed outputs: Y = T.Q' + E.");

    declareOption(ol, "stddev_input", &WPLS::stddev_input, OptionBase::learntoption,
                  "The standard deviation of the input data X.");

    declareOption(ol, "stddev_target", &WPLS::stddev_target, OptionBase::learntoption,
                  "The standard deviation of the target data Y.");

    declareOption(ol, "w", &WPLS::w, OptionBase::learntoption,
                  "Used to store the weight size (0 or 1).");

    declareOption(ol, "W", &WPLS::W, OptionBase::learntoption,
                  "The regression matrix in T = X.W.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void WPLS::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void WPLS::build_()
{
    PLASSERT(precision > 0);

    if (train_set) {
        this->m = train_set->targetsize();
        this->p = train_set->inputsize();
        this->w = train_set->weightsize();
        // Check method consistency.
        if (method == "wpls1") {
            // Make sure the target is 1-dimensional.
            if (m != 1) {
                PLERROR("In WPLS::build_ - With the 'wpls1' method, target should be 1-dimensional");
            }
        } else if (method == "kernel") {
            PLERROR("In WPLS::build_ - option 'method=kernel' not implemented yet");
        } else {
            PLERROR("In WPLS::build_ - Unknown value for option 'method'");
        }
    }
    if (!output_the_score && !output_the_target) {
        // Weird, we don't want any output ??
        PLWARNING("In WPLS::build_ - There will be no output");
    }
}

/////////////////////////////
// computeCostsFromOutputs //
/////////////////////////////
void WPLS::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                   const Vec& target, Vec& costs) const
{
    costs.resize(1);
    // The only computed cost is the squared distance between output and target (mse).
    costs[0] = powdistance(output, target, 2.0);
}

///////////////////
// computeOutput //
///////////////////
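// Prediction: a raw input x is first standardized with the training
// statistics, x_n = (x - mean_input) / stddev_input (componentwise); then
//  - the score is t = W' x_n (if output_the_score),
//  - the target prediction is y = mean_target + stddev_target .* (B' x_n)
//    (if output_the_target),
// where .* denotes componentwise multiplication. This mirrors the code below.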
void WPLS::computeOutput(const Vec& input, Vec& output) const
{
    static Vec input_copy;
    if (W.width() == 0)
        PLERROR("WPLS::computeOutput but model was not trained!");
    // Compute the output from the input.
    int nout = outputsize();
    output.resize(nout);
    // First normalize the input.
    input_copy.resize(this->p);
    input_copy << input;
    input_copy -= mean_input;
    input_copy /= stddev_input;
    int target_start = 0;
    if (output_the_score) {
        transposeProduct(output.subVec(0, this->nstages), W, input_copy);
        target_start = this->nstages;
    }
    if (output_the_target) {
        if (this->m > 0) {
            Vec target = output.subVec(target_start, this->m);
            transposeProduct(target, B, input_copy);
            target *= stddev_target;
            target += mean_target;
        } else {
            // This is just a safety check, since it should never happen.
            PLWARNING("In WPLS::computeOutput - You ask to output the target but the target size is <= 0");
        }
    }
}

////////////
// forget //
////////////
void WPLS::forget()
{
    stage = 0;
    // Free memory.
    B = Mat();
    W = Mat();
    P = Mat();
    Q = Mat();
}

//////////////////////
// getTestCostNames //
//////////////////////
TVec<string> WPLS::getTestCostNames() const
{
    // The only test cost is the mse.
    TVec<string> t;
    t.append("mse");
    return t;
}

///////////////////////
// getTrainCostNames //
///////////////////////
TVec<string> WPLS::getTrainCostNames() const
{
    // No train cost computed.
    TVec<string> t;
    return t;
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void WPLS::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Deep-copy all matrix and vector fields so the copy does not share
    // storage with the original.
    deepCopyField(B, copies);
    deepCopyField(mean_input, copies);
    deepCopyField(mean_target, copies);
    deepCopyField(stddev_input, copies);
    deepCopyField(stddev_target, copies);
    deepCopyField(W, copies);
    deepCopyField(P, copies);
    deepCopyField(Q, copies);
}

///////////////////////
// NIPALSEigenvector //
///////////////////////
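// Power iteration for the dominant eigenvector of m: starting from the first
// column of m, repeatedly apply m and renormalize until no coordinate moves
// by more than 'precision'. This converges to the eigenvector of largest
// absolute eigenvalue, provided the starting vector is not orthogonal to it.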
void WPLS::NIPALSEigenvector(const Mat& m, Vec& v, real precision) {
    int n = v.length();
    Vec wtmp(n);
    v << m.column(0);
    normalize(v, 2.0);
    bool ok = false;
    while (!ok) {
        wtmp << v;
        product(v, m, wtmp);
        normalize(v, 2.0);
        ok = true;
        for (int i = 0; i < n && ok; i++) {
            if (fabs(v[i] - wtmp[i]) > precision) {
                ok = false;
            }
        }
    }
}

////////////////
// outputsize //
////////////////
int WPLS::outputsize() const
{
    int os = 0;
    if (output_the_score) {
        os += this->nstages;
    }
    if (output_the_target && m >= 0) {
        // If m < 0, this means we don't know yet the target size, thus we
        // shouldn't report it here.
        os += this->m;
    }
    return os;
}

// Unbiased estimators of mean and variance are (all sums taken over index i)
// xbar = [ sum(wi*xi) ] / [ sum(wi) ]
// var  = [ sum(wi*xi*xi) - xbar * xbar * sum(wi) ] / [ sum(wi) - sum(wi*wi) / sum(wi) ]
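// Sanity check: with all weights wi = 1 we get sum(wi) = n and
// sum(wi*wi) = n, so the denominator of 'var' becomes n - 1 and the formulas
// reduce to the usual unbiased sample mean and variance.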
void computeWeightedInputOutputMeansAndStddev(const VMat& d, Vec& means, Vec& stddev)
{
    PLASSERT( d->inputsize() >= 0 );
    int n = d->length();
    int p = d->inputsize();
    int m = d->targetsize();
    means.resize(p+m);
    stddev.resize(p+m);
    Vec input(p+m), target(p+m);
    real weight;
    real sum_wi = 0.0;
    real sum_wi2 = 0.0;
    Vec sum_wixi(p+m), sum_wixi2(p+m);
    sum_wixi.fill(0.0);
    sum_wixi2.fill(0.0);
    for (int i = 0; i < n; i++) {
        d->getExample(i, input, target, weight);
        sum_wi += weight;
        sum_wi2 += weight * weight;
        for (int j = 0; j < p; j++) {
            sum_wixi[j]  += weight*input[j];
            sum_wixi2[j] += weight*input[j]*input[j];
        }
        for (int j = 0; j < m; j++) {
            sum_wixi[p+j]  += weight*target[j];
            sum_wixi2[p+j] += weight*target[j]*target[j];
        }
    }

    real adjust = sqrt( sum_wi - sum_wi2 / sum_wi );
    real xbar;
    real var;
    for (int j = 0; j < p+m; j++)
    {
        xbar      = sum_wixi[j]/sum_wi;
        means[j]  = xbar;
        // Make sure we take the sqrt of a number >= 0.
        var = sum_wixi2[j] - xbar * xbar * sum_wi;
        if (var < 0.)
        {
            PLWARNING("In WPLS::computeWeightedInputOutputMeansAndStddev: var < 0; setting it to 0.");
            var = 0.;
        }
        stddev[j] = sqrt(var) / adjust;
        if (stddev[j] < 1e-10)
            stddev[j] = 1.0;
    }
}

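// Multiplies each column of m componentwise by v, i.e. scales row i of m by
// v[i]. Used in train() below to apply the square-root weights to the rows
// of X.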
void multiplyColumns(Mat& m, Vec& v)
{
    int n = m.length();
    int p = m.width();
    real vi;
    if (v.length() != n)
        PLERROR("Matrix and vector lengths do not match");
    for (int i = 0; i < n; i++) {
        vi = v[i];
        for (int j = 0; j < p; j++)
            m(i,j) *= vi;
    }
}

///////////
// train //
///////////
void WPLS::train()
{
    if (stage == nstages) {
        // Already trained.
        if (verbosity >= 1)
            pout << "Skipping WPLS training" << endl;
        return;
    }
    if (verbosity >= 1)
        pout << "WPLS training started" << endl;

    int n    = train_set->length();
    int wlen = train_set->weightsize();
    VMat d = new SubVMatrix(train_set, 0, 0, train_set->length(), train_set->width());
    d->defineSizes(train_set->inputsize(), train_set->targetsize(), train_set->weightsize(), 0);
    Vec means, stddev;
    computeWeightedInputOutputMeansAndStddev(d, means, stddev);
    if (verbosity >= 2) {
        pout << "means = " << means << endl;
        pout << "stddev = " << stddev << endl;
    }
    normalize(d, means, stddev);
    mean_input  = means.subVec(0, p);
    mean_target = means.subVec(p, m);
    stddev_input  = stddev.subVec(0, p);
    stddev_target = stddev.subVec(p, m);

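    // Build shift = -mean and scale = 1/stddev below, so that the
    // ShiftAndRescaleVMatrix instances produce the standardized data
    // (x - mean) / stddev.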
    Vec shift_input(p), scale_input(p), shift_target(m), scale_target(m);
    shift_input << mean_input;
    scale_input << stddev_input;
    shift_target << mean_target;
    scale_target << stddev_target;
    negateElements(shift_input);
    invertElements(scale_input);
    negateElements(shift_target);
    invertElements(scale_target);

    VMat input_part = new SubVMatrix(train_set,
                                     0, 0,
                                     train_set->length(),
                                     train_set->inputsize());
    PP<ShiftAndRescaleVMatrix> X_vmat =
        new ShiftAndRescaleVMatrix(input_part, shift_input, scale_input, true);
    X_vmat->verbosity = this->verbosity;
    VMat X_vmatrix = static_cast<ShiftAndRescaleVMatrix*>(X_vmat);
    Mat X = X_vmatrix->toMat();

    VMat target_part = new SubVMatrix( train_set,
                                       0, train_set->inputsize(),
                                       train_set->length(),
                                       train_set->targetsize());
    PP<ShiftAndRescaleVMatrix> Y_vmat =
        new ShiftAndRescaleVMatrix(target_part, shift_target, scale_target, true);
    Y_vmat->verbosity = this->verbosity;
    VMat Y_vmatrix = static_cast<ShiftAndRescaleVMatrix*>(Y_vmat);
    Vec Y(n);
    Y << Y_vmatrix->toMat();

    VMat weight_part = new SubVMatrix( train_set,
                                       0, train_set->inputsize() + train_set->targetsize(),
                                       train_set->length(),
                                       train_set->weightsize());
    PP<ShiftAndRescaleVMatrix> WE_vmat;
    VMat WE_vmatrix;
    Vec WE(n);
    Vec sqrtWE(n);
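    // Weighting trick: scaling row i of X and entry i of Y by sqrt(w_i)
    // turns the weighted least-squares criterion sum_i w_i (y_i - x_i.b)^2
    // into an ordinary one on the rescaled data, so the unweighted WPLS1
    // iterations below solve the weighted problem.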
    if (wlen > 0) {
        WE_vmat = new ShiftAndRescaleVMatrix(weight_part);
        WE_vmat->verbosity = this->verbosity;
        WE_vmatrix = static_cast<ShiftAndRescaleVMatrix*>(WE_vmat);
        WE << WE_vmatrix->toMat();
        sqrtWE << sqrt(WE);
        multiplyColumns(X, sqrtWE);
        Y *= sqrtWE;
    } else
        WE.fill(1.0);

    // Some common initialization.
    W.resize(p, nstages);
    P.resize(p, nstages);
    Q.resize(m, nstages);

    if (method == "kernel") {
        PLERROR("You shouldn't be here... !?");
    } else if (method == "wpls1") {
        Vec s(n);
        Vec old_s(n);
        Vec lx(p);
        Vec ly(1);
        Mat T(n,nstages);
        Mat tmp_np(n,p), tmp_pp(p,p);

        PP<ProgressBar> pb;
        if (report_progress) {
            pb = new ProgressBar("Computing the components", nstages);
        }
        bool finished;
        real dold;

        if (parent_filename != "") {
            // Substitute the current split for the one in the parent's
            // filename, then reload the parent learner.
            string expdir = getExperimentDirectory();
            int pos = expdir.find("Split");
            string the_split = expdir.substr(pos,6);
            int pos2 = parent_filename.find("Split");
            int nremain = parent_filename.length() - 6 - pos2;
            parent_filename = parent_filename.substr(0,pos2) + the_split + parent_filename.substr(pos2+6,nremain);
            PP<VPLCombinedLearner> combined_parent;
            PLearn::load(parent_filename, combined_parent);
            //if (VPLPreprocessedLearner2* vplpl = dynamic_cast<VPLPreprocessedLearner2*>(
            //        combined_parent->sublearners_[parent_sub]))
            //{
            //    parent = vplpl->learner_;
            //}
            //else
            //    PLERROR("Unsupported type for sublearners of the combined learner");

            PP<VPLPreprocessedLearner2> vplpl = (PP<VPLPreprocessedLearner2>)(combined_parent->sublearners_[parent_sub]);
            PP<WPLS> parent = (PP<WPLS>)(vplpl->learner_);
            //VPLPreprocessedLearner2* vplpl = (VPLPreprocessedLearner2*)(combined_parent->sublearners_[parent_sub]);
            //WPLS* parent = (WPLS*)(vplpl->learner_);
            int k = parent->nstages;
            Mat tmp_nk(n,k);
            if (parent->stage < nstages) {
                // The parent has fewer components than requested: subtract its
                // contribution from X and Y, then resume training from its stage.
                Mat tmp_n1(n,1);
                product(tmp_n1, X, parent->B);
                for (int i = 0; i < n; i++)
                    Y[i] -= tmp_n1(i,0);
                product(tmp_nk, X, parent->W);
                productTranspose(tmp_np, tmp_nk, parent->P);
                X -= tmp_np;
                stage = k;
            } else {
                // The parent already has at least as many components as
                // requested: simply truncate its results.
                product(tmp_nk, X, parent->W);
                T = tmp_nk.subMat(0,0,n,nstages);
                P = (parent->P).subMat(0,0,p,nstages);
                Q = (parent->Q).subMat(0,0,m,nstages);
                stage = nstages;
            }
        }
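        // Main WPLS1 loop (steps (2)-(8) of the class help): for each
        // component, iterate s between X and Y until convergence, store the
        // score s and the loadings lx, ly, then deflate X and Y.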
        while (stage < nstages) {
            if (verbosity >= 1)
                pout << "stage=" << stage << endl;
            s << Y;
            normalize(s, 2.0);
            finished = false;
            int count = 0;
            while (!finished) {
                count++;
                old_s << s;
                transposeProduct(lx, X, s);
                product(s, X, lx);
                normalize(s, 2.0);
                dold = norm(old_s - s);
                if (isnan(dold))
                    PLERROR("dold is nan");
                if (dold < precision)
                    finished = true;
                else {
                    if (verbosity >= 2)
                        pout << "dold = " << dold << endl;
                    if (count % 100 == 0 && verbosity >= 1)
                        pout << "loop counts = " << count << endl;
                }
            }
            transposeProduct(lx, X, s);
            ly[0] = dot(s, Y);
            T.column(stage) << s;
            P.column(stage) << lx;
            Q.column(stage) << ly;
            externalProduct(tmp_np, s, lx);
            X -= tmp_np;
            Y -= ly[0] * s;
            if (report_progress)
                pb->update(stage);
            stage++;
        }
        productTranspose(tmp_np, T, P);

        if (verbosity >= 2) {
            pout << "T = " << endl << T << endl;
            pout << "P = " << endl << P << endl;
            pout << "Q = " << endl << Q << endl;
            pout << "tmp_np = " << endl << tmp_np << endl;
            pout << endl;
        }
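        // Step (9) of the class help: W = (T P')^(+) T. The right
        // pseudoinverse of T P' (held in tmp_np) is V D^(-1) U' from its SVD
        // U D Vt, with near-zero singular values zeroed rather than inverted.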
        Mat U, Vt;
        Vec D;
        real safeguard = 1.1;
        SVD(tmp_np, U, D, Vt, 'S', safeguard);
        if (verbosity >= 2) {
            pout << "U = " << endl << U << endl;
            pout << "D = " << endl << D << endl;
            pout << "Vt = " << endl << Vt << endl;
            pout << endl;
        }

        Mat invDmat(p,p);
        invDmat.fill(0.0);
        for (int i = 0; i < D.length(); i++) {
            if (abs(D[i]) < precision)
                invDmat(i,i) = 0.0;
            else
                invDmat(i,i) = 1.0 / D[i];
        }

        product(tmp_pp, invDmat, Vt);
        product(tmp_np, U, tmp_pp);
        transposeProduct(W, tmp_np, T);
        B.resize(p,1);
        productTranspose(B, W, Q);
        if (verbosity >= 2) {
            pout << "W = " << W << endl;
            pout << "B = " << B << endl;
        }
        if (verbosity >= 1)
            pout << "WPLS training ended" << endl;
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :