// -*- C++ -*-

// NatGradEstimator.cc
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio

#include "NatGradEstimator.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

// bool save_G=false;

PLEARN_IMPLEMENT_OBJECT(
    NatGradEstimator,
    "Subclass of GradientCorrector that computes an online natural gradient update direction.\n",
    "Converts a sequence of gradients into covariance-corrected (natural gradient) directions.\n"
    "The algorithm used for converting a sequence of n-dimensional gradients g_t\n"
    "into covariance-corrected update directions v_t is the following:\n\n"
    "operator(int t, Vec g, Vec v): (reads g and writes v)\n"
    "    i = t%b   /* denoting b = cov_minibatch_size */\n"
    "    extend X by a (k+i)-th column gamma^{-i/2} g\n"
    "    extend G by a (k+i)-th column and row, with G_{k+i,.} = X'_{k+i,.} X\n"
    "      and idem for the symmetric sub-column\n"
    "    extend vectors r and a by a (k+i)-th element, r_{k+i-1}=0, r_{k+i}=gamma^{-i/2}\n"
    "    solve the linear system (G + gamma^{-k} lambda I) a = r for a\n"
    "    v = X a (1 - gamma)/(1 - gamma^t)\n"
    "    if i+1==b\n"
    "       (V,D) = leading_eigendecomposition(G,k)\n"
    "       U = gamma^{b/2} X V\n"
    "\n\n"
    "See the technical report 'A new insight on the natural gradient' for justifications.\n"
    );
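
// Example usage (a minimal sketch, not from this file: the training loop, the
// horizon T and the gradient provider computeGradient() are hypothetical):
//
//     NatGradEstimator corrector;
//     corrector.n_eigen = 10;            // rank of the covariance approximation
//     corrector.cov_minibatch_size = 20; // eigendecomposition every 20 calls
//     corrector.build();
//     Vec g(n), v(n);                    // n = number of parameters
//     for (int t = 0; t < T; t++) {
//         computeGradient(params, g);    // hypothetical: fills g with the current gradient
//         corrector(t, g, v);            // t must increase by exactly 1 between calls
//         // ... update the parameters along -v instead of -g ...
//     }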

NatGradEstimator::NatGradEstimator()
    /* ### Initialize all fields to their default value */
    : cov_minibatch_size(10),
      init_lambda(1.),
      min_lambda(0.001),
      n_eigen(10),
      gamma(0.99),
      renormalize(true),
      amari_version(false),
      update_lambda_from_eigen(false),
      previous_t(-1),
      first_t(-1),
      lambda(1.)
{
    build();
}

// ### Nothing to add here; simply calls inherited::build() and init()
void NatGradEstimator::build()
{
    inherited::build();
    init();
}

void NatGradEstimator::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(Ut, copies);
    deepCopyField(D, copies);
    deepCopyField(Xt, copies);
    deepCopyField(G, copies);
    deepCopyField(r, copies);
    deepCopyField(Vt, copies);
    deepCopyField(Vkt, copies);
    deepCopyField(A, copies);
    deepCopyField(pivots, copies);
}

void NatGradEstimator::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    declareOption(ol, "cov_minibatch_size", &NatGradEstimator::cov_minibatch_size,
                  OptionBase::buildoption,
                  "Covariance estimator minibatch size, i.e. number of calls\n"
                  "to operator() before re-estimating the principal\n"
                  "eigenvectors/values. Note that each such re-computation will\n"
                  "cost O(n_eigen * n)");
    declareOption(ol, "init_lambda", &NatGradEstimator::init_lambda,
                  OptionBase::buildoption,
                  "Initial variance. The first covariance is assumed to be\n"
                  "init_lambda times the identity. Default = 1.\n");
    declareOption(ol, "min_lambda", &NatGradEstimator::min_lambda,
                  OptionBase::buildoption,
                  "Minimal lambda value allowed in lambda's update from an eigendecomposition.\n");

    declareOption(ol, "regularizer", &NatGradEstimator::init_lambda,
                  OptionBase::buildoption,
                  "Proxy for option init_lambda (different name to avoid python problems).\n");
    declareOption(ol, "n_eigen", &NatGradEstimator::n_eigen,
                  OptionBase::buildoption,
                  "Number of principal eigenvectors of the covariance matrix\n"
                  "that are kept in its approximation.\n");
    declareOption(ol, "gamma", &NatGradEstimator::gamma,
                  OptionBase::buildoption,
                  "Forgetting factor in the moving-average estimator of the covariance. 0<gamma<1.\n");
    declareOption(ol, "amari_version", &NatGradEstimator::amari_version,
                  OptionBase::buildoption,
                  "Instead of our tricks, use the formula Ginv <-- (1+eps) Ginv - eps Ginv g g' Ginv\n"
                  "to estimate the inverse of the covariance matrix, and multiply it with g at each step.\n");
    declareOption(ol, "update_lambda_from_eigen", &NatGradEstimator::update_lambda_from_eigen,
                  OptionBase::buildoption,
                  "Following an eigendecomposition, set lambda to the n_eigen-th eigenvalue\n"
                  "(floored at min_lambda).\n");

    declareOption(ol, "verbosity", &NatGradEstimator::verbosity,
                  OptionBase::buildoption,
                  "Verbosity level\n");
    declareOption(ol, "renormalize", &NatGradEstimator::renormalize,
                  OptionBase::buildoption,
                  "Whether to renormalize v to compensate for the scaling that gamma produces\n");

    declareOption(ol, "Ut", &NatGradEstimator::Ut,
                  OptionBase::learntoption,
                  "Estimated scaled principal eigenvectors of the gradients covariance matrix\n"
                  "(stored in the rows of Ut)\n");
    declareOption(ol, "G", &NatGradEstimator::G,
                  OptionBase::learntoption,
                  "Gram matrix growing during a minibatch\n");
    declareOption(ol, "previous_t", &NatGradEstimator::previous_t,
                  OptionBase::learntoption,
                  "Value of t at previous call of operator()\n");
    declareOption(ol, "first_t", &NatGradEstimator::first_t,
                  OptionBase::learntoption,
                  "Value of t when operator() is first called\n");
    declareOption(ol, "Xt", &NatGradEstimator::Xt,
                  OptionBase::learntoption,
                  "Contains in its rows the scaled eigenvectors and g's\n"
                  "seen since the beginning of the minibatch.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void NatGradEstimator::init()
{
    if (n_dim>=0)
    {
        PLASSERT_MSG(n_dim>0, "NatGradEstimator::init(), n_dim should be > 0");
        PLASSERT_MSG(gamma<1 && gamma>0, "NatGradEstimator::init(), gamma should be < 1 and >0");
        Ut.resize(n_eigen,n_dim);
        Vt.resize(n_eigen+1,n_eigen+cov_minibatch_size);
        Vkt = Vt.subMatRows(0,n_eigen);
        D.resize(n_eigen+1);
        G.resize(n_eigen + cov_minibatch_size, n_eigen + cov_minibatch_size);
        A.resize(n_eigen + cov_minibatch_size, n_eigen + cov_minibatch_size);
        G.clear();
        Xt.resize(n_eigen+cov_minibatch_size, n_dim);
        Xt.clear();
        r.resize(n_eigen);
        lambda = init_lambda;
        for (int j=0;j<n_eigen;j++)
            G(j,j) = lambda;
        first_t=-1;
        previous_t=-1;
    }
}
// TODO replace the calls to pow by something else. It's notoriously
// inefficient.
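// A possible replacement (a sketch, not wired in): since operator() is called
// with consecutive values of t, the factor rn = gamma^{-(i+1)/2} computed below
// could be maintained incrementally, e.g. with rn kept as a member variable:
//
//     const real inv_sqrt_gamma = real(1) / sqrt(gamma);
//     rn = (i == 0) ? inv_sqrt_gamma : rn * inv_sqrt_gamma;
//
// at the cost of a slowly accumulating rounding error within each minibatch.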
void NatGradEstimator::operator()(int t, const Vec& g, Vec v)
{
    if (previous_t>=0)
        PLASSERT_MSG(t==previous_t+1, "NatGradEstimator() should be called sequentially!");
    if (n_dim<0)
    {
        n_dim = g.length();
        v.resize(n_dim);
        init();
        previous_t=t-1;
        first_t=t;
    }
    int i = t % cov_minibatch_size;
    int n = n_eigen+i;
    Xt.resize(n+1,n_dim);
    Vec newX = Xt(n);
    real rn = pow(gamma,real(-0.5*(i+1)));
    multiply(g,rn,newX);
    G.resize(n+1,n+1);
    Vec newG=G(n);
    product(newG,Xt,newX);
    G.column(n) << newG;
    r.resize(n+1);
    r.clear();
    r[n] = rn;
    // solve linear system (G + \gamma^{-k} \lambda I) a = r
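    // Solving this small (n_eigen+i+1)-dimensional system in the Gram (dual)
    // space is what keeps the update cheap: the solution a holds the
    // coordinates of the output v in the basis formed by the rows of Xt
    // (v = Xt' a below), so no n-dimensional system ever has to be formed.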
    pivots.resize(n);
    A.resize(n+1,n+1);
    A << G;
    real rn2 = rn*rn;
    real coef = rn2*lambda;
    for (int j=0;j<=n;j++)
        A(j,j) += coef;
    Mat r_row = r.toMat(1,n+1);
    int status = lapackSolveLinearSystem(A,r_row,pivots);
    if (status!=0)
        PLWARNING("NatGradEstimator: lapackSolveLinearSystem returned %d\n",status);
    if (verbosity>1 && i%max(1,cov_minibatch_size/3)==0)
        cout << "solution r = " << r << endl;
    // solution is in r
    transposeProduct(v, Xt, r);

    // Multiply v by C's normalizer.
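    // The factor below is sum_{s=0}^{t} gamma^s, the total weight of the
    // discounted covariance estimate; multiplying v by it compensates for the
    // fact that the discounted sum of gradient outer products was never
    // divided by its total weight.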
    if (renormalize)
        v*=(1 - pow(gamma,real(t+1)))/(1 - gamma);

    if (verbosity>0 && i%(cov_minibatch_size)==0)
    {
        real gnorm = sqrt(dot(g,g));
        real vnorm = sqrt(dot(v,v));
        real angle = acos(dot(v,g)/(gnorm*vnorm))*360/(2*3.14159);
        cout << "angle(g,v)="<<angle<<", gnorm="<<gnorm<<", vnorm="<<vnorm<<", norm ratio="<<vnorm/gnorm<<endl;
    }

    // recompute the eigen-decomposition
    if (i+1==cov_minibatch_size)
    {
        // Get the eigen-decomposition, with one more eigenvector than necessary
        // in order to check whether it is coherent with lambda.
        //if (save_G)
        //    saveAscii("G.amat",G);

        // try to regularize G
//        for (int j=0;j<n+1;j++)
//            G(j,j) += 0.001;

//        eigenVecOfSymmMat(G,n_eigen,D,Vt);
        // Get all eigenvalues -> this resizes D and Vt, but it doesn't matter
        eigenVecOfSymmMat(G,G.width(),D,Vt);
//        cout << "-= " << t << " =-" << endl;
//        cout << D.length() << " eigenvalues = " << D << endl;

        if( D.length() < n_eigen )
            PLERROR("NatGradEstimator: eigendecomposition returned fewer than n_eigen eigenvectors.");

        // convert eigenvectors Vt of G into *unnormalized* eigenvectors U of C
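        // This relies on the usual Gram/covariance correspondence: if v is an
        // eigenvector of G = X'X with eigenvalue d, then u = X v is an
        // eigenvector of C = X X' with the same eigenvalue, so the leading
        // eigenvectors of C are recovered from the small matrix G at low cost.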
        product(Ut,Vkt,Xt);
        Ut *= 1.0/rn;
        D *= 1.0/rn2;
        for (int j=0;j<n_eigen;j++) {
            if (D[j]<1e-10)
                PLWARNING("NatGradEstimator: very small eigenvalue %d = %g\n",j,D[j]);
//            if (D[j]<lambda)
//                cout << " *** Small D[j] *** -> " << D[j] << endl;
        }
        if (verbosity>0) // verify that Ut Ut' = diag(D)
        {
            static Mat Dmat;
            cout << D.length() << " eigenvalues = " << D << endl;
            if (verbosity>2)
            {
                Dmat.resize(n_eigen,n_eigen);
                productTranspose(Dmat,Ut,Ut);
                for (int j=0;j<n_eigen;j++)
                    Dmat(j,j)-=D[j];
                cout << "sumsquare(Ut Ut' - diag(D))/n_eigen = " << sumsquare(Dmat.toVec())/n_eigen << endl;
            }
        }
        // prepare for next minibatch
        Xt.resize(n_eigen,n_dim);
        Xt << Ut;
        G.resize(n_eigen,n_eigen);
        G.clear();
        for (int j=0;j<n_eigen;j++)
            G(j,j) = D[j];

        // Update lambda (the right heuristic is still to be determined).
        if( update_lambda_from_eigen )    {
//            if (D[n_eigen-1]>lambda)
//                cout << " *** Last lambda too small? *** lambda, last eigen : " << lambda << ", " << D[n_eigen-1] << endl;

/*          Alternative heuristic: take the first eigenvalue below one tenth
            of the leading one, falling back on the last kept eigenvalue.
            float big_eig = D[0];
            bool cont = true;
            for (int j=0;j<n_eigen && cont;j++) {
                if( D[j]< (0.1*big_eig) ) {
                    lambda = D[j];
                    cont = false;
                }
            }
            if(cont)
                lambda = D[n_eigen-1];
*/
            lambda = D[n_eigen-1];
            if( lambda < min_lambda )
                lambda = min_lambda;

//          if (D[n_eigen]<1e-6)
//              PLWARNING("NatGradEstimator: updating lambda with small value %g\n",D[n_eigen]);
//          lambda = D[n_eigen-1];
//            if(lambda<0.01)
//                lambda = 0.01;
        }
    }
    previous_t = t;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/

// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :