00001 // -*- C++ -*-
00002 
00003 // OnlineGramNaturalGradientOptimizer.cc
00004 //
00005 // Copyright (C) 2007 Pierre-Antoine Manzagol
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pierre-Antoine Manzagol
00036 
00040 #define PL_LOG_MODULE_NAME "OnlineGramNaturalGradientOptimizer"
00041 
00042 #include "OnlineGramNaturalGradientOptimizer.h"
00043 #include <plearn/io/pl_log.h>
00044 #include <plearn/math/TMat_maths.h>
00045 #include <plearn/display/DisplayUtils.h>
00046 #include <plearn/var/SumOfVariable.h>
00047 
00048 #include <plearn/math/plapack.h>
00049 
00050 
00051 
00052 namespace PLearn {
00053 using namespace std;
00054 
PLEARN_IMPLEMENT_OBJECT(
    OnlineGramNaturalGradientOptimizer,
    "Online natural gradient descent based on an eigendecomposition of the Gram matrix of recent gradients.",
    "OnlineGramNaturalGradientOptimizer maintains a low-rank (n_eigen eigenvector)\n"
    "model of the gradient covariance matrix C, updated from small batches of\n"
    "per-example gradients through their Gram matrix. Parameters are updated along\n"
    "the regularized natural gradient direction C^{-1} mu, where mu is the mean\n"
    "gradient of the batch.\n"
    "\n"
);

OnlineGramNaturalGradientOptimizer::OnlineGramNaturalGradientOptimizer():
    learning_rate(0.01),
    gamma(1.0),
    reg(1e-6),
    opt_batch_size(1),
    n_eigen(6)
{}


void OnlineGramNaturalGradientOptimizer::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "learning_rate", &OnlineGramNaturalGradientOptimizer::learning_rate,
        OptionBase::buildoption, 
        "Learning rate used in the natural gradient descent.\n");
    declareOption(
        ol, "gamma", &OnlineGramNaturalGradientOptimizer::gamma,
        OptionBase::buildoption, 
        "Discount factor used in the update of the estimate of the gradient covariance.\n");
    declareOption(
        ol, "reg", &OnlineGramNaturalGradientOptimizer::reg,
        OptionBase::buildoption, 
        "Regularizer used in computing the natural gradient, C^{-1} mu (used as the\n"
        "eigenvalue of the discarded directions of C, and as a floor on the kept\n"
        "eigenvalues).\n");
    declareOption(
        ol, "opt_batch_size", &OnlineGramNaturalGradientOptimizer::opt_batch_size,
        OptionBase::buildoption, 
        "Size of the optimizer's batches (number of examples processed between\n"
        "parameter and gradient covariance updates).\n");
    declareOption(
        ol, "n_eigen", &OnlineGramNaturalGradientOptimizer::n_eigen,
        OptionBase::buildoption, 
        "Number of eigenvectors kept to model the gradient covariance matrix.\n");

    inherited::declareOptions(ol);
}
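
// As a rough illustration only (not taken from any shipped script), the options
// declared above could be set in a PLearn script along these lines; the option
// names are the real ones, the values and surrounding context are hypothetical:
//
//   OnlineGramNaturalGradientOptimizer(
//       learning_rate = 0.01;
//       gamma = 0.999;
//       reg = 1e-6;
//       opt_batch_size = 10;
//       n_eigen = 6;
//   )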

void OnlineGramNaturalGradientOptimizer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{ 
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(gradients, copies);
    deepCopyField(mu, copies);
    deepCopyField(gram, copies);
    deepCopyField(U, copies);
    deepCopyField(D, copies);
    deepCopyField(cov_eigen_vec, copies);
    deepCopyField(cov_eigen_val, copies);
    deepCopyField(cov_norm_eigen_vec, copies);
    deepCopyField(dot_prod, copies);
    deepCopyField(scaled_dot_prod, copies);
    deepCopyField(naturalg, copies);

}

void OnlineGramNaturalGradientOptimizer::build_()
{
    n_optimizeN_calls=0;
    n_eigen_cur = 0;
    n_eigen_old = 0;

    total_variance = 0.0;
    variance_percentage = 0.;

    int n = params.nelems();

    cout << "Number of parameters: " << n << endl;

    if (n > 0) {
        gradients.resize( opt_batch_size, n );
        gradients.clear();
        mu.resize(n);
        mu.clear();
        naturalg.resize(n);
        naturalg.clear();
        // other variables will have different lengths
        // depending on the current number of eigen vectors
    }
}

// 'nstages' is to be interpreted as "the number of examples to process,
// in batches of size 'opt_batch_size'".
// Note that a batch could be spread over two epochs.
bool OnlineGramNaturalGradientOptimizer::optimizeN(VecStatsCollector& stats_coll) 
{
    n_optimizeN_calls++;

    if( nstages%opt_batch_size != 0 )   {
        PLWARNING("OnlineGramNaturalGradientOptimizer::optimizeN(...) - nstages%opt_batch_size != 0");
    }

    int stage_max = stage + nstages; // the stage to reach

    PP<ProgressBar> pb;
    pb = new ProgressBar("Training " + classname() + " from stage " 
                + tostring(stage) + " to " + tostring(stage_max), (int)(stage_max-stage)/opt_batch_size );

    int initial_stage = stage;
    while( stage < stage_max )    {

        /*if( bi == 0 )
            t0 = clock();*/

        // Get the new gradient and append it
        params.clearGradient();
        proppath.clearGradient();
        cost->gradient[0] = -1.0;
        proppath.fbprop();
        params.copyGradientTo( gradients(bi) );

        // End of batch. Compute natural gradient and update parameters.
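        // In slightly more detail, the batch-end update below amounts to (rough
        // sketch; mu = mean of the stored batch gradients, C = the running low-rank
        // covariance estimate maintained by gramEigenNaturalGradient(), eta =
        // learning_rate):
        //
        //     naturalg ~= C^{-1} mu        (regularized by 'reg')
        //     params   <- params + eta * naturalg
        //
        // Since backpropagation above is seeded with cost->gradient[0] = -1.0, the
        // stored gradients (and hence mu) already point downhill, so adding
        // eta * naturalg acts as a descent step.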
        bi++;
        if( bi == opt_batch_size )  {
            //t1 = clock();

            bi = 0;
            gramEigenNaturalGradient();

            //t2 = clock();

            // params += learning_rate * naturalg
            // (the gradients were backpropagated from a -1 seed, so this is a descent step)
            naturalg *= learning_rate;
            params.copyGradientFrom( naturalg );
            params.updateAndClear();

            //t3 = clock();

            //cout << double(t1-t0) << " " << double(t2-t1) << " " << double(t3-t2) << endl;

            if(pb)
                pb->update((stage-initial_stage)/opt_batch_size);

        }

        stats_coll.update(cost->value);
        stage++;
    }

    return false;
}


void OnlineGramNaturalGradientOptimizer::gramEigenNaturalGradient()
{
    // We don't have any eigen vectors yet
    if( n_eigen_cur == 0 )  {

        // The number of eigen vectors we will have after incorporating the new data
        // (the gram matrix of gradients might have a rank smaller than n_eigen)
        n_eigen_cur = min( gradients.length(), n_eigen);

        // Compute the total variance - to do this, compute the trace of the covariance matrix
        // could also use the trace of the gram matrix since we compute it, ie sum(diag(gram))
/*        for( int i=0; i<gradients.length(); i++)   {
            Vec v = gradients(i);
            total_variance += sumsquare(v);
        }
        total_variance /= gradients.length();*/

        // Compute the Gram matrix - TODO: does this recognize that gram is symmetric (and save the computations)?
        gram.resize( gradients.length(), gradients.length() );
        productTranspose(gram, gradients, gradients);
        gram /= gradients.length();

        // Extract eigenvectors/eigenvalues - destroys the content of gram, D and U are resized
        // gram = U D U' (if we took all values)
        eigenVecOfSymmMat(gram, n_eigen_cur, D, U);

        // Percentage of the variance we keep is the sum of the kept eigenvalues divided
        // by the total variance.
        //variance_percentage = sum(D)/total_variance;

        // The eigenvectors V of C are deduced from the eigenvectors U of G by the
        // formula V = AUD^{-1/2} (D the eigenvalues of G).  The nonzero eigenvalues of
        // C and D are the same.

        // The true (unit-norm) eigenvectors are stored in cov_norm_eigen_vec. However,
        // we shall keep in memory the eigenvectors of C rescaled by the square root of
        // their associated eigenvalues, so that C can be written VV' instead of VDV'.
        // Thus, the "new" V is equal to VD^{1/2} = AU.
        // We have row vectors so AU = (U'A')'
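
        // Sketch of why this works (with A the matrix whose rows are the gradients,
        // n = gradients.length(), G = A A'/n the Gram matrix and C = A'A/n the
        // covariance): if G u = lambda u, then
        //     C (A' u) = (A'A/n) A' u = A' (A A'/n) u = A' G u = lambda (A' u),
        // so A' u is an (unnormalized) eigenvector of C with the same eigenvalue,
        // and ||A' u||^2 = u' A A' u = n * lambda, which is where the division by
        // sqrt(n) and then by sqrt(D[i]) below come from.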

        cov_eigen_vec.resize(n_eigen_cur, gradients.width() );
        product( cov_eigen_vec, U, gradients );
        cov_eigen_vec /= sqrt( gradients.length() );
        cov_eigen_val.resize( D.length() );
        cov_eigen_val << D;

        ofstream fd_eigval("eigen_vals.txt", ios_base::app);
        fd_eigval << cov_eigen_val << endl;
        fd_eigval.close();

        cov_norm_eigen_vec.resize( n_eigen_cur, gradients.width() );
        for( int i=0; i<n_eigen_cur; i++)   {
            Vec v = cov_norm_eigen_vec(i);
            divide( cov_eigen_vec(i), sqrt(D[i]), v );
        }

    }

    // We already have some eigen vectors, so it's an update
    else    {

        // The number of eigen vectors we will have after incorporating the new data
        n_eigen_old = cov_eigen_vec.length();
        n_eigen_cur = min( cov_eigen_vec.length() + gradients.length(), n_eigen);

        // Update the total variance, by computing that of the covariance matrix
        // total_variance = gamma*total_variance + (1-gamma)*sum(sum(A.^2))/n_new_vec
        /*total_variance *= gamma;
        for( int i=0; i<gradients.length(); i++)   {
            Vec v = gradients(i);
            // To reflect the new update
            //total_variance += (1.-gamma) * sumsquare(v) / gradients.length();
            total_variance += sumsquare(v) / gradients.length();
        }*/

        // Compute the gram matrix
        // To find the equivalence between the covariance matrix and the Gram matrix,
        // we need to have the covariance matrix under the form C = UU' + AA'. However,
        // what we have is C = gamma UU' + (1-gamma)AA'/n_new_vec. Thus, we will
        // rescale U and A using U = sqrt(gamma) U and A = sqrt((1 - gamma)/n_new_vec)
        // A. Now, the Gram matrix is of the form [U'U U'A;A'U A'A] using the new U and
        // A.
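
        // Concretely, with W the rows of cov_eigen_vec (so that W W' = diag(D)),
        // A the rows of gradients and n = gradients.length(), the code below builds
        // (following the "no (1-gamma)" choice noted further down):
        //
        //     G = [ gamma * diag(D)          sqrt(gamma/n) * W A' ]
        //         [ sqrt(gamma/n) * A W'     (1/n) * A A'         ]
        //
        // i.e. the Gram matrix of the stacked rows B = [ sqrt(gamma) W ; A/sqrt(n) ],
        // whose eigendecomposition yields the updated eigenvectors of
        // C ~= gamma * W'W + A'A/n.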

        gram.resize( n_eigen_old + gradients.length(), n_eigen_old + gradients.length() );

        Mat m = gram.subMat(0, 0, n_eigen_old, n_eigen_old);
        m.clear();
        addToDiagonal(m, gamma*D);

        // Nicolas says "use C_{n+1} = gamma C_n + gg'" so no (1.-gamma)
        m = gram.subMat(n_eigen_old, n_eigen_old, gradients.length(), gradients.length());
        productTranspose(m, gradients, gradients);
        //m *= (1.-gamma) / gradients.length();
        m /= gradients.length();

        m = gram.subMat(n_eigen_old, 0, gradients.length(), n_eigen_old );
        productTranspose(m, gradients, cov_eigen_vec);
        //m *= sqrt(gamma*(1.-gamma)/gradients.length());
        m *= sqrt(gamma/gradients.length());

        Mat m2 = gram.subMat( 0, n_eigen_old, n_eigen_old, gradients.length() );
        transpose( m, m2 );

        //G = (G + G')/2; % Solving numerical mistakes

        //cout << "--" << endl << gram << endl;

        // Extract eigenvectors/eigenvalues - destroys the content of gram, D and U are resized
        // gram = U D U' (if we took all values)
        eigenVecOfSymmMat(gram, n_eigen_cur, D, U);

        // Percentage of the variance we keep is the sum of the kept eigenvalues divided
        // by the total variance.
        //variance_percentage = sum(D)/total_variance;

        // The new (rescaled) eigenvectors are of the form [U A]*V where V is the
        // eigenvector of G. Rewriting V = [V1;V2], we have [U A]*V = UV1 + AV2.
        // for us cov_eigen_vec = U1 eigen_vec + U2 gradients
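        // With the scaling used when building G above, the stacked rows are
        // B = [ sqrt(gamma) * cov_eigen_vec ; gradients / sqrt(n) ], so the updated
        // scaled eigenvectors are (rough sketch)
        //     new_cov_eigen_vec = sqrt(gamma) * U1 * old_cov_eigen_vec
        //                         + sqrt(1/n) * U2 * gradients,
        // which is what the product / productScaleAcc pair below computes.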

        swap = old_cov_eigen_vec;
        old_cov_eigen_vec = cov_eigen_vec;
        cov_eigen_vec = swap;

        cov_eigen_vec.resize(n_eigen_cur, gradients.width());
        product( cov_eigen_vec, U.subMatColumns(0, n_eigen_old), old_cov_eigen_vec );

        //  C = alpha A.B + beta C
        // The alpha factor is sqrt(1/n) rather than sqrt((1-gamma)/n), to stay
        // consistent with the Gram blocks built above (which drop the (1-gamma) factor).
        productScaleAcc(cov_eigen_vec, U.subMatColumns(n_eigen_old, gradients.length()), false, gradients, false,
                        sqrt(1.0/gradients.length()), sqrt(gamma));

        cov_eigen_val.resize( D.length() );
        cov_eigen_val << D;

        cov_norm_eigen_vec.resize( n_eigen_cur, gradients.width() );
        for( int i=0; i<n_eigen_cur; i++)   {
            Vec v = cov_norm_eigen_vec(i);
            divide( cov_eigen_vec(i), sqrt(D[i]), v );
        }

    }

    // ### Determine reg - should be set automatically.
    //reg = cov_eigen_val[n_eigen_cur-1];
    for( int i=0; i<n_eigen_cur; i++)   {
        if( cov_eigen_val[i] < reg )  {
            PLWARNING("cov_eigen_val[i] < reg. Setting to reg.");
            cov_eigen_val[i] = reg;
        }
    }


    // *** Compute C^{-1} mu, where mu is the mean of gradients ***

    // Compute mu
    columnMean( gradients, mu );


/*    cout << "mu  " << mu << endl;
    cout << "norm(mu) " << norm(mu) << endl;
    cout << "cov_eigen_val " << cov_eigen_val << endl;
    cout << "cov_eigen_vec " << cov_eigen_vec << endl;
    cout << "cov_norm_eigen_vec " << cov_norm_eigen_vec << endl;*/

    // Compute the dot product with the eigenvectors
    dot_prod.resize(n_eigen_cur);
    product( dot_prod, cov_norm_eigen_vec, mu);

//    cout << "dot_prod " << dot_prod << endl;

    // Rescale according to the eigenvectors. Since the regularization constant will
    // be added to all the eigenvalues (and not only the ones we didn't keep), we
    // have to remove it from the ones we kept.
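    //
    // As a sketch of the computation that follows (v_i = the unit eigenvectors in
    // cov_norm_eigen_vec, lambda_i = cov_eigen_val[i], reg = the eigenvalue assumed
    // for every direction orthogonal to the kept v_i):
    //
    //     C ~= sum_i lambda_i v_i v_i'  +  reg * (I - sum_i v_i v_i')
    //
    //     C^{-1} mu = sum_i (1/lambda_i - 1/reg) (v_i . mu) v_i  +  mu / reg
    //
    // scaled_dot_prod holds the (1/lambda_i - 1/reg)(v_i . mu) coefficients,
    // transposeProduct combines them with the v_i, and mu/reg is added last.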
    scaled_dot_prod.resize(n_eigen_cur);

    divide( dot_prod, cov_eigen_val, scaled_dot_prod);
    scaled_dot_prod -= dot_prod/reg;

    transposeProduct(naturalg, cov_norm_eigen_vec, scaled_dot_prod);

    naturalg += mu / reg;


}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :