// -*- C++ -*-

// NatGradEstimator.cc
//
// Copyright (C) 2007 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio

#include "NatGradEstimator.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/plapack.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    NatGradEstimator,
    "Convert a sequence of gradients into covariance-corrected (natural gradient) directions.\n",
    "The algorithm used for converting a sequence of n-dimensional gradients g_t\n"
    "into covariance-corrected update directions v_t is the following:\n\n"
    "init():\n"
    "  initialize U = Id(k,n)\n"
    "  initialize D = lambda Id(k,k), diag matrix stored as a vector\n"
    "  initialize sigma = 0\n"
    "\n"
    "operator(int t, Vec g, Vec v): (reads g and writes v)\n"
    "    i = t%b   /* denoting b = cov_minibatch_size */\n"
    "    G_{.i} = g /* = gradient for example t = i-th column of matrix G */\n"
    "    if t<b\n"
    "        v0_i = g / (lambda + ||g||^2)\n"
    "        v = v0_i\n"
    "    else /* denoting k = n_eigen */\n"
    "        v0_i = (g gamma/sigma + sum_{j=1}^k (1/D_j - gamma/sigma) U_{.j} U_{.j}' g)  /* = inv(C) g */\n"
    "        u0_i = v0_i / (gamma + v0_i' g / (i+1))\n"
    "        v = u0_i - (1/(i+1)) sum_{j=1}^{i-1} v0_j G_{.j}' u0_i / (gamma + v0_j'G_{.j}/(i+1))\n"
    "    for j = 1 to inversion_n_iterations\n"
    "       v = (1 - gamma alpha) v + alpha v0_i - (alpha/(i+1)) sum_{r=0}^{i} v0_r G_{.r}' v\n"
    "    v *= (1 - gamma^{t/b})/(1 - gamma)\n"
    "    if i+1==b  /* recompute eigen-decomposition: */\n"
    "       M = [gamma D    (gamma/b)^{1/2} sqrt(D) U' G;  (gamma/b)^{1/2} G' U sqrt(D)    G'G/b] /* = Gram matrix */\n"
    "       (V,E) = leading_eigendecomposition(M,k)\n"
    "       U = [U sqrt(D)   G] V E^{-1/2} /* = k principal e-vec of C */\n"
    "       D = E /* = k principal e-val of C */\n"
    "       sigma = {(k+1)th e-value of M}/gamma\n"
    "               /* = heuristic value for lower e-values of C */\n"
    "\n\n"
    "This is derived from the following considerations:\n"
    "  - let the covariance estimator at the beginning of minibatch t/b be C. We have its\n"
    "    eigen-decomposition in principal e-vectors U, principal e-values D, and lower e-values=sigma.\n"
    "  - at the end of the minibatch it is B + GG'/b,\n"
    "    where B is C with the upper eigenvalues reduced by a factor gamma.\n"
    "  - this introduces a scaling factor (1-gamma)/(1-gamma^{t/b}), which is scaled out of\n"
    "    the v's on the last line of the above pseudo-code.\n"
    "  - to obtain the eigen-decomposition efficiently, we rewrite B* + GG' in Gram matrix form,\n"
    "    where B* ignores the lower eigenvalues of B, i.e. B* = gamma U D U'. Hence\n"
    "    B* + GG' = [sqrt(gamma) U sqrt(D)    G]' [sqrt(gamma) U sqrt(D)   G],\n"
    "    but this matrix has the same eigenvalues as M = [sqrt(gamma) U sqrt(D)    G] [sqrt(gamma) U sqrt(D)    G]',\n"
    "    and the eigenvectors of B* + GG' can be recovered from the above formula.\n"
    "  - To regularize B* + GG', we threshold the lower eigenvalues and set them to the (k+1)-th eigenvalue.\n"
    "  - on the i-th gradient g_i of the minibatch we would like to solve\n"
    "           (B + (1/i) sum_{k=1}^i g_k g_k') v_i = g_i\n"
    "  - we do this iteratively, using as initial estimator of v_i: v_i^0 = inv(F) g_i,\n"
    "    where F is C with the lower eigenvalues boosted by a factor 1/gamma, and\n"
    "    each iteration has the form:\n"
    "            v_i <-- v_i + alpha inv(F) (g_i - (B + (1/i) sum_{k=1}^i g_k g_k') v_i)\n"
    "    which can be simplified into\n"
    "            v_i <-- (1 - alpha gamma) v_i + alpha v_i^0 - (alpha/i) sum_{k=1}^i v_k^0 g_k' v_i\n"
    );
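
// The following is a small standalone sketch (illustrative only, not part of the
// PLearn API; all names here are hypothetical) of the "v0_i = inv(C) g" step of the
// pseudo-code above, when C is represented by its k principal eigenvectors U_j
// (stored as rows), their eigenvalues D_j, and the value sigma/gamma shared by all
// remaining eigen-directions:
//   inv(C) g = (gamma/sigma) g + sum_j (1/D_j - gamma/sigma) U_j (U_j . g)
static void applyLowRankInverse(int n, int k,
                                const double* const* U, // k eigenvector rows, each of length n
                                const double* D,        // k principal eigenvalues
                                double gamma_over_sigma,// inverse of the shared lower eigenvalue
                                const double* g,        // gradient to precondition
                                double* v)              // output: v = inv(C) g
{
    for (int d = 0; d < n; d++)         // isotropic part: (gamma/sigma) g
        v[d] = gamma_over_sigma * g[d];
    for (int j = 0; j < k; j++)         // rank-one correction along each U_j
    {
        double Ujg = 0;                 // Ujg = U_j . g
        for (int d = 0; d < n; d++)
            Ujg += U[j][d] * g[d];
        double coeff = (1.0 / D[j] - gamma_over_sigma) * Ujg;
        for (int d = 0; d < n; d++)
            v[d] += coeff * U[j][d];
    }
}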

NatGradEstimator::NatGradEstimator()
    /* ### Initialize all fields to their default value */
    : cov_minibatch_size(10),
      lambda(1),
      n_eigen(10),
      alpha(0.1),
      gamma(0.9),
      inversion_n_iterations(5),
      n_dim(-1),
      use_double_init(true),
      verbosity(0),
      sigma(0),
      previous_t(-1)
{
    build();
}

// ### Nothing to add here, simply calls build_
void NatGradEstimator::build()
{
    inherited::build();
    build_();
}

void NatGradEstimator::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    deepCopyField(Ut, copies);
    deepCopyField(E, copies);
    deepCopyField(D, copies);
    deepCopyField(Gt, copies);
    deepCopyField(initial_v, copies);
    deepCopyField(tmp_v, copies);
    deepCopyField(M, copies);
    deepCopyField(M11, copies);
    deepCopyField(M12, copies);
    deepCopyField(M21, copies);
    deepCopyField(M22, copies);
    deepCopyField(Vt, copies);
    deepCopyField(Vkt, copies);
    deepCopyField(Vbt, copies);
    deepCopyField(newUt, copies);
    deepCopyField(vg, copies);
}

void NatGradEstimator::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    // ### ex:
    declareOption(ol, "cov_minibatch_size", &NatGradEstimator::cov_minibatch_size,
                  OptionBase::buildoption,
                  "Covariance estimator minibatch size, i.e. number of calls\n"
                  "to operator() before re-estimating the principal\n"
                  "eigenvectors/values. Note that each such re-computation will\n"
                  "cost O(n_eigen * n)");
    declareOption(ol, "lambda", &NatGradEstimator::lambda,
                  OptionBase::buildoption,
                  "Initial variance. The first covariance is assumed to be\n"
                  "lambda times the identity. Default = 1.\n");
    declareOption(ol, "n_eigen", &NatGradEstimator::n_eigen,
                  OptionBase::buildoption,
                  "Number of principal eigenvectors of the covariance matrix\n"
                  "that are kept in its approximation.\n");
    declareOption(ol, "alpha", &NatGradEstimator::alpha,
                  OptionBase::buildoption,
                  "Learning rate of the inversion iterations.\n");
    declareOption(ol, "inversion_n_iterations", &NatGradEstimator::inversion_n_iterations,
                  OptionBase::buildoption,
                  "Number of iterations of the numerical approximation algorithm for\n"
                  "solving the linear system cov v = g (i.e. computing v = inverse(cov) g)\n");
    declareOption(ol, "use_double_init", &NatGradEstimator::use_double_init,
                  OptionBase::buildoption,
                  "Whether to use u0 and its correction to initialize the inversion iterations\n");
    declareOption(ol, "gamma", &NatGradEstimator::gamma,
                  OptionBase::buildoption,
                  "Forgetting factor in moving average estimator of covariance. 0<gamma<1.\n");
    // (An illustrative sketch of this update follows declareOptions below.)
    declareOption(ol, "amari_version", &NatGradEstimator::amari_version,
                  OptionBase::buildoption,
                  "Instead of our tricks, use the formula Ginv <-- (1+eps) Ginv - eps Ginv g g' Ginv\n"
                  "to estimate the inverse of the covariance matrix, and multiply it by g at each step.\n");
    declareOption(ol, "verbosity", &NatGradEstimator::verbosity,
                  OptionBase::buildoption,
                  "Verbosity level\n");

    declareOption(ol, "n_dim", &NatGradEstimator::n_dim,
                  OptionBase::learntoption,
                  "Number of dimensions of the gradient vectors\n");
    declareOption(ol, "Ut", &NatGradEstimator::Ut,
                  OptionBase::learntoption,
                  "Estimated principal eigenvectors of the gradients covariance matrix\n"
                  "(stored in the rows of Ut)\n");
    declareOption(ol, "E", &NatGradEstimator::E,
                  OptionBase::learntoption,
                  "Estimated principal eigenvalues of the gradients covariance matrix\n");
    declareOption(ol, "sigma", &NatGradEstimator::sigma,
                  OptionBase::learntoption,
                  "Estimated value for the minor eigenvalues of the gradients covariance matrix\n");
    declareOption(ol, "Gt", &NatGradEstimator::Gt,
                  OptionBase::learntoption,
                  "Collected gradients during a minibatch\n");
    declareOption(ol, "previous_t", &NatGradEstimator::previous_t,
                  OptionBase::learntoption,
                  "Value of t at previous call of operator()\n");
    declareOption(ol, "initial_v", &NatGradEstimator::initial_v,
                  OptionBase::learntoption,
                  "Initial v for the g's of the current minibatch\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
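
// A small standalone sketch (illustrative only; names and the eps parameter are
// hypothetical, not part of the PLearn API) of the update named by the
// "amari_version" option above: keep an explicit estimate Ginv of the inverse
// covariance and refresh it with each gradient g using
//   Ginv <-- (1+eps) Ginv - eps Ginv g g' Ginv
// Since Ginv is symmetric, Ginv g g' Ginv = h h' with h = Ginv g, and h is also
// the corrected direction v returned at each step.
static void amariInverseStep(int n, double eps,
                             double* Ginv,    // n*n row-major inverse-covariance estimate, updated in place
                             const double* g, // current gradient
                             double* v)       // output: v = h = Ginv g
{
    // h = Ginv g
    for (int a = 0; a < n; a++)
    {
        double s = 0;
        for (int c = 0; c < n; c++)
            s += Ginv[a*n + c] * g[c];
        v[a] = s;
    }
    // Ginv <-- (1+eps) Ginv - eps h h'
    for (int a = 0; a < n; a++)
        for (int c = 0; c < n; c++)
            Ginv[a*n + c] = (1 + eps) * Ginv[a*n + c] - eps * v[a] * v[c];
}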

void NatGradEstimator::build_()
{
    init();
}

void NatGradEstimator::init()
{
    if (n_dim>=0)
    {
        PLASSERT_MSG(n_dim>0, "NatGradEstimator::init(), n_dim should be > 0");
        PLASSERT_MSG(gamma<1 && gamma>0, "NatGradEstimator::init(), gamma should be < 1 and >0");
        Ut.resize(n_eigen,n_dim);
        Vt.resize(n_eigen+1,n_eigen+cov_minibatch_size);
        Vkt = Vt.subMat(0,0,n_eigen,n_eigen);
        Vbt = Vt.subMat(0,n_eigen,n_eigen,cov_minibatch_size);
        E.resize(n_eigen+1);
        D = E.subVec(0,n_eigen);
        M.resize(n_eigen + cov_minibatch_size, n_eigen + cov_minibatch_size);
        M11=M.subMat(0,0,n_eigen,n_eigen);
        M12=M.subMat(0,n_eigen,n_eigen,cov_minibatch_size);
        M21=M.subMat(n_eigen,0,cov_minibatch_size,n_eigen);
        M22=M.subMat(n_eigen,n_eigen,cov_minibatch_size,cov_minibatch_size);
        Gt.resize(cov_minibatch_size, n_dim);
        initial_v.resize(cov_minibatch_size, n_dim);
        tmp_v.resize(n_dim);
        newUt.resize(n_eigen,n_dim);
        vg.resize(cov_minibatch_size);
    }
}

void NatGradEstimator::operator()(int t, const Vec& g, Vec v)
{
    if (t!=0)
        PLASSERT_MSG(t==previous_t+1, "NatGradEstimator() should be called sequentially!");
    if (n_dim<0)
    {
        PLASSERT_MSG(t==0, "The first call to NatGradEstimator() should be with t=0\n");
        n_dim = g.length();
        v.resize(n_dim);
        init();
    }
    int i = t % cov_minibatch_size;
    Vec v0 = initial_v(i);
    Gt(i) << g;

    // initialize v0
    v0 << g;
    if (t<cov_minibatch_size)
    {
        v0 *= 1.0/(lambda + pownorm(g));
        v << v0;
    }
    else
    {
        real oos = gamma/sigma;
        real ooip1 = 1.0/(i+1.0);
        v0 *= oos;
        // v0 = g*gamma/sigma + sum_j (1/D_j - gamma/sigma) Uj Uj' g
        for (int j=0;j<n_eigen;j++)
        {
            Vec Uj = Ut(j);
            multiplyAcc(v0, Uj, (1/D[j] - oos) * dot(Uj,g));
        }
        if (use_double_init)
        {
            vg[i] = dot(v0,g);
            multiply(v0,1.0/(gamma + vg[i]*ooip1),tmp_v); // tmp_v == u0_i here
            v << tmp_v;
            for (int j=0;j<i;j++)
                multiplyAcc(v, initial_v(j), -ooip1*dot(Gt(j),tmp_v)/(gamma + vg[j]*ooip1));
        }
        else
            v << v0;
    }

    // iterate on v to solve linear system
    if (verbosity>0)
        cout << "start inversion iterations" << endl;
    for (int j=0;j<inversion_n_iterations;j++)
    {
        multiply(v, (1 - gamma*alpha),tmp_v);
        multiplyAcc(tmp_v, v0, alpha);
        for (int r=0;r<=i;r++)
            multiplyAcc(tmp_v, initial_v(r), -alpha/(i+1)*dot(Gt(r),v));
        v << tmp_v;
        // verify that we get an improvement
        if (verbosity>0)
        {
            // compute (B + (1/i)sum_{k=1}^i g_k g_k') v_i
            //        = (U (gamma D - sigma I) U' + sigma I + (1/i)sum_{k=1}^i g_k g_k') v_i
            multiply(v,sigma,tmp_v);
            for (int j=0;j<n_eigen;j++)
            {
                Vec Uj = Ut(j);
                multiplyAcc(tmp_v,Uj,(gamma*D[j]-sigma)*dot(Uj,v));
            }
            for (int j=0;j<=i;j++)
            {
                Vec Gj = Gt(j);
                multiplyAcc(tmp_v,Gj,dot(Gj,v)/(i+1));
            }
            // result is in tmp_v. Compare with g_i
            real gnorm = dot(g,g);
            real enorm = dot(tmp_v,tmp_v);
            real angle = acos(dot(tmp_v,g)/sqrt(gnorm*enorm))*360/(2*3.14159);
            real err = L2distance(g,tmp_v);
            cout << "linear system distance=" << err << ", angle="<<angle<<", norm ratio="<<enorm/gnorm<<endl;
        }
    }

    // normalize back v, to take into account scaling up of C due to gamma iteration
    v *= (1 - pow(gamma,real(t/cov_minibatch_size)))/(1 - gamma);
    // recompute the eigen-decomposition
    if (i+1==cov_minibatch_size)
    {
        // build Gram matrix M, by blocks [M11 M12; M21 M22]
        M11.clear();
        for (int j=0;j<n_eigen;j++)
            M11(j,j) = gamma*D[j];
        productTranspose(M12,Ut,Gt);
        real gob=gamma/cov_minibatch_size;
        for (int j=0;j<n_eigen;j++)
            M12(j) *= sqrt(D[j]*gob);
        transpose(M12,M21);
        productTranspose(M22,Gt,Gt);
        M22 *= 1.0/cov_minibatch_size;

        // get eigen-decomposition, with one more eigen-pair than necessary, to set sigma
        eigenVecOfSymmMat(M,n_eigen+1,E,Vt);
        // heuristic value for the lower eigenvalues of C, as in the pseudo-code above
        // (without this update, sigma would stay 0 and gamma/sigma would be undefined)
        sigma = E[n_eigen]/gamma;

        // convert eigenvectors Vt of M into eigenvectors U of C
        product(newUt,Vbt,Gt);
        Vec sqrtD = tmp_v.subVec(0,n_eigen);
        compute_sqrt(D,sqrtD);
        diagonalizedFactorsProduct(newUt,Vkt,sqrtD,Ut,true);
        Ut << newUt;
    }
    previous_t = t;
}
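
// A standalone sketch (illustrative only; names are hypothetical) of the inversion
// loop in operator() above: refine v towards the solution of
//   (B + (1/(i+1)) sum_{r=0}^{i} g_r g_r') v = g_i
// using only the preconditioned vectors v0_r = inv(F) g_r, via the damped iteration
//   v <-- (1 - gamma alpha) v + alpha v0_i - (alpha/(i+1)) sum_{r=0}^{i} v0_r (g_r . v)
static void inversionIterations(int n, int i, int n_iterations,
                                double alpha, double gamma,
                                const double* const* G,  // gradients g_0..g_i, each of length n
                                const double* const* V0, // preconditioned vectors v0_0..v0_i
                                double* v,               // in: initial guess; out: refined solution
                                double* tmp)             // scratch buffer of length n
{
    for (int it = 0; it < n_iterations; it++)
    {
        for (int d = 0; d < n; d++)      // damped step plus the alpha v0_i term
            tmp[d] = (1 - gamma * alpha) * v[d] + alpha * V0[i][d];
        for (int r = 0; r <= i; r++)     // subtract the empirical covariance terms
        {
            double grv = 0;              // grv = g_r . v
            for (int d = 0; d < n; d++)
                grv += G[r][d] * v[d];
            double coeff = -alpha / (i + 1) * grv;
            for (int d = 0; d < n; d++)
                tmp[d] += coeff * V0[r][d];
        }
        for (int d = 0; d < n; d++)
            v[d] = tmp[d];
    }
}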

} // end of namespace PLearn
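
// A last standalone sketch (illustrative only; it sits outside the PLearn namespace
// and uses no PLearn types) of the Gram-matrix trick behind the eigen-decomposition
// recomputation in operator() above: if M = A A' is the small (k+b) x (k+b) Gram
// matrix of the rows of A (here the rows sqrt(gamma) sqrt(D_j) U_j and the scaled
// gradients), and (V, E) are eigenvectors/values of M -- assumed computed elsewhere,
// e.g. by a routine such as eigenVecOfSymmMat -- then eigenvectors of the big n x n
// matrix A'A are recovered as u_m = A' V_{.m} / sqrt(E_m), because
//   A'A (A'V_{.m}) = A'(A A')V_{.m} = E_m (A'V_{.m})  and  ||A'V_{.m}||^2 = E_m.
static void recoverEigenvectorsFromGram(int rows, int n, int k,
                                        const double* const* A, // the (k+b) rows of A, each of length n
                                        const double* const* V, // V[p][m] = p-th entry of the m-th eigenvector of M
                                        const double* E,        // eigenvalues of M, E[m] > 0
                                        double** U)             // output: k recovered eigenvector rows of A'A
{
    for (int m = 0; m < k; m++)
    {
        double scale = 1.0 / std::sqrt(E[m]); // <cmath> is assumed available via the PLearn includes
        for (int d = 0; d < n; d++)
        {
            double s = 0;                     // s = (A' V_{.m})_d
            for (int p = 0; p < rows; p++)
                s += A[p][d] * V[p][m];
            U[m][d] = s * scale;
        }
    }
}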


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :