GradientOptimizer.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent and Yoshua Bengio
// Copyright (C) 1999-2002, 2006 University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GradientOptimizer.cc 5852 2006-06-14 14:40:03Z larocheh $
 * This file is part of the PLearn library.
 ******************************************************* */

#define PL_LOG_MODULE_NAME "GradientOptimizer"

#include "GradientOptimizer.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/display/DisplayUtils.h>
#include <plearn/var/SumOfVariable.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    GradientOptimizer,
    "Optimization by gradient descent.", 
    "GradientOptimizer is the usual, simple gradient descent algorithm \n"
    "(the number of samples on which to estimate gradients before an \n"
    "update, which determines whether we are performing 'batch', \n"
    "'stochastic' or even 'minibatch' descent, is currently specified \n"
    "outside this class, typically as the 'nsamples' parameter of the \n"
    "meanOf function to be optimized). \n"
    "\n"
    "Options for GradientOptimizer are [ option_name: <type> (default) ]: \n"
    "  - start_learning_rate: <real> (0.01) \n"
    "    the initial learning rate \n"
    "  - decrease_constant: <real> (0) \n"
    "    the learning rate decrease constant \n"
    "\n"
);
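
// Illustrative note (added for clarity, not part of the original source):
// when no lr_schedule is given, optimizeN() below decays the learning rate as
//     learning_rate = start_learning_rate / (1 + decrease_constant * stage)
// e.g. with start_learning_rate = 0.01 and decrease_constant = 1e-4, the rate
// is 0.01 at stage 0, 0.005 at stage 10000, and about 0.0033 at stage 20000.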

GradientOptimizer::GradientOptimizer():
    learning_rate(0.),   
    start_learning_rate(1e-2),
    decrease_constant(0),
    use_stochastic_hack(false),
    verbosity(0)
{}


void GradientOptimizer::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "start_learning_rate", &GradientOptimizer::start_learning_rate,
        OptionBase::buildoption, 
        "The initial learning rate\n");

    declareOption(
        ol, "learning_rate", &GradientOptimizer::learning_rate,
        OptionBase::learntoption, 
        "The current learning rate\n");

    declareOption(
        ol, "decrease_constant", &GradientOptimizer::decrease_constant,
        OptionBase::buildoption, 
        "The learning rate decrease constant\n");

    declareOption(
        ol, "lr_schedule", &GradientOptimizer::lr_schedule,
        OptionBase::buildoption, 
        "Fixed schedule instead of decrease_constant. This matrix has 2 columns: iteration_threshold \n"
        "and learning_rate_factor. As soon as the iteration number goes above the iteration_threshold,\n"
        "the corresponding learning_rate_factor is applied (multiplied) to the start_learning_rate to\n"
        "obtain the learning_rate.\n");
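
    // Illustrative example (added for clarity, not part of the original
    // source): an lr_schedule such as
    //     [  1000   1.0
    //        5000   0.1
    //       20000   0.01 ]
    // gives learning_rate = start_learning_rate while stage <= 1000, then
    // 0.1 * start_learning_rate for 1000 < stage <= 5000, and
    // 0.01 * start_learning_rate afterwards (the last row's factor remains
    // in effect for all remaining stages).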

    declareOption(
        ol, "use_stochastic_hack", &GradientOptimizer::use_stochastic_hack,
        OptionBase::buildoption, 
        "Indication that a stochastic hack to accelerate stochastic gradient descent should be used.\n"
        "Be aware that it will not take into account minimum and maximum values in variables.\n"
        );

    declareOption(
        ol, "verbosity", &GradientOptimizer::verbosity,
        OptionBase::buildoption, 
        "Controls the amount of output.  If zero, does not print anything.\n"
        "If 'verbosity'=V, print the current cost and learning rate if\n"
        "\n"
        "    stage % V == 0\n"
        "\n"
        "i.e. every V stages.  (Default=0)\n");

    inherited::declareOptions(ol);
}
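
// Illustrative usage sketch (added for clarity, not part of the original
// source): how these options might be specified in a PLearn script, assuming
// the usual "ClassName( option = value ; ... )" serialization syntax and the
// 'nstages' option inherited from Optimizer (used below in optimizeN()):
//
//     GradientOptimizer(
//         start_learning_rate = 0.01 ;
//         decrease_constant = 1e-6 ;
//         use_stochastic_hack = 0 ;
//         verbosity = 100 ;
//         nstages = 10000 ;
//     )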


// static bool displayvg=false;

bool GradientOptimizer::optimizeN(VecStatsCollector& stats_coll) 
{
    // Big hack for the special case of stochastic gradient, to avoid doing an
    // explicit update (temporarily change the gradient fields of the
    // parameters to point to the parameters themselves, so that gradients are
    // "accumulated" directly in the parameters, thus updating them!)
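    // Illustrative note (added for clarity, not part of the original source):
    // cost->gradient[0] is set to -learning_rate before each fbprop(), so the
    // quantity accumulated into each parameter's gradient field is
    // -learning_rate * dC/dtheta.  With the hack below, the gradient field is
    // redirected onto the value field itself, so each bprop pass directly
    // performs the update theta <- theta - learning_rate * dC/dtheta.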

    SumOfVariable* sumofvar = dynamic_cast<SumOfVariable*>((Variable*)cost);
    Array<Mat> oldgradientlocations;
    bool stochastic_hack = use_stochastic_hack && sumofvar!=0 && sumofvar->nsamples==1;
    //stochastic_hack=false;
    if(stochastic_hack)
    {
        // make the gradient and value fields of parameters point to the same
        // place, so that when the descendants of the parameter Var's do a
        // bprop this automatically increments the parameters (by the right
        // amount since we set the cost->gradient to -learning_rate).
        int n = params.size();
        oldgradientlocations.resize(n);
        for(int i=0; i<n; i++)
            oldgradientlocations[i] = params[i]->defineGradientLocation(params[i]->matValue);
    }
    else
        params.clearGradient();

    if(other_costs.length() != 0)
    {
        for(int i=0; i<other_params.length(); i++)
            other_params[i].clearGradient();
    }

    int stage_max = stage + nstages; // the stage to reach

    int current_schedule = 0;
    int n_schedules = lr_schedule.length();
    if (n_schedules>0)
        while (current_schedule+1 < n_schedules && stage > lr_schedule(current_schedule,0))
            current_schedule++;

    while (stage < stage_max) 
    {        
        if (n_schedules>0)
        {
            while (current_schedule+1 < n_schedules && stage > lr_schedule(current_schedule,0))
                current_schedule++;
            learning_rate = start_learning_rate * lr_schedule(current_schedule,1);
        }
        else
            learning_rate = start_learning_rate/(1.0+decrease_constant*stage);

        if(other_costs.length() != 0)
        {
            for(int i=0; i<other_costs.length(); i++)
            {
                other_proppaths[i].clearGradient();
                other_costs[i]->gradient[0] = -learning_rate*other_weight;

                static bool display_var_graph_before_fbprop=false;
                if (display_var_graph_before_fbprop)
                    displayVarGraph(other_proppaths[i], true, 333);
                //displayVarGraph(other_proppaths[i], true, 333);
                other_proppaths[i].fbprop(); 
                //displayVarGraph(other_proppaths[i], true, 333);
#ifdef BOUNDCHECK
                int np = other_params[i].size();
                for(int j=0; j<np; j++)
                    if (other_params[i][j]->value.hasMissing())
                        PLERROR("parameter updated with NaN");
#endif
                static bool display_var_graph=false;
                if (display_var_graph)
                    displayVarGraph(proppath, true, 333);

//       // Debugging of negative NLL bug...
//       if (cost->value[0] <= 0) {
//         displayVarGraph(proppath, true, 333);
//         cerr << "Negative NLL cost vector = " << cost << endl;
//         PLERROR("Negative NLL encountered in optimization");
//       }

                // set params += -learning_rate * params.gradient
                other_params[i].updateAndClear();
            }
        }


        proppath.clearGradient();
        cost->gradient[0] = -learning_rate;

        static bool display_var_graph_before_fbprop=false;
        if (display_var_graph_before_fbprop)
            displayVarGraph(proppath, true, 333);
        proppath.fbprop(); 
#ifdef BOUNDCHECK
        int np = params.size();
        for(int i=0; i<np; i++)
            if (params[i]->value.hasMissing())
                PLERROR("parameter updated with NaN");
#endif
        static bool display_var_graph=false;
        if (display_var_graph)
            displayVarGraph(proppath, true, 333);

//       // Debugging of negative NLL bug...
//       if (cost->value[0] <= 0) {
//         displayVarGraph(proppath, true, 333);
//         cerr << "Negative NLL cost vector = " << cost << endl;
//         PLERROR("Negative NLL encountered in optimization");
//       }

        // set params += -learning_rate * params.gradient
        if(!stochastic_hack)
            params.updateAndClear();
        else
            if(partial_update_vars.length() != 0) 
                for(int i=0; i<partial_update_vars.length(); i++)
                    partial_update_vars[i]->clearRowsToUpdate();
        if (verbosity > 0 && stage % verbosity == 0) {
            MODULE_LOG << "Stage " << stage << ": " << cost->value
                       << "\tlr=" << learning_rate
                       << endl;
        }
        stats_coll.update(cost->value);
        ++stage;
    }

    if(stochastic_hack) // restore the gradients as they previously were...
    {
        int n = params.size();
        for(int i=0; i<n; i++)
            params[i]->defineGradientLocation(oldgradientlocations[i]);
    }
    return false;
}

// Very old code.  TO BE DEPRECATED
#if 0
/*
real ScaledGradientOptimizer::optimize()
{
    ofstream out;
    if (!filename.empty())
        out.open(filename.c_str());

    eps_scale.fill(1.0);
    Vec first_long_time_mv; 
    real best_cost = 1e30;
    Vec prev_params(gradient.length());
    Vec prev_gradient(gradient.length());
    Vec best_params(gradient.length());
    Vec best_gradient(gradient.length());
    params >> prev_params;
    params >> best_params;
    params.copyGradientTo(prev_gradient);
    params.copyGradientTo(best_gradient);
    int n_long = (int)(1.0/(short_time_mac*long_time_mac));
    cout << "start learning rate = " << start_learning_rate << endl;
    learning_rate = 0;
    Vec meancost(cost->size());
    Vec lastmeancost(cost->size());
    early_stop = false;
    for (int t=0; !early_stop && t<nupdates; t++)
    {
        params.clearGradient();
        proppath.clearGradient();
        cost->gradient[0] = 1.0;
        proppath.fbprop();
        if (every!=0) 
        {
            if ((t%every==0) && (t>0)) 
            {
                meancost /= real(every);      
                if (meancost[0] > best_cost)
                {
                    start_learning_rate *= 0.5;
                    params << best_params;
                    params.copyGradientFrom(best_gradient);
                }
                else
                {
                    best_cost = meancost[0];
                    best_params << prev_params;
                    best_gradient << prev_gradient;
                    params >> prev_params;
                    params.copyGradientTo(prev_gradient);
                    start_learning_rate *= 1.1;
                }
                learning_rate = start_learning_rate/(1.0+decrease_constant*t);
                cout << t << ' ' << meancost << ' ' << learning_rate << endl;
                if (out)
                    out << t << ' ' << meancost << ' ' << learning_rate << endl;
                early_stop = measure(t,meancost);
                lastmeancost << meancost;
                meancost.clear();
            }
            else
            {
                learning_rate = start_learning_rate/(1.0+decrease_constant*t);
            }
        } 
        params.copyGradientTo(gradient);
        if (t<n_long-1)
            // prepare to initialize the moving average
            // (by doing initially a batch average)
        {
            long_time_ma += gradient;
            squareAcc(long_time_mv, gradient);
        }
        else if (t==n_long-1) 
            // prepare to initialize the moving averages
        {
            long_time_ma *= real(1.0)/ (real)n_long;
            long_time_mv *= real(1.0)/ (real)n_long;
            squareMultiplyAcc(long_time_mv, long_time_ma,(real)-1);
            first_long_time_mv << long_time_mv;
            short_time_ma << long_time_ma;
        }
        else 
            // steady-state mode
        {
            exponentialMovingAverageUpdate(short_time_ma, gradient,short_time_mac);
            exponentialMovingAverageUpdate(long_time_ma, short_time_ma,long_time_mac);
            exponentialMovingSquareUpdate(long_time_mv, gradient,long_time_mac);
            if (t%n_long==0)
            {
                real prev_eps = 0.5*(max(eps_scale)+mean(eps_scale));
                //apply(long_time_mv,long_time_md,sqrt);
                cout << "******* AT T= " << t << " *******" << endl;
                cout << "average gradient norm = " 
                     << norm(long_time_ma) << endl;
                cout << "average gradient = " << long_time_ma << endl;
                //cout << "short time average gradient = " << short_time_ma << endl;
                Vec long_time_md = sqrt(long_time_mv);
                cout << "sdev(gradient) = " << long_time_md << endl;
                cout << "mean(sdev(gradient)) = " << mean(long_time_md) << endl;
                add(long_time_mv,regularizer,eps_scale);
                //divide(1.0,long_time_mv,eps_scale);
                //divide(first_long_time_mv,long_time_mv,eps_scale);
                cout << "eps_scale = " << eps_scale << endl;
                real new_eps = 0.5*(max(eps_scale)+mean(eps_scale));
                start_learning_rate *= prev_eps / new_eps;
                learning_rate = start_learning_rate / (1 + decrease_constant*t);
                cout << "scale learning rate by " << prev_eps / new_eps << " to " << learning_rate << endl;

                //real *e=eps_scale.data();
                //for (int i=0;i<eps_scale.length();i++)
                //  if (e[i]>regularizer) e[i]=regularizer;
                //cout << "regularized  eps_scale = " << eps_scale << endl;
                //cout << "avg/sdev) = " << long_time_md  << endl;
                //eps_scale *= learning_rate;
                //cout << "regularized eps_scale * learning_rate = " << eps_scale << endl;
            }
        }
        // set params += -learning_rate * params.gradient
        meancost += cost->value;
        gradient *= eps_scale;
        params.update(-learning_rate,gradient);
    }
    return meancost[0];
}
*/
#endif // #if 0

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :