AutoScaledGradientOptimizer.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent and Yoshua Bengio
// Copyright (C) 1999-2002, 2006 University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: AutoScaledGradientOptimizer.cc 5852 2006-06-14 14:40:03Z larocheh $
 * This file is part of the PLearn library.
 ******************************************************* */

#define PL_LOG_MODULE_NAME "AutoScaledGradientOptimizer"

#include "AutoScaledGradientOptimizer.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/display/DisplayUtils.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    AutoScaledGradientOptimizer,
    "Optimization by gradient descent with adapted scaling for each parameter.",
    "This is a simple variation on the basic GradientOptimizer \n"
    "in which the gradient is scaled elementwise (for each parameter) \n"
    "by a scaling factor that is 1 over an average of the \n"
    "absolute value of the gradient plus some small epsilon. \n"
    "\n"
);
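
// For illustration: the elementwise scaling computed in optimizeN() is
// scaling[i] = 1/(meanabsgrad[i] + epsilon). With epsilon = 1e-6, a parameter
// whose gradients average 10 in absolute value is scaled by roughly 0.1,
// while one averaging 0.01 is scaled by roughly 100, so the effective step
// size becomes comparable across parameters.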

AutoScaledGradientOptimizer::AutoScaledGradientOptimizer():
    learning_rate(0.),
    start_learning_rate(1e-2),
    decrease_constant(0),
    verbosity(0),
    evaluate_scaling_every(1000),
    evaluate_scaling_during(1000),
    epsilon(1e-6),
    nsteps_remaining_for_evaluation(-1)
{}


void AutoScaledGradientOptimizer::declareOptions(OptionList& ol)
{
    declareOption(
        ol, "start_learning_rate", &AutoScaledGradientOptimizer::start_learning_rate,
        OptionBase::buildoption, 
        "The initial learning rate\n");

    declareOption(
        ol, "learning_rate", &AutoScaledGradientOptimizer::learning_rate,
        OptionBase::learntoption, 
        "The current learning rate\n");

    declareOption(
        ol, "decrease_constant", &AutoScaledGradientOptimizer::decrease_constant,
        OptionBase::buildoption, 
        "The learning rate decrease constant\n");

    declareOption(
        ol, "lr_schedule", &AutoScaledGradientOptimizer::lr_schedule,
        OptionBase::buildoption, 
        "Fixed schedule instead of decrease_constant. This matrix has 2 columns: iteration_threshold \n"
        "and learning_rate_factor. As soon as the iteration number goes above the iteration_threshold,\n"
        "the corresponding learning_rate_factor is applied (multiplied) to the start_learning_rate to\n"
        "obtain the learning_rate.\n");
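    // For illustration (hypothetical values): a 2x2 schedule with rows
    // (10000, 1) and (1000000, 0.1) uses the full start_learning_rate for the
    // first 10000 stages, then multiplies it by 0.1 for the remaining stages.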

    declareOption(
        ol, "verbosity", &AutoScaledGradientOptimizer::verbosity,
        OptionBase::buildoption, 
        "Controls the amount of output.  If zero, does not print anything.\n"
        "If 'verbosity'=V, print the current cost and learning rate if\n"
        "\n"
        "    stage % V == 0\n"
        "\n"
        "i.e. every V stages.  (Default=0)\n");

    declareOption(
        ol, "evaluate_scaling_every", &AutoScaledGradientOptimizer::evaluate_scaling_every,
        OptionBase::buildoption, 
        "Every how many optimization steps the mean absolute gradient and the scaling are re-evaluated\n");

    declareOption(
        ol, "evaluate_scaling_during", &AutoScaledGradientOptimizer::evaluate_scaling_during,
        OptionBase::buildoption, 
        "How many steps are used to re-evaluate the mean absolute gradient and the scaling\n");

    declareOption(
        ol, "epsilon", &AutoScaledGradientOptimizer::epsilon,
        OptionBase::buildoption, 
        "The scaling will be 1/(mean_abs_grad + epsilon)\n");

    inherited::declareOptions(ol);
}

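// In addition to the base class setup, this creates flat vectors param_values
// and param_gradients that share storage with the parameters' value and
// gradient fields, and initializes the scaling vector (a negative epsilon
// disables adaptive scaling: the scaling is simply filled with 1.0).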
void AutoScaledGradientOptimizer::setToOptimize(const VarArray& the_params, Var the_cost, VarArray the_other_costs, TVec<VarArray> the_other_params, real the_other_weight)
{
    inherited::setToOptimize(the_params, the_cost, the_other_costs, the_other_params, the_other_weight);
    int n = params.nelems();
    param_values = Vec(n);
    param_gradients = Vec(n);
    params.makeSharedValue(param_values);
    params.makeSharedGradient(param_gradients);
    scaling.resize(n);
    scaling.clear();
    if(epsilon<0)
        scaling.fill(1.0);
    meanabsgrad.resize(n);
    meanabsgrad.clear();
}


// static bool displayvg=false;

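// Performs nstages gradient steps. Each step does a forward/backward pass
// (fbprop) on the propagation path, then updates the parameters with
// -learning_rate * gradient * scaling. Every evaluate_scaling_every steps,
// the per-parameter mean absolute gradient is re-estimated over
// evaluate_scaling_during steps and the scaling is recomputed from it.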
bool AutoScaledGradientOptimizer::optimizeN(VecStatsCollector& stats_coll) 
{
    PLASSERT_MSG(other_costs.length()==0, "gradient on other costs not currently supported");

    param_gradients.clear();

    int stage_max = stage + nstages; // the stage to reach

    int current_schedule = 0;
    int n_schedules = lr_schedule.length();
    if (n_schedules>0)
        while (current_schedule+1 < n_schedules && stage > lr_schedule(current_schedule,0))
            current_schedule++;

    while (stage < stage_max) 
    {
        if (n_schedules>0)
        {
            while (current_schedule+1 < n_schedules && stage > lr_schedule(current_schedule,0))
                current_schedule++;
            learning_rate = start_learning_rate * lr_schedule(current_schedule,1);
        }
        else
            learning_rate = start_learning_rate/(1.0+decrease_constant*stage);

        proppath.clearGradient();
        cost->gradient[0] = 1.0;

        static bool display_var_graph_before_fbprop=false;
        if (display_var_graph_before_fbprop)
            displayVarGraph(proppath, true, 333);
        proppath.fbprop(); 
#ifdef BOUNDCHECK
        int np = params.size();
        for(int i=0; i<np; i++)
            if (params[i]->value.hasMissing())
                PLERROR("parameter updated with NaN");
#endif
        static bool display_var_graph=false;
        if (display_var_graph)
            displayVarGraph(proppath, true, 333);

//       // Debugging of negative NLL bug...
//       if (cost->value[0] <= 0) {
//         displayVarGraph(proppath, true, 333);
//         cerr << "Negative NLL cost vector = " << cost << endl;
//         PLERROR("Negative NLL encountered in optimization");
//       }

        // set params += -learning_rate * params.gradient * scaling
        {
        real* p_val = param_values.data();
        real* p_grad = param_gradients.data();
        real* p_scale = scaling.data();
        real neg_learning_rate = -learning_rate;

        int n = param_values.length();
        while(n--)
            *p_val++ += neg_learning_rate*(*p_grad++)*(*p_scale++);
        }

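        // Periodic re-estimation of the scaling: every evaluate_scaling_every
        // steps, start accumulating |gradient| into meanabsgrad; after
        // evaluate_scaling_during steps, average it and set
        // scaling[i] = 1/(meanabsgrad[i] + epsilon).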
        if(stage%evaluate_scaling_every==0)
        {
            nsteps_remaining_for_evaluation = evaluate_scaling_during;
            meanabsgrad.clear();
            if(verbosity>=4)
                perr << "At stage " << stage << " beginning evaluation of meanabsgrad over " << evaluate_scaling_during << " stages" << endl;
        }

        if(nsteps_remaining_for_evaluation>0)
        {
            real* p_grad = param_gradients.data();
            real* p_mean = meanabsgrad.data();
            int n = param_gradients.length();
            while(n--)
                *p_mean++ += fabs(*p_grad++);
            --nsteps_remaining_for_evaluation;

            if(nsteps_remaining_for_evaluation==0) // finalize evaluation
            {
                int n = param_gradients.length();
                for(int i=0; i<n; i++)
                {
                    meanabsgrad[i] /= evaluate_scaling_during;
                    scaling[i] = 1.0/(meanabsgrad[i]+epsilon);
                }
                if(verbosity>=4)
                    perr << "At stage " << stage 
                         << " finished evaluating meanabsgrad. It's in range: ( " 
                         << min(meanabsgrad) << ",  " << max(meanabsgrad) << " )" << endl;
                if(verbosity>=5)
                    perr << meanabsgrad << endl;

                if(epsilon<0)
                    scaling.fill(1.0);
            }
        }
        param_gradients.clear();

        if (verbosity > 0 && stage % verbosity == 0) {
            MODULE_LOG << "Stage " << stage << ": " << cost->value
                       << "\tlr=" << learning_rate
                       << endl;
        }
        stats_coll.update(cost->value);
        ++stage;
    }

    return false;
}

} // end of namespace PLearn

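// A rough configuration sketch (hypothetical values; option names as declared
// in declareOptions above, written in PLearn's option syntax):
//
//   AutoScaledGradientOptimizer(
//       start_learning_rate = 0.01;
//       decrease_constant = 0;
//       evaluate_scaling_every = 1000;
//       evaluate_scaling_during = 1000;
//       epsilon = 1e-6;
//   )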

/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :