AdaptGradientOptimizer.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2003 Pascal Vincent, Yoshua Bengio,
//                         Olivier Delalleau and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: AdaptGradientOptimizer.cc 4774 2006-01-10 20:05:24Z tihocan $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "AdaptGradientOptimizer.h"
#include <plearn/var/SumOfVariable.h>

namespace PLearn {
using namespace std;

AdaptGradientOptimizer::AdaptGradientOptimizer():
    adapt_coeff1(0),
    adapt_coeff2(0),
    decrease_constant(0),
    learning_rate_adaptation(0),
    max_learning_rate(2e-2),
    min_learning_rate(1e-3),
    start_learning_rate(1e-2)
{}

/*
AdaptGradientOptimizer::AdaptGradientOptimizer(VarArray the_params, Var the_cost,
                                               real the_start_learning_rate, 
                                               real the_decrease_constant,
                                               real the_min_learning_rate,
                                               real the_max_learning_rate,
                                               int the_learning_rate_adaptation,
                                               real the_adapt_coeff1,
                                               real the_adapt_coeff2,
                                               int n_updates, const string& filename, 
                                               int every_iterations)
    :inherited(the_params,the_cost, n_updates, filename, every_iterations),
     start_learning_rate(the_start_learning_rate),
     min_learning_rate(the_min_learning_rate),
     max_learning_rate(the_max_learning_rate),
     learning_rate_adaptation(the_learning_rate_adaptation),
     adapt_coeff1(the_adapt_coeff1),
     adapt_coeff2(the_adapt_coeff2),
     decrease_constant(the_decrease_constant) {}

AdaptGradientOptimizer::AdaptGradientOptimizer(VarArray the_params, Var the_cost, 
                                               VarArray update_for_measure,
                                               real the_start_learning_rate, 
                                               real the_decrease_constant,
                                               real the_min_learning_rate,
                                               real the_max_learning_rate,
                                               int the_learning_rate_adaptation,
                                               real the_adapt_coeff1,
                                               real the_adapt_coeff2,
                                               int n_updates, const string& filename, 
                                               int every_iterations)
    :inherited(the_params,the_cost, update_for_measure,
               n_updates, filename, every_iterations),
     start_learning_rate(the_start_learning_rate),
     min_learning_rate(the_min_learning_rate),
     max_learning_rate(the_max_learning_rate),
     learning_rate_adaptation(the_learning_rate_adaptation),
     adapt_coeff1(the_adapt_coeff1),
     adapt_coeff2(the_adapt_coeff2),
     decrease_constant(the_decrease_constant) {}
    */

void AdaptGradientOptimizer::declareOptions(OptionList& ol)
{
    declareOption(ol, "start_learning_rate", &AdaptGradientOptimizer::start_learning_rate, OptionBase::buildoption, 
                  "    the initial learning rate\n");

    declareOption(ol, "min_learning_rate", &AdaptGradientOptimizer::min_learning_rate, OptionBase::buildoption, 
                  "    the minimum value for the learning rate, when there is learning rate adaptation\n");

    declareOption(ol, "max_learning_rate", &AdaptGradientOptimizer::max_learning_rate, OptionBase::buildoption, 
                  "    the maximum value for the learning rate, when there is learning rate adaptation\n");
    declareOption(ol, "adapt_coeff1", &AdaptGradientOptimizer::adapt_coeff1, OptionBase::buildoption, 
                  "    a coefficient for learning rate adaptation; its use depends on the kind of adaptation\n");

    declareOption(ol, "adapt_coeff2", &AdaptGradientOptimizer::adapt_coeff2, OptionBase::buildoption, 
                  "    a coefficient for learning rate adaptation; its use depends on the kind of adaptation\n");

    declareOption(ol, "decrease_constant", &AdaptGradientOptimizer::decrease_constant, OptionBase::buildoption, 
                  "    the learning rate decrease constant: each update of the weights is scaled by the\n\
         coefficient 1/(1 + stage * decrease_constant)\n");

    declareOption(ol, "learning_rate_adaptation", &AdaptGradientOptimizer::learning_rate_adaptation, OptionBase::buildoption, 
                  "    the way the learning rates evolve:\n\
          - 0  : no adaptation\n\
          - 1  : basic adaptation:\n\
                   if the gradient of weight i has kept the same sign for two consecutive epochs\n\
                     then lr(i) = lr(i) + lr(i) * adapt_coeff1\n\
                     else lr(i) = lr(i) - lr(i) * adapt_coeff2\n\
          - 2  : ALAP1 formula. See code (not really tested)\n\
          - 3  : variance-dependent learning rate:\n\
                   let avg(i) be the exponential average of the variance of the gradient of weight i\n\
                   over the past epochs, where the coefficient of the exponential average is adapt_coeff1\n\
                   (adapt_coeff1 = 0 means no averaging)\n\
                   if avg(i) is low (i.e. < the average of all avg(j))\n\
                     then lr(i) = max_learning_rate\n\
                     else lr(i) = min_learning_rate\n");
00140 
00141     declareOption(ol, "adapt_every", &AdaptGradientOptimizer::adapt_every, OptionBase::buildoption, 
00142                   "    the learning rate adaptation will occur after adapt_every updates of the weights (0 means after each epoch)\n");
00143 
00144     inherited::declareOptions(ol);
00145 }
00146 
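// Example configuration (a sketch only: the option values below are hypothetical
// and the snippet assumes the usual PLearn object-specification syntax used in
// .plearn scripts; only the option names come from declareOptions above):
//
//   AdaptGradientOptimizer(
//       start_learning_rate = 0.01;
//       decrease_constant = 0;
//       learning_rate_adaptation = 1;   # basic sign-based adaptation
//       adapt_coeff1 = 0.03;            # relative increase of lr(i)
//       adapt_coeff2 = 0.05;            # relative decrease of lr(i)
//       min_learning_rate = 1e-3;
//       max_learning_rate = 2e-2;
//       adapt_every = 0;                # adapt once per epoch
//   )
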
PLEARN_IMPLEMENT_OBJECT(AdaptGradientOptimizer,
                        "An optimizer that performs gradient descent with learning rate adaptation.",
                        ""
    );

// build_ //
void AdaptGradientOptimizer::build_() {
    early_stop = false;
    count_updates = 0;
    learning_rate = start_learning_rate;
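    // The "stochastic hack" (see optimizeN below) only applies when the cost is a
    // SumOfVariable evaluated on a single sample at a time, i.e. pure stochastic
    // gradient descent.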
    SumOfVariable* sumofvar = dynamic_cast<SumOfVariable*>((Variable*)cost);
    stochastic_hack = sumofvar!=0 && sumofvar->nsamples==1;
    params.clearGradient();
    int n = params.nelems();
    if (n > 0) {
        store_var_grad.resize(n);
        store_var_grad.clear();
        store_grad.resize(n);
        store_quad_grad.resize(n);
        store_grad.clear();
        store_quad_grad.clear();
        learning_rates.resize(n);
        gradient.resize(n);
        tmp_storage.resize(n);
        old_evol.resize(n);
        oldgradientlocations.resize(params.size());
        learning_rates.fill(start_learning_rate);
        switch (learning_rate_adaptation) {
        case 0:
            break;
        case 1:
            // tmp_storage is used to store the old parameters
            params.copyTo(tmp_storage);
            old_evol.fill(0);
            break;
        case 2:
            // tmp_storage is used to store the initial opposite gradient
            computeOppositeGradient(tmp_storage);
            break;
        case 3:
            break;
        default:
            break;
        }
    }
}

// adaptLearningRateALAP1 //
void AdaptGradientOptimizer::adaptLearningRateALAP1(
    Vec old_gradient,
    Vec new_gradient) {
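    // The global learning rate is moved proportionally to the (averaged) dot
    // product between the previous and current gradients: a positive product
    // (consecutive steps in the same direction) increases it, a negative product
    // (oscillation) decreases it, and the result is clipped to
    // [min_learning_rate, max_learning_rate].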
    int j = 0; // the current index in learning_rates
    real prod = 0;
    for (j = 0; j<params.nelems(); j++) {
        prod += old_gradient[j] * new_gradient[j];
    }
    // The division by j (= params.nelems()) is a scaling coefficient
    learning_rate = learning_rate + adapt_coeff1 * prod / real(j);
    if (learning_rate < min_learning_rate) {
        learning_rate = min_learning_rate;
    } else if (learning_rate > max_learning_rate) {
        learning_rate = max_learning_rate;
    }
}

// adaptLearningRateBasic //
void AdaptGradientOptimizer::adaptLearningRateBasic(
    Vec old_params,
    Vec old_evol) {
    Var* array = params->data();
    int j = 0;
    int k;
    real u; // used to store old_evol[j]
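    // Walk over the flat parameter vector: i indexes the Vars in params, k is the
    // offset of Var i in the flat vector, and j is the global (flat) index, so
    // array[i]->valuedata[j-k] is the current value of parameter j.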
    for (int i=0; i<params.size(); i++) {
        k = j;
        for (; j<k+array[i]->nelems(); j++) {
            u = old_evol[j];
            real diff = array[i]->valuedata[j-k] - old_params[j];
            if (diff > 0) {
                // the parameter has increased
                if (u > 0) {
                    old_evol[j]++;
                } else {
                    old_evol[j] = +1;
                }
            } else if (diff < 0) {
                // the parameter has decreased
                if (u < 0) {
                    old_evol[j]--;
                } else {
                    old_evol[j] = -1;
                }
            } else {
                // there has been no change
                old_evol[j] = 0;
            }
            if (u * old_evol[j] > 0) {
                // consecutive updates in the same direction
                learning_rates[j] += learning_rates[j] * adapt_coeff1;
            }
            else if (u * old_evol[j] < 0) {
                // oscillation
                learning_rates[j] -= learning_rates[j] * adapt_coeff2;
            }

            if (learning_rates[j] < min_learning_rate) {
                learning_rates[j] = min_learning_rate;
            } else if (learning_rates[j] > max_learning_rate) {
                learning_rates[j] = max_learning_rate;
            }
        }
    }
}

// adaptLearningRateVariance //
void AdaptGradientOptimizer::adaptLearningRateVariance() {
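    // store_grad[j] and store_quad_grad[j] hold the sum and the sum of squares of
    // gradient j over the last count_updates steps (accumulated in optimizeN), so
    // store_quad_grad[j] - store_grad[j]^2 / count_updates is its (unnormalized)
    // sample variance. This quantity is folded into an exponential moving average
    // with coefficient adapt_coeff1, and each learning rate is then set to
    // max_learning_rate or min_learning_rate depending on whether its averaged
    // variance is below or above the mean over all parameters.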
    real moy_var = 0;
    real exp_avg_coeff = 0;
    if (stage > 1) {
        exp_avg_coeff = adapt_coeff1;
    }
    for (int j=0; j<params.nelems(); j++) {
        // Compute variance
        store_var_grad[j] =
            store_var_grad[j] * exp_avg_coeff +
            (store_quad_grad[j] - store_grad[j]*store_grad[j] / real(count_updates))
            * (1 - exp_avg_coeff);
        moy_var += store_var_grad[j];
    }
    count_updates = 0;
    store_quad_grad.clear();
    store_grad.clear();
    moy_var /= real(params.nelems());
    int nb_low_var = 0, nb_high_var = 0;
    real var_limit = 1.0;
    for (int j=0; j<params.nelems(); j++) {
        if (store_var_grad[j] <= moy_var * var_limit) {
            learning_rates[j] = max_learning_rate;
            nb_low_var++;
        } else {
            learning_rates[j] = min_learning_rate;
            nb_high_var++;
        }
    }
}

// optimize //
real AdaptGradientOptimizer::optimize()
{
    PLERROR("In AdaptGradientOptimizer::optimize - deprecated, use optimizeN instead");
    return 0;
}

// optimizeN //
bool AdaptGradientOptimizer::optimizeN(VecStatsCollector& stats_coll) {

    bool adapt = (learning_rate_adaptation != 0);
    stochastic_hack = stochastic_hack && !adapt;
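    // The stochastic hack writes the (already scaled) gradient straight into the
    // parameters, so it cannot be combined with per-parameter learning rate
    // adaptation, which needs an explicit update step.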
    if (adapt_every == 0) {
        adapt_every = nstages;  // the number of steps to complete an epoch
    }

    // Big hack for the special case of stochastic gradient, to avoid doing an explicit update:
    // temporarily change the gradient fields of the parameters to point to the parameters
    // themselves, so that gradients are "accumulated" directly in the parameters, thus updating them!
    if(stochastic_hack) {
        int n = params.size();
        for(int i=0; i<n; i++)
            oldgradientlocations[i] = params[i]->defineGradientLocation(params[i]->matValue);
    }

    int stage_max = stage + nstages; // the stage to reach

    for (; !early_stop && stage<stage_max; stage++) {

        // Take into account the learning rate decrease.
        // When learning rate adaptation is used, the decrease is applied through the
        // scaling coefficient in the update step below; without adaptation it is
        // applied here directly.
        switch (learning_rate_adaptation) {
        case 0:
            learning_rate = start_learning_rate/(1.0+decrease_constant*stage);
            break;
        default:
            break;
        }

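        // Seed the backpropagation: with adaptation, the output gradient is set to -1
        // so that fbprop leaves the raw (negated) gradient in the parameters' gradient
        // fields and the per-parameter learning rates are applied in the update below.
        // Without adaptation, the output gradient is set to -learning_rate, so the
        // accumulated gradient is already the full update (which, under the stochastic
        // hack, lands directly in the parameter values).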
        proppath.clearGradient();
        if (adapt)
            cost->gradient[0] = -1.;
        else
            cost->gradient[0] = -learning_rate;

        proppath.fbprop();

        // Actions to take after each step, depending on the
        // adaptation method used:
        // - moving along the chosen direction
        // - adapting the learning rate
        // - storing some data
        real coeff = 1/(1.0 + stage * decrease_constant); // the scaling coefficient
        switch (learning_rate_adaptation) {
        case 0:
            if (!stochastic_hack) {
                params.updateAndClear();
            }
            break;
        case 1:
            params.copyGradientTo(gradient);
            // TODO Not really efficient, write a faster update?
            params.update(learning_rates, gradient, coeff);
            params.clearGradient();
            break;
        case 2:
            params.copyGradientTo(gradient);
            adaptLearningRateALAP1(tmp_storage, gradient);
            params.update(learning_rate, gradient);
            tmp_storage << gradient;
            params.clearGradient();
            break;
        case 3:
            // storing sum and sum-of-squares of the gradient in order to compute
            // the variance later
            params.copyGradientTo(gradient);
            for (int i=0; i<params.nelems(); i++) {
                store_grad[i] += gradient[i];
                store_quad_grad[i] += gradient[i] * gradient[i];
            }
            count_updates++;
            params.update(learning_rates, gradient, coeff);
            params.clearGradient();
            break;
        default:
            break;
        }

        if ((stage + 1) % adapt_every == 0) {
            // Time for learning rate adaptation
            switch (learning_rate_adaptation) {
            case 0:
                break;
            case 1:
                adaptLearningRateBasic(tmp_storage, old_evol);
                params.copyTo(tmp_storage);
                break;
            case 2:
                // Nothing to do, the adaptation is done after each example
                break;
            case 3:
                adaptLearningRateVariance();
                break;
            default:
                break;
            }
        }

        stats_coll.update(cost->value);
    }

    if(stochastic_hack) // restore the gradients as they previously were...
    {
        int n = params.size();
        for(int i=0; i<n; i++)
            params[i]->defineGradientLocation(oldgradientlocations[i]);
    }

    if (early_stop)
        cout << "Early Stopping !" << endl;
    return early_stop;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :