// -*- C++ -*-

// GradNNetLayerModule.cc
//
// Copyright (C) 2005 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: GradNNetLayerModule.cc,v 1.3 2006/01/18 04:04:06 lamblinp Exp $
   ******************************************************* */

// Authors: Pascal Lamblin

#include "GradNNetLayerModule.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    GradNNetLayerModule,
    "Affine transformation module, with stochastic gradient descent updates",
    "Neural Network layer, using stochastic gradient to update neuron weights\n"
    "       Output = weights * Input + bias\n"
    "Weights and bias are updated by online gradient descent, with a learning\n"
    "rate that may decrease as 1/(1 + n_updates_done * decrease_constant).\n"
    "An L1 and L2 regularization penalty can be added to push weights towards 0.\n"
    "Weights can be initialized to 0, to a given initial matrix, or randomly\n"
    "from a uniform distribution.\n"
    );
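
// A rough usage sketch (hypothetical driver code, not part of the library;
// it assumes the usual public OnlineLearningModule interface for
// input_size/output_size and the PP<> smart pointer):
//
//     PP<GradNNetLayerModule> layer = new GradNNetLayerModule();
//     layer->input_size  = 10;
//     layer->output_size = 5;
//     layer->start_learning_rate = 0.01;
//     layer->init_weights_random_scale = 0; // start from all-zero weights,
//                                           // so no random_gen is required
//     layer->build();   // build_() calls forget(), which sizes weights and bias
//
//     Vec input(10), output, out_grad(5), in_grad;
//     input.fill(0.5);
//     out_grad.fill(0.1);
//     layer->fprop(input, output);                          // output = weights*input + bias
//     layer->bpropUpdate(input, output, in_grad, out_grad,
//                        false);                            // one SGD step, fills in_grad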

/////////////////////////
// GradNNetLayerModule //
/////////////////////////
GradNNetLayerModule::GradNNetLayerModule():
    start_learning_rate( .001 ),
    decrease_constant( 0. ),
    init_weights_random_scale( 1. ),
    L1_penalty_factor( 0. ),
    L2_penalty_factor( 0. ),
    step_number( 0 )
{}

///////////
// fprop //
///////////
void GradNNetLayerModule::fprop(const Vec& input, Vec& output) const
{
    PLASSERT_MSG( input.size() == input_size,
                  "input.size() should be equal to this->input_size" );

    output.resize( output_size );

    // Applies linear transformation
    for( int i=0 ; i<output_size ; i++ )
        output[i] = dot( weights(i), input ) + bias[i];
}

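// Mini-batch version: computes outputs = inputs * weights', then adds the
// bias to every row (one row of 'inputs'/'outputs' per example).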
void GradNNetLayerModule::fprop(const Mat& inputs, Mat& outputs)
{
    PLASSERT( inputs.width() == input_size );
    int n = inputs.length();
    outputs.resize(n, output_size);
    productTranspose(outputs, inputs, weights);

    // Add bias.
    resizeOnes(n);
    externalProductAcc(outputs, ones, bias); // could be more efficient, but not critical
}

/////////////////
// bpropUpdate //
/////////////////
// We are not using blas routines anymore, because we would iterate several
// times over the weight matrix.
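// For each output unit i and input index j, one call performs:
//   learning_rate = start_learning_rate / (1 + decrease_constant * step_number)
//   bias[i]      -= learning_rate * output_gradient[i]
//   weights(i,j) *= 1 - learning_rate * L2_penalty_factor   (if L2_penalty_factor > 0)
//   weights(i,j) -= learning_rate * output_gradient[i] * input[j]
//   weights(i,j)  : soft-thresholded towards 0 by learning_rate * L1_penalty_factor
//                   (if L1_penalty_factor > 0)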
void GradNNetLayerModule::bpropUpdate(const Vec& input, const Vec& output,
                                      const Vec& output_gradient)
{
    PLASSERT_MSG( input.size() == input_size,
                  "input.size() should be equal to this->input_size" );
    PLASSERT_MSG( output.size() == output_size,
                  "output.size() should be equal to this->output_size" );
    PLASSERT_MSG( output_gradient.size() == output_size,
                  "output_gradient.size() should be equal to this->output_size"
                );

    learning_rate = start_learning_rate / (1+decrease_constant*step_number);

    for( int i=0; i<output_size; i++ )
    {
        real og_i = output_gradient[i];
        real* w_ = weights[i];

        real delta_L1 = learning_rate * L1_penalty_factor;
        real delta_L2 = learning_rate * L2_penalty_factor;
        if( delta_L2 > 1 )
            PLWARNING("GradNNetLayerModule::bpropUpdate:\n"
                      "learning rate = %f is too large!\n", learning_rate);

        real lr_og_i = learning_rate * og_i;
        bias[i] -= lr_og_i;

        for( int j=0; j<input_size; j++ )
        {
            if( delta_L2 > 0. )
                w_[j] *= (1 - delta_L2);

            w_[j] -= input[j] * lr_og_i;

            if( delta_L1 > 0. )
            {
                if( w_[j] > delta_L1 )
                    w_[j] -= delta_L1;
                else if( w_[j] < -delta_L1 )
                    w_[j] += delta_L1;
                else
                    w_[j] = 0.;
            }

        }
    }
    step_number++;
}


// Simply updates and propagates back gradient
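// Performs the same parameter updates as above, and additionally accumulates
// input_gradient[j] += weights(i,j) * output_gradient[i], using each weight
// as it is *before* being updated for this sample.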
void GradNNetLayerModule::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient,
                                      bool accumulate)
{
    PLASSERT_MSG( input.size() == input_size,
                  "input.size() should be equal to this->input_size" );
    PLASSERT_MSG( output.size() == output_size,
                  "output.size() should be equal to this->output_size" );
    PLASSERT_MSG( output_gradient.size() == output_size,
                  "output_gradient.size() should be equal to this->output_size"
                );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( input_size );
        input_gradient.clear();
    }

    learning_rate = start_learning_rate / (1+decrease_constant*step_number);

    for( int i=0; i<output_size; i++ )
    {
        real og_i = output_gradient[i];
        real* w_ = weights[i];

        real delta_L1 = learning_rate * L1_penalty_factor;
        real delta_L2 = learning_rate * L2_penalty_factor;
        if( delta_L2 > 1 )
            PLWARNING("GradNNetLayerModule::bpropUpdate:\n"
                      "learning rate = %f is too large!\n", learning_rate);

        real lr_og_i = learning_rate * og_i;
        bias[i] -= lr_og_i;

        for( int j=0; j<input_size; j++ )
        {
            input_gradient[j] += w_[j] * og_i;

            if( delta_L2 > 0. )
                w_[j] *= (1 - delta_L2);

            w_[j] -= input[j] * lr_og_i;

            if( delta_L1 > 0. )
            {
                if( w_[j] > delta_L1 )
                    w_[j] -= delta_L1;
                else if( w_[j] < -delta_L1 )
                    w_[j] += delta_L1;
                else
                    w_[j] = 0.;
            }

        }
    }
    step_number++;
}

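// Mini-batch version: input gradients are computed with the weights as they
// were before this update; the gradient step uses learning_rate / n (an
// average over the n examples); the L2 shrink and the L1 soft-thresholding
// are each applied once per mini-batch; and step_number advances by n.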
void GradNNetLayerModule::bpropUpdate(const Mat& inputs, const Mat& outputs,
        Mat& input_gradients,
        const Mat& output_gradients,
        bool accumulate)
{
    PLASSERT( inputs.width() == input_size );
    PLASSERT( outputs.width() == output_size );
    PLASSERT( output_gradients.width() == output_size );

    int n = inputs.length();

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == input_size &&
                input_gradients.length() == n,
                "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(n, input_size);
        input_gradients.fill(0);
    }

    learning_rate = start_learning_rate / (1+decrease_constant*step_number);
    real avg_lr = learning_rate / n; // To obtain an average over the mini-batch.

    // With L2 regularization, weights are scaled by a coefficient equal to
    // 1 - learning rate * penalty.
    real l2_scaling =
        L2_penalty_factor > 0 ? 1 - learning_rate * L2_penalty_factor
                              : 1;
    PLASSERT_MSG(l2_scaling > 0, "Learning rate too large");

    // Compute input gradient.
    productAcc(input_gradients, output_gradients, weights);

    // Update bias.
    resizeOnes(n);
    transposeProductScaleAcc(bias, output_gradients, ones, -avg_lr, real(1));

    // Update weights.
    transposeProductScaleAcc(weights, output_gradients, inputs,
                             -avg_lr, l2_scaling);

    // Apply L1 penalty if needed (note: this is not very efficient).
    if (L1_penalty_factor > 0) {
        real delta_L1 = learning_rate * L1_penalty_factor;
        for( int i=0; i<output_size; i++ )
        {
            real* w_ = weights[i];
            for( int j=0; j<input_size; j++ )
            {
                real& w_ij = w_[j];
                if( w_ij > delta_L1 )
                    w_ij -= delta_L1;
                else if( w_ij < -delta_L1 )
                    w_ij += delta_L1;
                else
                    w_ij = 0.;
            }
        }
    }
    step_number += n;
}


//////////////////
// bbpropUpdate //
//////////////////
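// The second-order information in output_diag_hessian is not used: after
// checking its size, this simply falls back on the first-order bpropUpdate.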
void GradNNetLayerModule::bbpropUpdate(const Vec& input, const Vec& output,
                                       const Vec& output_gradient,
                                       const Vec& output_diag_hessian)
{
    PLASSERT_MSG( output_diag_hessian.size() == output_size,
                  "output_diag_hessian.size() should be equal to"
                  " this->output_size" );
    bpropUpdate( input, output, output_gradient );
}

/* This implementation is incorrect; we leave it to the PLERROR defined in the
   parent class's version.
// Propagates back output_gradient and output_diag_hessian
void GradNNetLayerModule::bbpropUpdate(const Vec& input, const Vec& output,
                                       Vec&  input_gradient,
                                       const Vec& output_gradient,
                                       Vec&  input_diag_hessian,
                                       const Vec& output_diag_hessian,
                                       bool accumulate)
{
    bpropUpdate( input, output, input_gradient, output_gradient, accumulate );
}
*/

////////////
// forget //
////////////
// Forget the bias and reinitialize the weights
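// Unless init_weights / init_bias are provided, the bias is cleared and each
// weight is drawn uniformly in [-r, r] with r = init_weights_random_scale / input_size
// (a 1/fan-in scaling); setting init_weights_random_scale to 0 clears the
// weights instead.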
void GradNNetLayerModule::forget()
{
    learning_rate = start_learning_rate;
    step_number = 0;

    bias.resize( output_size );
    if( init_bias.size() > 0 )
    {
        if( init_bias.size() != output_size )
            PLERROR( "init_bias (%d) should have length equal to output_size (%d)",
                     init_bias.size(), output_size );
        bias << init_bias;
    }
    else
        bias.clear();

    weights.resize( output_size, input_size );
    if( init_weights.size() > 0 )
    {
        if( init_weights.length() != output_size
            || init_weights.width() != input_size )
            PLERROR( "init_weights (%d,%d) should have size equal to (output_size, input_size) (%d,%d)",
                     init_weights.length(), init_weights.width(),
                     output_size, input_size );

        weights << init_weights;
    }
    else if( init_weights_random_scale != 0. )
    {
        if( !random_gen )
        {
            PLWARNING( "GradNNetLayerModule: cannot forget() without"
                       " random_gen" );
            return;
        }
        real r = init_weights_random_scale / input_size;
        random_gen->fill_random_uniform(weights, -r, r);
    }
    else
        weights.clear();
}

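// Note that resetting step_number here makes the
// 1/(1 + decrease_constant * step_number) schedule restart from the newly
// given rate.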
void GradNNetLayerModule::setLearningRate( real dynamic_learning_rate )
{
    start_learning_rate = dynamic_learning_rate;
    step_number = 0;
    // learning_rate will automatically be set in bpropUpdate()
}

///////////
// build //
///////////
void GradNNetLayerModule::build()
{
    inherited::build();
    build_();
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void GradNNetLayerModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(init_weights, copies);
    deepCopyField(init_bias,    copies);
    deepCopyField(weights,      copies);
    deepCopyField(bias,         copies);
    deepCopyField(ones,         copies);
}

////////////////////
// declareOptions //
////////////////////
void GradNNetLayerModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "start_learning_rate",
                  &GradNNetLayerModule::start_learning_rate,
                  OptionBase::buildoption,
                  "Learning rate of stochastic gradient optimization");

    declareOption(ol, "decrease_constant",
                  &GradNNetLayerModule::decrease_constant,
                  OptionBase::buildoption,
                  "Decrease constant of stochastic gradient optimization");

    declareOption(ol, "init_weights", &GradNNetLayerModule::init_weights,
                  OptionBase::buildoption,
                  "Optional initial weights of the neurons (one row per neuron).\n"
                  "If not provided, the weights are initialized according to a uniform\n"
                  "distribution (see init_weights_random_scale).\n");

    declareOption(ol, "init_bias", &GradNNetLayerModule::init_bias,
                  OptionBase::buildoption,
                  "Optional initial bias of the neurons. If not provided, it is set to 0.\n");

    declareOption(ol, "init_weights_random_scale",
                  &GradNNetLayerModule::init_weights_random_scale,
                  OptionBase::buildoption,
                  "If init_weights is not provided, the weights are initialized randomly\n"
                  "from a uniform distribution in [-r,r], with r = init_weights_random_scale/input_size.\n"
                  "To clear the weights initially, just set this option to 0.");

    declareOption(ol, "L1_penalty_factor",
                  &GradNNetLayerModule::L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n");

    declareOption(ol, "L2_penalty_factor",
                  &GradNNetLayerModule::L2_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L2 regularization term, i.e.\n"
                  "minimize 0.5 * L2_penalty_factor * sum_{ij} weights(i,j)^2 during training.\n");


    declareOption(ol, "weights", &GradNNetLayerModule::weights,
                  OptionBase::learntoption,
                  "Input weights of the neurons (one row per neuron)");

    declareOption(ol, "bias", &GradNNetLayerModule::bias,
                  OptionBase::learntoption,
                  "Bias of the neurons");

    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
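// (Re)builds the layer: if the stored weights or bias do not match the
// current input_size / output_size, they are reinitialized through forget().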
void GradNNetLayerModule::build_()
{
    if( input_size < 0 ) // has not been initialized
        return;

    if( output_size < 0 )
        PLERROR("GradNNetLayerModule::build_: 'output_size' is < 0 (%i),\n"
                " you should set it to a positive integer (the number of"
                " neurons).\n", output_size);

    if( weights.length() != output_size
        || weights.width() != input_size
        || bias.size() != output_size )
    {
        forget();
    }
}

////////////////
// resizeOnes //
////////////////
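// Makes sure the vector of ones used to add the bias in mini-batch
// fprop/bpropUpdate has exactly length n.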
void GradNNetLayerModule::resizeOnes(int n)
{
    if (ones.length() < n) {
        ones.resize(n);
        ones.fill(1);
    } else if (ones.length() > n)
        ones.resize(n);
}



} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :