LinearFilterModule.cc
00001 // -*- C++ -*-
00002 
00003 // LinearFilterModule.cc
00004 //
00005 // Copyright (C) 2005 Jerome Louradour
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 /* *******************************************************
00036    * $Id: LinearFilterModule.cc,v 1.3 2006/01/18 04:04:06 lamblinp Exp $
00037    ******************************************************* */
00038 
00039 // Authors: Jerome Louradour
00040 
00044 #include "LinearFilterModule.h"
00045 #include <plearn/math/TMat_maths.h>
00046 
00047 namespace PLearn {
00048 using namespace std;
00049 
00050 PLEARN_IMPLEMENT_OBJECT(
00051     LinearFilterModule,
00052     "Affine transformation module, with stochastic gradient descent updates",
00053     "Neural Network layer, using stochastic gradient to update neuron weights\n"
00054     "       Output = weights * Input + bias\n"
00055     "Weights and bias are updated by online gradient descent, with learning\n"
00056     "rate possibly decreasing in 1/(1 + n_updates_done * decrease_constant).\n"
00057     "An L1 and L2 regularization penalty can be added to push weights to 0.\n"
00058     "Weights can be initialized to 0, to a given initial matrix, or randomly\n"
00059     "from a uniform distribution.\n"
00060     );
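// Summary (a condensed restatement of what the fprop / bpropUpdate code below
// actually does; 'lr' is shorthand for the current learning rate, not a member):
//
//   forward:   output[i]  = weights[i] * input[i % input_size] + bias[i]
//   backward, per output unit i, with lr = start_learning_rate / (1 + decrease_constant * step_number):
//     bias[i]    -= lr * output_gradient[i]                          (skipped if no_bias)
//     weights[i] *= 1 - lr * L2_penalty_factor                       (L2 weight decay)
//     weights[i] -= lr * output_gradient[i] * input[i % input_size]
//     weights[i]  is soft-thresholded by lr * L1_penalty_factor      (L1 penalty)
//     weights[i]  is clipped to [0, 1] if between_0_and_1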
00061 
00063 // LinearFilterModule //
00065 LinearFilterModule::LinearFilterModule():
00066     start_learning_rate( .001 ),
00067     decrease_constant( 0. ),
00068     init_weights_random_scale( 1. ),
00069     L1_penalty_factor( 0. ),
00070     L2_penalty_factor( 0. ),
00071     no_bias(false),
00072     between_0_and_1(false),
00073     step_number( 0 )
00074 {}
00075 
00077 // fprop //
00079 void LinearFilterModule::fprop(const Vec& input, Vec& output) const
00080 {
00081     PLASSERT_MSG( input.size() == input_size,
00082                   "input.size() should be equal to this->input_size" );
00083 
00084     output.resize( output_size );
00085 
00086     // Apply the filter: one scalar weight per output unit, cycling over the inputs via i % input_size
00087     for( int i=0 ; i<output_size ; i++ )
00088         output[i] = weights[i] * input[i % input_size] + bias[i];
00089 }
00090 
00091 void LinearFilterModule::fprop(const Mat& inputs, Mat& outputs)
00092 {
00093     PLASSERT( inputs.width() == input_size );
00094     int n = inputs.length();
00095     outputs.resize(n, output_size);
00096     for(int is=0;is<n;is++)
00097         for(int i=0;i<output_size;i++)
00098             outputs(is,i) = weights[i] * inputs(is, i % input_size);
00099 
00100     // Add bias.
00101     resizeOnes(n);
00102     externalProductAcc(outputs, ones, bias); // could be more efficient, but not critical
00103 }
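// A minimal usage sketch (in comments), assuming the usual PLearn conventions:
// 'input_size', 'output_size' and 'random_gen' are options/members inherited from
// OnlineLearningModule, and PRandom is the standard PLearn RNG; the snippet is
// illustrative and not taken from a test in this file.
//
//   PP<LinearFilterModule> m = new LinearFilterModule();
//   m->input_size  = 3;
//   m->output_size = 6;              // output units cycle over inputs via i % input_size
//   m->random_gen  = new PRandom();  // used by forget() for random initialization
//   m->build();
//   m->forget();                     // allocates and initializes weights and bias
//   Vec in( 3, 0.25 ), out;
//   m->fprop( in, out );             // out[i] = weights[i] * in[i % 3] + bias[i]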
00104 
00106 // bpropUpdate //
00108 // We no longer use BLAS routines here, because we would have to iterate
00109 // several times over the weights.
00110 void LinearFilterModule::bpropUpdate(const Vec& input, const Vec& output,
00111                                       const Vec& output_gradient)
00112 {
00113     PLASSERT_MSG( input.size() == input_size,
00114                   "input.size() should be equal to this->input_size" );
00115     PLASSERT_MSG( output.size() == output_size,
00116                   "output.size() should be equal to this->output_size" );
00117     PLASSERT_MSG( output_gradient.size() == output_size,
00118                   "output_gradient.size() should be equal to this->output_size"
00119                 );
00120 
00121     learning_rate = start_learning_rate / (1+decrease_constant*step_number);
00122 
00123     for( int i=0; i<output_size; i++ )
00124     {
00125         real og_i = output_gradient[i];
00126 
00127         real delta_L1 = learning_rate * L1_penalty_factor;
00128         real delta_L2 = learning_rate * L2_penalty_factor;
00129         if( delta_L2 > 1 )
00130             PLWARNING("LinearFilterModule::bpropUpdate:\n"
00131                       "learning rate = %f is too large!\n", learning_rate);
00132 
00133         real lr_og_i = learning_rate * og_i;
00134         if( !no_bias )
00135             bias[i] -= lr_og_i;
00136 
00137         if( delta_L2 > 0. )
00138             weights[i] *= (1 - delta_L2);
00139 
00140         weights[i] -= input[i % input_size] * lr_og_i;
00141 
00142         if( delta_L1 > 0. )
00143         {
00144             if( weights[i] > delta_L1 )
00145                 weights[i] -= delta_L1;
00146             else if( weights[i] < -delta_L1 )
00147                 weights[i] += delta_L1;
00148             else
00149                 weights[i] = 0.;
00150         }
00151 
00152         if( between_0_and_1 )
00153         {
00154             if( weights[i] > 1. )
00155                 weights[i] = 1.;
00156             if( weights[i] < 0. )
00157                 weights[i] = 0.;
00158         }
00159 
00160     }
00161     step_number++;
00162 }
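// Note: the L1 branch above is a soft-thresholding (proximal) step.  With
// delta_L1 = learning_rate * L1_penalty_factor = 0.1, for instance, a weight of
// 0.3 becomes 0.2, a weight of -0.3 becomes -0.2, and a weight of 0.05 is set
// exactly to 0.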
00163 
00164 
00165 // Updates the parameters and propagates the gradient back to the input
00166 void LinearFilterModule::bpropUpdate(const Vec& input, const Vec& output,
00167                                       Vec& input_gradient,
00168                                       const Vec& output_gradient,
00169                                       bool accumulate)
00170 {
00171     PLASSERT_MSG( input.size() == input_size,
00172                   "input.size() should be equal to this->input_size" );
00173     PLASSERT_MSG( output.size() == output_size,
00174                   "output.size() should be equal to this->output_size" );
00175     PLASSERT_MSG( output_gradient.size() == output_size,
00176                   "output_gradient.size() should be equal to this->output_size"
00177                 );
00178 
00179     if( accumulate )
00180     {
00181         PLASSERT_MSG( input_gradient.size() == input_size,
00182                       "Cannot resize input_gradient AND accumulate into it" );
00183     }
00184     else
00185     {
00186         input_gradient.resize( input_size );
00187         input_gradient.clear();
00188     }
00189 
00190     learning_rate = start_learning_rate / (1+decrease_constant*step_number);
00191 
00192     for( int i=0; i<output_size; i++ )
00193     {
00194         real og_i = output_gradient[i];
00195 
00196         real delta_L1 = learning_rate * L1_penalty_factor;
00197         real delta_L2 = learning_rate * L2_penalty_factor;
00198         if( delta_L2 > 1 )
00199             PLWARNING("LinearFilterModule::bpropUpdate:\n"
00200                       "learning rate = %f is too large!\n", learning_rate);
00201 
00202         real lr_og_i = learning_rate * og_i;
00203         if( !no_bias )
00204             bias[i] -= lr_og_i;
00205 
00206         input_gradient[i % input_size] += weights[i] * og_i;
00207 
00208         if( delta_L2 > 0. )
00209             weights[i] *= (1 - delta_L2);
00210 
00211         weights[i] -= input[i % input_size] * lr_og_i;
00212 
00213         if( delta_L1 > 0. )
00214         {
00215             if( weights[i] > delta_L1 )
00216                 weights[i] -= delta_L1;
00217             else if( weights[i] < -delta_L1 )
00218                 weights[i] += delta_L1;
00219             else
00220                 weights[i] = 0.;
00221         }
00222         if( between_0_and_1 )
00223         {
00224             if( weights[i] > 1. )
00225                 weights[i] = 1.;
00226             if( weights[i] < 0. )
00227                 weights[i] = 0.;
00228         }
00229     }
00230     step_number++;
00231 }
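// Note on the input gradient above: several output units can read the same input
// position (through i % input_size), so each input position accumulates the
// contributions of all of them.  For example, with input_size = 2 and
// output_size = 4:
//   input_gradient[0] = weights[0]*output_gradient[0] + weights[2]*output_gradient[2]
//   input_gradient[1] = weights[1]*output_gradient[1] + weights[3]*output_gradient[3]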
00232 
00233 void LinearFilterModule::bpropUpdate(const Mat& inputs, const Mat& outputs,
00234         Mat& input_gradients,
00235         const Mat& output_gradients,
00236         bool accumulate)
00237 {
00238     PLASSERT( inputs.width() == input_size );
00239     PLASSERT( outputs.width() == output_size );
00240     PLASSERT( output_gradients.width() == output_size );
00241 
00242     int n = inputs.length();
00243 
00244     if( accumulate )
00245     {
00246         PLASSERT_MSG( input_gradients.width() == input_size &&
00247                 input_gradients.length() == n,
00248                 "Cannot resize input_gradients and accumulate into it" );
00249     }
00250     else
00251     {
00252         input_gradients.resize(n, input_size);
00253         input_gradients.fill(0);
00254     }
00255 
00256     learning_rate = start_learning_rate / (1+decrease_constant*step_number);
00257     real avg_lr = learning_rate / n; // To obtain an average on a mini-batch.
00258 
00259     // With L2 regularization, weights are scaled by a coefficient equal to
00260     // 1 - learning rate * penalty.
00261     real l2_scaling =
00262         L2_penalty_factor > 0 ? 1 - learning_rate * L2_penalty_factor
00263                               : 1;
00264     PLASSERT_MSG(l2_scaling > 0, "Learning rate too large");
00265 
00266     // Compute input gradient.
00267     for(int i_sample = 0; i_sample < outputs.length() ;i_sample++)
00268         for(int i = 0; i < output_size; i++)
00269             input_gradients(i_sample, i % input_size ) += weights[i] * output_gradients(i_sample,  i );
00270 
00271     // Update bias.
00272     if( !no_bias )
00273     {
00274         resizeOnes(n);
00275         transposeProductScaleAcc(bias, output_gradients, ones, -avg_lr, real(1));
00276     }
00277 
00278     // Update weights: apply the L2 weight decay (as in the single-sample bpropUpdate), then the averaged gradient step.
00279     for( int i = 0; i < output_size; i++ )
00280         weights[i] *= l2_scaling;
00281     for(int i_sample = 0; i_sample < outputs.length() ;i_sample++)
00282         for(int i = 0; i < output_size; i++ )
00283         {
00284             weights[i] -= avg_lr * output_gradients(i_sample, i) * inputs(i_sample, i % input_size);
00285             if( between_0_and_1 )
00286             {
00287                 if( weights[i] > 1. ) weights[i] = 1.;
00288                 if( weights[i] < 0. ) weights[i] = 0.;
00289             }
00290         }
00291 
00292     // Apply L1 penalty if needed (note: this is not very efficient).
00293     if (L1_penalty_factor > 0) {
00294         real delta_L1 = learning_rate * L1_penalty_factor;
00295         for( int i=0; i<output_size; i++ )
00296         {
00297             if( weights[i] > delta_L1 )
00298                 weights[i] -= delta_L1;
00299             else if( weights[i] < -delta_L1 )
00300                 weights[i] += delta_L1;
00301             else
00302                 weights[i] = 0.;
00303         }
00304     }
00305     step_number += n;
00306 }
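// In the mini-batch version above, the gradient is averaged over the batch:
// with avg_lr = learning_rate / n, the bias update performed by
// transposeProductScaleAcc amounts to
//   bias[i] -= (learning_rate / n) * sum_{s=0..n-1} output_gradients(s, i)
// and the weight update uses the same per-sample terms scaled by avg_lr, so one
// call on n samples is roughly one SGD step on the batch-averaged gradient.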
00307 
00308 
00310 // bbpropUpdate //
00312 void LinearFilterModule::bbpropUpdate(const Vec& input, const Vec& output,
00313                                        const Vec& output_gradient,
00314                                        const Vec& output_diag_hessian)
00315 {
00316     PLASSERT_MSG( output_diag_hessian.size() == output_size,
00317                   "output_diag_hessian.size() should be equal to"
00318                   " this->output_size" );
00319     bpropUpdate( input, output, output_gradient );
00320 }
00321 
00322 /* This implementation is incorrect. Leave it to the PLERROR defined in the parent version.
00323 // Propagates back output_gradient and output_diag_hessian
00324 void LinearFilterModule::bbpropUpdate(const Vec& input, const Vec& output,
00325                                        Vec&  input_gradient,
00326                                        const Vec& output_gradient,
00327                                        Vec&  input_diag_hessian,
00328                                        const Vec& output_diag_hessian,
00329                                        bool accumulate)
00330 {
00331     bpropUpdate( input, output, input_gradient, output_gradient, accumulate );
00332 }
00333 */
00334 
00336 // forget //
00338 // Forget the bias and reinitialize the weights
00339 void LinearFilterModule::forget()
00340 {
00341     learning_rate = start_learning_rate;
00342     step_number = 0;
00343 
00344     bias.resize( output_size );
00345     if( init_bias.size() > 0 )
00346     {
00347         if( init_bias.size() != output_size )
00348             PLERROR( "init_bias (%d) should have length equal to output_size (%d)",
00349                      init_bias.size(), output_size );
00350         bias << init_bias;
00351     }
00352     else
00353         bias.clear();
00354     if( no_bias )
00355         bias.clear();
00356 
00357     weights.resize( output_size );
00358     if( init_weights.size() > 0 )
00359     {
00360         if( init_weights.length() != output_size )
00361             PLERROR( "init_weights (%d) should have length equal to output_size (%d)",
00362                      init_weights.length(),
00363                      output_size );
00364 
00365         weights << init_weights;
00366     }
00367     else if( init_weights_random_scale < 0. )
00368     {
00369         real r = - init_weights_random_scale / sqrt( (real)input_size );
00370         random_gen->fill_random_uniform(weights, 1.-r, 1.);
00371     }
00372     else
00373     {
00374         real r = init_weights_random_scale / sqrt( (real)input_size );
00375         random_gen->fill_random_uniform(weights, 0., r);
00376     }
00377 }
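// Initialization example (values follow directly from the branches above): with
// input_size = 4 and init_weights_random_scale = 1, r = 1/sqrt(4) = 0.5 and the
// weights are drawn uniformly in [0, 0.5]; with init_weights_random_scale = -1
// they are drawn uniformly in [0.5, 1]; a scale of 0 leaves them all at 0.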
00378 
00379 void LinearFilterModule::setLearningRate( real dynamic_learning_rate )
00380 {
00381     start_learning_rate = dynamic_learning_rate;
00382     step_number = 0;
00383     // learning_rate will automatically be set in bpropUpdate()
00384 }
00385 
00387 // build //
00389 void LinearFilterModule::build()
00390 {
00391     inherited::build();
00392     build_();
00393 }
00394 
00396 // makeDeepCopyFromShallowCopy //
00398 void LinearFilterModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00399 {
00400     inherited::makeDeepCopyFromShallowCopy(copies);
00401 
00402     deepCopyField(init_weights, copies);
00403     deepCopyField(init_bias,    copies);
00404     deepCopyField(weights,      copies);
00405     deepCopyField(bias,         copies);
00406     deepCopyField(ones,         copies);
00407 }
00408 
00410 // declareOptions //
00412 void LinearFilterModule::declareOptions(OptionList& ol)
00413 {
00414     declareOption(ol, "start_learning_rate",
00415                   &LinearFilterModule::start_learning_rate,
00416                   OptionBase::buildoption,
00417                   "Learning-rate of stochastic gradient optimization");
00418 
00419     declareOption(ol, "decrease_constant",
00420                   &LinearFilterModule::decrease_constant,
00421                   OptionBase::buildoption,
00422                   "Decrease constant of stochastic gradient optimization");
00423 
00424     declareOption(ol, "init_weights", &LinearFilterModule::init_weights,
00425                   OptionBase::buildoption,
00426                   "Optional initial weights of the neurons (one weight per neuron).\n"
00427                   "If not provided then weights are initialized according to a uniform\n"
00428                   "distribution (see init_weights_random_scale)\n");
00429 
00430     declareOption(ol, "init_bias", &LinearFilterModule::init_bias,
00431                   OptionBase::buildoption,
00432                   "Optional initial bias of the neurons. If not provided, they are set to 0.\n");
00433 
00434     declareOption(ol, "init_weights_random_scale",
00435                   &LinearFilterModule::init_weights_random_scale,
00436                   OptionBase::buildoption,
00437                   "If init_weights is not provided, the weights are initialized randomly from a\n"
00438                   "uniform in [0,r], with r = init_weights_random_scale/sqrt(input_size) (or in\n"
00439                   "[1-r,1] if this scale is negative). To clear the weights initially, set it to 0.");
00440 
00441     declareOption(ol, "L1_penalty_factor",
00442                   &LinearFilterModule::L1_penalty_factor,
00443                   OptionBase::buildoption,
00444                   "Optional (default=0) factor of L1 regularization term, i.e.\n"
00445                   "minimize L1_penalty_factor * sum_i |weights[i]| during training.\n");
00446 
00447     declareOption(ol, "L2_penalty_factor",
00448                   &LinearFilterModule::L2_penalty_factor,
00449                   OptionBase::buildoption,
00450                   "Optional (default=0) factor of L2 regularization term, i.e.\n"
00451                   "minimize 0.5 * L2_penalty_factor * sum_i weights[i]^2 during training.\n");
00452 
00453     declareOption(ol, "no_bias",
00454                   &LinearFilterModule::no_bias,
00455                   OptionBase::buildoption,
00456                   "If true, no bias is used (the biases are kept at 0).\n");
00457 
00458     declareOption(ol, "between_0_and_1",
00459                   &LinearFilterModule::between_0_and_1,
00460                   OptionBase::buildoption,
00461                   "If true, the weights are constrained to stay between 0 and 1.\n");
00462 
00463     declareOption(ol, "weights", &LinearFilterModule::weights,
00464                   OptionBase::learntoption,
00465                   "Input weights of the neurons (one weight per neuron)");
00466 
00467     declareOption(ol, "bias", &LinearFilterModule::bias,
00468                   OptionBase::learntoption,
00469                   "Bias of the neurons");
00470 
00471     inherited::declareOptions(ol);
00472 }
00473 
00475 // build_ //
00477 void LinearFilterModule::build_()
00478 {
00479     if( input_size < 0 ) // has not been initialized
00480         return;
00481 
00482     if( output_size < 0 )
00483         PLERROR("LinearFilterModule::build_: 'output_size' is < 0 (%i),\n"
00484                 " you should set it to a positive integer (the number of"
00485                 " neurons).\n", output_size);
00486 
00487     if( weights.length() != output_size
00488         || bias.size() != output_size )
00489     {
00490         forget();
00491     }
00492 }
00493 
00495 // resizeOnes //
00497 void LinearFilterModule::resizeOnes(int n)
00498 {
00499     if (ones.length() < n) {
00500         ones.resize(n);
00501         ones.fill(1);
00502     } else if (ones.length() > n)
00503         ones.resize(n);
00504 }
00505 
00506 
00507 
00508 } // end of namespace PLearn
00509 
00510 
00511 /*
00512   Local Variables:
00513   mode:c++
00514   c-basic-offset:4
00515   c-file-style:"stroustrup"
00516   c-file-offsets:((innamespace . 0)(inline-open . 0))
00517   indent-tabs-mode:nil
00518   fill-column:79
00519   End:
00520 */
00521 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :