// -*- C++ -*-

// ShuntingNNetLayerModule.cc
//
// Copyright (C) 2008 Jerome Louradour
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: ShuntingNNetLayerModule.cc,v 1.3 2006/01/18 04:04:06 lamblinp Exp $
   ******************************************************* */

// Authors: Jerome Louradour

#include "ShuntingNNetLayerModule.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(
    ShuntingNNetLayerModule,
    "Shunting neural network layer, with stochastic gradient descent updates",
    "Each output neuron combines a quadratic excitation E and a quadratic\n"
    "inhibition S of its input x:\n"
    "    E_i = sqrt( sum_k ( excit_quad_weights[k](i) . x )^2\n"
    "                + softplus( excit_weights(i) . x + bias[i] ) )\n"
    "    S_i = sqrt( sum_k ( inhib_quad_weights[k](i) . x )^2 )\n"
    "    Output_i = ( excit_num_coeff[i] * E_i - inhib_num_coeff[i] * S_i )\n"
    "               / ( 1 + E_i + S_i )\n"
    "Weights and biases are updated by online gradient descent, with a\n"
    "learning rate possibly decreasing in\n"
    "1/(1 + n_updates_done * decrease_constant).\n"
    "Weights can be initialized to 0 or randomly from a uniform distribution.\n"
    );
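
// A minimal usage sketch (added for illustration; it is not part of the
// original file, and assumes the standard OnlineLearningModule interface
// used throughout this class -- 'input_size', 'output_size', 'random_gen'):
//
//     PP<ShuntingNNetLayerModule> layer = new ShuntingNNetLayerModule();
//     layer->input_size  = 10;
//     layer->output_size = 5;
//     layer->n_filters   = 2;
//     layer->random_gen  = new PRandom();
//     layer->build();                  // shapes mismatch, so build_() calls forget()
//
//     Vec input( 10 ), output;
//     input.fill( 0.1 );
//     layer->fprop( input, output );   // output.length() == 5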

/////////////////////////////
// ShuntingNNetLayerModule //
/////////////////////////////
ShuntingNNetLayerModule::ShuntingNNetLayerModule():
    start_learning_rate( .001 ),
    decrease_constant( 0. ),
    init_weights_random_scale( 1. ),
    init_quad_weights_random_scale( 1. ),
    n_filters( 1 ),
    n_filters_inhib( -1 ),
    step_number( 0 )
{}

////////////////////
// declareOptions //
////////////////////
void ShuntingNNetLayerModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "start_learning_rate",
                  &ShuntingNNetLayerModule::start_learning_rate,
                  OptionBase::buildoption,
                  "Learning rate of stochastic gradient optimization");

    declareOption(ol, "decrease_constant",
                  &ShuntingNNetLayerModule::decrease_constant,
                  OptionBase::buildoption,
                  "Decrease constant of stochastic gradient optimization");

    declareOption(ol, "init_weights_random_scale",
                  &ShuntingNNetLayerModule::init_weights_random_scale,
                  OptionBase::buildoption,
                  "Weights of the excitation (softplus part) are initialized randomly\n"
                  "from a uniform in [-r,r], with r = init_weights_random_scale/input_size.\n"
                  "To clear the weights initially, just set this option to 0.");

    declareOption(ol, "init_quad_weights_random_scale",
                  &ShuntingNNetLayerModule::init_quad_weights_random_scale,
                  OptionBase::buildoption,
                  "Weights of the quadratic part (of excitation, as well as inhibition)\n"
                  "are initialized randomly from a uniform in [-r,r], with\n"
                  "r = init_quad_weights_random_scale/input_size.\n"
                  "To clear the weights initially, just set this option to 0.");

    declareOption(ol, "n_filters",
                  &ShuntingNNetLayerModule::n_filters,
                  OptionBase::buildoption,
                  "Number of synapses per neuron for excitation.\n");

    declareOption(ol, "n_filters_inhib",
                  &ShuntingNNetLayerModule::n_filters_inhib,
                  OptionBase::buildoption,
                  "Number of synapses per neuron for inhibition.\n"
                  "Must be less than or equal to n_filters in the current implementation.\n"
                  "If -1, then it is taken equal to n_filters.");

    declareOption(ol, "excit_quad_weights", &ShuntingNNetLayerModule::excit_quad_weights,
                  OptionBase::learntoption,
                  "List of weight vectors of the neurons\n"
                  "(contributing to the excitation -- quadratic part)");

    declareOption(ol, "inhib_quad_weights", &ShuntingNNetLayerModule::inhib_quad_weights,
                  OptionBase::learntoption,
                  "List of weight vectors of the neurons (inhibition -- quadratic part)\n");

    declareOption(ol, "excit_weights", &ShuntingNNetLayerModule::excit_weights,
                  OptionBase::learntoption,
                  "Input weight vectors of the neurons (excitation -- softplus part)\n");

    declareOption(ol, "bias", &ShuntingNNetLayerModule::bias,
                  OptionBase::learntoption,
                  "Bias of the neurons (in the softplus of the excitations)\n");

    declareOption(ol, "excit_num_coeff", &ShuntingNNetLayerModule::excit_num_coeff,
                  OptionBase::learntoption,
                  "Multiplicative coefficient applied to the excitation\n"
                  "in the numerator of the activation closed form.\n");

    declareOption(ol, "inhib_num_coeff", &ShuntingNNetLayerModule::inhib_num_coeff,
                  OptionBase::learntoption,
                  "Multiplicative coefficient applied to the inhibition\n"
                  "in the numerator of the activation closed form.\n");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void ShuntingNNetLayerModule::build_()
{
    if( input_size < 0 ) // has not been initialized
        return;

    if( output_size < 0 )
        PLERROR("ShuntingNNetLayerModule::build_: 'output_size' is < 0 (%i),\n"
                " you should set it to a positive integer (the number of"
                " neurons).\n", output_size);

    if (n_filters_inhib < 0)
        n_filters_inhib = n_filters;
    PLASSERT( n_filters > 0 );

    if(    excit_quad_weights.length() != n_filters
        || inhib_quad_weights.length() != n_filters_inhib
        || excit_weights.length() != output_size
        || excit_weights.width() != input_size
        || bias.size() != output_size )
    {
        forget();
    }
}

void ShuntingNNetLayerModule::build()
{
    inherited::build();
    build_();
}

////////////
// forget //
////////////
void ShuntingNNetLayerModule::forget()
{
    learning_rate = start_learning_rate;
    step_number = 0;

    bias.resize( output_size );
    bias.clear();

    excit_num_coeff.resize( output_size );
    inhib_num_coeff.resize( output_size );
    excit_num_coeff.fill(1.);
    inhib_num_coeff.fill(1.);

    excit_weights.resize( output_size, input_size );
    excit_quad_weights.resize( n_filters );
    PLASSERT( n_filters_inhib >= 0 && n_filters_inhib <= n_filters );
    inhib_quad_weights.resize( n_filters_inhib );

    if( !random_gen )
    {
        PLWARNING( "ShuntingNNetLayerModule: cannot forget() without random_gen" );
        return;
    }

    real r = init_weights_random_scale / (real)input_size;
    if( r > 0. )
        random_gen->fill_random_uniform(excit_weights, -r, r);
    else
        excit_weights.clear();

    r = init_quad_weights_random_scale / (real)input_size;
    if( r > 0. )
        for( int k = 0; k < n_filters; k++ )
        {
            excit_quad_weights[k].resize( output_size, input_size );
            random_gen->fill_random_uniform(excit_quad_weights[k], -r, r);
            if ( k < n_filters_inhib ) {
                inhib_quad_weights[k].resize( output_size, input_size );
                random_gen->fill_random_uniform(inhib_quad_weights[k], -r, r);
            }
        }
    else
        for( int k = 0; k < n_filters; k++ )
        {
            excit_quad_weights[k].resize( output_size, input_size );
            excit_quad_weights[k].clear();
            if ( k < n_filters_inhib ) {
                inhib_quad_weights[k].resize( output_size, input_size );
                inhib_quad_weights[k].clear();
            }
        }
}
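
// For instance (illustrative numbers, not from the original file): with
// input_size = 100 and init_weights_random_scale = 1, forget() draws each
// excitation weight uniformly in [-0.01, 0.01], since r = 1/100.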

///////////
// fprop //
///////////
void ShuntingNNetLayerModule::fprop(const Vec& input, Vec& output) const
{
    PLASSERT_MSG( input.size() == input_size,
                  "input.size() should be equal to this->input_size" );

    output.resize( output_size );

    if( during_training )
    {
        batch_excitations.resize(1, output_size);
        batch_inhibitions.resize(1, output_size);
    }

    // Note: only the fast-approximation path (tabulated_softplus) is
    // implemented; an exact-softplus variant was left as a stub.
    for( int i = 0; i < output_size; i++ )
    {
        real excitation = 0.;
        real inhibition = 0.;
        for ( int k=0; k < n_filters; k++ )
        {
            excitation += square( dot( excit_quad_weights[k](i), input ) );
            if ( k < n_filters_inhib )
                inhibition += square( dot( inhib_quad_weights[k](i), input ) );
        }
        excitation = sqrt( excitation
                           + tabulated_softplus( dot( excit_weights(i), input ) + bias[i] ) );
        inhibition = sqrt( inhibition );
        if( during_training )
        {
            batch_excitations(0,i) = excitation;
            batch_inhibitions(0,i) = inhibition;
        }

        output[i] = ( excit_num_coeff[i] * excitation - inhib_num_coeff[i] * inhibition ) /
                    ( 1. + excitation + inhibition );
    }
}
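
// A worked example of the closed form above (illustrative numbers): with
// n_filters = n_filters_inhib = 1, excit_num_coeff = inhib_num_coeff = 1,
// and an input x such that the excitation quadratic dot product is 1, the
// softplus argument is 0, and the inhibition dot product is 0.5:
//     E = sqrt( 1^2 + softplus(0) ) = sqrt( 1 + 0.693 ) ~= 1.301
//     S = sqrt( 0.5^2 ) = 0.5
//     output = ( 1.301 - 0.5 ) / ( 1 + 1.301 + 0.5 ) ~= 0.286
// With unit coefficients the output always stays in (-1, 1).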

void ShuntingNNetLayerModule::fprop(const Mat& inputs, Mat& outputs)
{
    PLASSERT( inputs.width() == input_size );
    int n = inputs.length();
    outputs.resize(n, output_size);

    Mat excitations_part2(n, output_size);
    excitations_part2.clear();
    productTranspose(excitations_part2, inputs, excit_weights);
    resizeOnes(n);
    externalProductAcc(excitations_part2, ones, bias);

    Mat excitations(n, output_size), inhibitions(n, output_size);
    excitations.clear();
    inhibitions.clear();

    for ( int k=0; k < n_filters; k++ )
    {
        Mat tmp_sample_output(n, output_size);

        tmp_sample_output.clear();
        productTranspose(tmp_sample_output, inputs, excit_quad_weights[k]);
        squareElements(tmp_sample_output);
        multiplyAcc(excitations, tmp_sample_output, 1.);

        if ( k < n_filters_inhib ) {
            tmp_sample_output.clear();
            productTranspose(tmp_sample_output, inputs, inhib_quad_weights[k]);
            squareElements(tmp_sample_output);
            multiplyAcc(inhibitions, tmp_sample_output, 1.);
        }
    }
    for( int i_sample = 0; i_sample < n; i_sample++ )
    {
        for( int i = 0; i < output_size; i++ )
        {
            excitations(i_sample,i) = sqrt( excitations(i_sample,i)
                                            + tabulated_softplus( excitations_part2(i_sample,i) ) );
            inhibitions(i_sample,i) = sqrt( inhibitions(i_sample,i) );

            real E = excitations(i_sample,i);
            real S = inhibitions(i_sample,i);

            outputs(i_sample,i) = ( excit_num_coeff[i] * E - inhib_num_coeff[i] * S ) /
                                  ( 1. + E + S );
        }
    }

    if( during_training )
    {
        batch_excitations.resize(n, output_size);
        batch_inhibitions.resize(n, output_size);
        batch_excitations << excitations;
        batch_inhibitions << inhibitions;
    }
}

/////////////////
// bpropUpdate //
/////////////////
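// Gradient sketch (derivation added for clarity; notation follows the class
// help): with act = ( a*E - b*S ) / ( 1 + E + S ), where
// a = excit_num_coeff[i] and b = inhib_num_coeff[i],
//
//     d act / d E =  ( a + S*(a+b) ) / ( 1 + E + S )^2
//     d act / d S = -( b + E*(a+b) ) / ( 1 + E + S )^2
//
// These are the Dactivation_Dexcit / Dactivation_Dinhib terms below. The
// additional 1/E and 1/S factors come from differentiating the square roots,
// e.g. d E / d bias = sigmoid( excit_weights(i).x + bias[i] ) / ( 2*E ) for
// the softplus part, hence the sigmoid(...) * .5 term.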
void ShuntingNNetLayerModule::bpropUpdate(const Vec& input, const Vec& output,
                                      const Vec& output_gradient)
{
    learning_rate = start_learning_rate / (1+decrease_constant*step_number);

    for( int i=0; i<output_size; i++ )
    {
        real tmp = square( 1 + batch_excitations(0,i) + batch_inhibitions(0,i) );

        real Dactivation_Dexcit =   ( excit_num_coeff[i]  +  batch_inhibitions(0,i)*(excit_num_coeff[i] + inhib_num_coeff[i]) ) / tmp;
        real Dactivation_Dinhib = - ( inhib_num_coeff[i]  +  batch_excitations(0,i)*(excit_num_coeff[i] + inhib_num_coeff[i]) ) / tmp;

        real lr_og_excit = learning_rate * output_gradient[i];
        PLASSERT( batch_excitations(0,i)>0. );
        PLASSERT( n_filters_inhib==0 || batch_inhibitions(0,i)>0. );
        real lr_og_inhib = lr_og_excit * Dactivation_Dinhib / batch_inhibitions(0,i);
        lr_og_excit *= Dactivation_Dexcit / batch_excitations(0,i);

        tmp = lr_og_excit * sigmoid( dot( excit_weights(i), input ) + bias[i] ) * .5;

        bias[i] -= tmp;
        multiplyAcc( excit_weights(i), input, -tmp );

        for( int k = 0; k < n_filters; k++ )
        {
            real tmp_excit2 = lr_og_excit * dot( excit_quad_weights[k](i), input );
            real tmp_inhib2 = 0;
            if (k < n_filters_inhib)
                tmp_inhib2 = lr_og_inhib * dot( inhib_quad_weights[k](i), input );
            for( int j=0; j<input_size; j++ )
            {
                excit_quad_weights[k](i,j) -= tmp_excit2 * input[j];
                if (k < n_filters_inhib)
                    inhib_quad_weights[k](i,j) -= tmp_inhib2 * input[j];
            }
        }
    }

    step_number++;
}

void ShuntingNNetLayerModule::bpropUpdate(const Mat& inputs, const Mat& outputs,
        Mat& input_gradients,
        const Mat& output_gradients,
        bool accumulate)
{
    PLASSERT( inputs.width() == input_size );
    PLASSERT( outputs.width() == output_size );
    PLASSERT( output_gradients.width() == output_size );

    int n = inputs.length();

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == input_size &&
                input_gradients.length() == n,
                "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        // Note: input gradients are not back-propagated by this module
        // (they are left at 0).
        input_gradients.resize(n, input_size);
        input_gradients.fill(0);
    }

    learning_rate = start_learning_rate / (1+decrease_constant*step_number);
    real avg_lr = learning_rate / n; // To obtain an average on a mini-batch.

    if ( avg_lr == 0. )
        return;

    Mat tmp(n, output_size);
    // tmp = (1 + E + S ).^2;
    tmp.fill(1.);
    multiplyAcc(tmp, batch_excitations, (real)1);
    multiplyAcc(tmp, batch_inhibitions, (real)1);
    squareElements(tmp);

    // Updates are accumulated in separate buffers and applied once at the
    // end, so every sample's gradient is computed with the same weights.
    Vec bias_updates(output_size);
    Mat excit_weights_updates( output_size, input_size );
    TVec<Mat> excit_quad_weights_updates(n_filters);
    TVec<Mat> inhib_quad_weights_updates(n_filters_inhib);
    bias_updates.clear();
    excit_weights_updates.clear();
    for( int k=0; k < n_filters; k++ )
    {
        excit_quad_weights_updates[k].resize( output_size, input_size );
        excit_quad_weights_updates[k].clear();
        if (k < n_filters_inhib) {
            inhib_quad_weights_updates[k].resize( output_size, input_size );
            inhib_quad_weights_updates[k].clear();
        }
    }

    for( int i_sample = 0; i_sample < n; i_sample++ )
        for( int i=0; i<output_size; i++ )
        {
            real Dactivation_Dexcit =   ( excit_num_coeff[i]  +  batch_inhibitions(i_sample,i)*(excit_num_coeff[i] + inhib_num_coeff[i]) ) / tmp(i_sample,i);
            real Dactivation_Dinhib = - ( inhib_num_coeff[i]  +  batch_excitations(i_sample,i)*(excit_num_coeff[i] + inhib_num_coeff[i]) ) / tmp(i_sample,i);

            real lr_og_excit = avg_lr * output_gradients(i_sample,i);
            PLASSERT( batch_excitations(i_sample,i)>0. );
            PLASSERT( n_filters_inhib==0 || batch_inhibitions(i_sample,i)>0. );
            real lr_og_inhib = lr_og_excit * Dactivation_Dinhib / batch_inhibitions(i_sample,i);
            lr_og_excit *= Dactivation_Dexcit / batch_excitations(i_sample,i);

            real tmp2 = lr_og_excit * sigmoid( dot( excit_weights(i), inputs(i_sample) ) + bias[i] ) * .5;

            bias_updates[i] -= tmp2;
            multiplyAcc( excit_weights_updates(i), inputs(i_sample), -tmp2 );

            for( int k = 0; k < n_filters; k++ )
            {
                real tmp_excit2 = lr_og_excit * dot( excit_quad_weights[k](i), inputs(i_sample) );
                real tmp_inhib2 = 0;
                if (k < n_filters_inhib)
                    tmp_inhib2 = lr_og_inhib * dot( inhib_quad_weights[k](i), inputs(i_sample) );
                multiplyAcc( excit_quad_weights_updates[k](i), inputs(i_sample), -tmp_excit2 );
                if (k < n_filters_inhib)
                    multiplyAcc( inhib_quad_weights_updates[k](i), inputs(i_sample), -tmp_inhib2 );
            }
        }

    multiplyAcc( bias, bias_updates, 1. );
    multiplyAcc( excit_weights, excit_weights_updates, 1. );
    for( int k = 0; k < n_filters; k++ )
    {
        multiplyAcc( excit_quad_weights[k], excit_quad_weights_updates[k], 1. );
        if (k < n_filters_inhib)
            multiplyAcc( inhib_quad_weights[k], inhib_quad_weights_updates[k], 1. );
    }
    batch_excitations.clear();
    batch_inhibitions.clear();

    step_number += n;
}

void ShuntingNNetLayerModule::setLearningRate( real dynamic_learning_rate )
{
    start_learning_rate = dynamic_learning_rate;
    step_number = 0;
    // learning_rate will automatically be set in bpropUpdate()
}
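
// For example (illustrative numbers): with start_learning_rate = 1e-3 and
// decrease_constant = 1e-6, the rate used by bpropUpdate() after 1e6
// updates is 1e-3 / (1 + 1e-6 * 1e6) = 5e-4.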

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void ShuntingNNetLayerModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(excit_weights,            copies);
    deepCopyField(excit_quad_weights,       copies);
    deepCopyField(inhib_quad_weights,       copies);
    deepCopyField(bias,                     copies);
    deepCopyField(excit_num_coeff,          copies);
    deepCopyField(inhib_num_coeff,          copies);
    deepCopyField(ones,                     copies);
}

////////////////
// resizeOnes //
////////////////
void ShuntingNNetLayerModule::resizeOnes(int n) const
{
    if (ones.length() < n) {
        ones.resize(n);
        ones.fill(1);
    } else if (ones.length() > n)
        ones.resize(n);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :