// -*- C++ -*-

// RBMLLParameters.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "RBMLLParameters.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMLLParameters,
    "Stores and learns the parameters between two linear layers of an RBM",
    "");
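
// Background sketch: for an RBM with "down" (visible) units v and "up"
// (hidden) units h, parameters of this form define the usual energy
//
//     E(v, h) = - h' W v - b' v - c' h
//
// where W corresponds to 'weights', b to 'down_units_bias' and c to
// 'up_units_bias'. The accumulate*Stats() and update() methods below take
// approximate gradient steps on the resulting likelihood, in the style of
// contrastive divergence.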

RBMLLParameters::RBMLLParameters( real the_learning_rate ) :
    inherited(the_learning_rate),
    momentum(0.)
{
}

RBMLLParameters::RBMLLParameters( string down_types, string up_types,
                                  real the_learning_rate ) :
    inherited( down_types, up_types, the_learning_rate ),
    momentum(0.)
{
    // We're not sure inherited::build() has been called
    build();
}

void RBMLLParameters::declareOptions(OptionList& ol)
{
    declareOption(ol, "momentum", &RBMLLParameters::momentum,
                  OptionBase::buildoption,
                  "Momentum factor (should be between 0 and 1)");

    declareOption(ol, "weights", &RBMLLParameters::weights,
                  OptionBase::learntoption,
                  "Matrix containing unit-to-unit weights (output_size ×"
                  " input_size)");

    declareOption(ol, "up_units_bias",
                  &RBMLLParameters::up_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of up unit i");

    declareOption(ol, "down_units_bias",
                  &RBMLLParameters::down_units_bias,
                  OptionBase::learntoption,
                  "Element i contains the bias of down unit i");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMLLParameters::build_()
{
    if( up_layer_size == 0 || down_layer_size == 0 )
        return;

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights.length() != up_layer_size ||
        weights.width() != down_layer_size )
    {
        weights.resize( up_layer_size, down_layer_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_layer_size, down_layer_size );
    weights_neg_stats.resize( up_layer_size, down_layer_size );

    down_units_bias.resize( down_layer_size );
    down_units_bias_pos_stats.resize( down_layer_size );
    down_units_bias_neg_stats.resize( down_layer_size );
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        char dut_i = down_units_types[i];
        if( dut_i != 'l' ) // not linear activation unit
            PLERROR( "RBMLLParameters::build_() - value '%c' for"
                     " down_units_types[%d]\n"
                     "should be 'l'.\n",
                     dut_i, i );
    }

    up_units_bias.resize( up_layer_size );
    up_units_bias_pos_stats.resize( up_layer_size );
    up_units_bias_neg_stats.resize( up_layer_size );
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        char uut_i = up_units_types[i];
        if( uut_i != 'l' ) // not linear activation unit
            PLERROR( "RBMLLParameters::build_() - value '%c' for"
                     " up_units_types[%d]\n"
                     "should be 'l'.\n",
                     uut_i, i );
    }

    if( momentum != 0. )
    {
        weights_inc.resize( up_layer_size, down_layer_size );
        down_units_bias_inc.resize( down_layer_size );
        up_units_bias_inc.resize( up_layer_size );
    }

    if( needs_forget )
        forget();

    clearStats();
}

void RBMLLParameters::build()
{
    inherited::build();
    build_();
}


void RBMLLParameters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(up_units_bias, copies);
    deepCopyField(down_units_bias, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(up_units_bias_pos_stats, copies);
    deepCopyField(up_units_bias_neg_stats, copies);
    deepCopyField(down_units_bias_pos_stats, copies);
    deepCopyField(down_units_bias_neg_stats, copies);
    deepCopyField(weights_inc, copies);
    deepCopyField(up_units_bias_inc, copies);
    deepCopyField(down_units_bias_inc, copies);
}

void RBMLLParameters::accumulatePosStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_pos_stats += up_values * down_values'
    externalProductAcc( weights_pos_stats, up_values, down_values );

    down_units_bias_pos_stats += down_values;
    up_units_bias_pos_stats += up_values;

    pos_count++;
}

void RBMLLParameters::accumulateNegStats( const Vec& down_values,
                                          const Vec& up_values )
{
    // weights_neg_stats += up_values * down_values'
    externalProductAcc( weights_neg_stats, up_values, down_values );

    down_units_bias_neg_stats += down_values;
    up_units_bias_neg_stats += up_values;

    neg_count++;
}

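// Sketch of the rule implemented below: writing the positive-phase average
// as <h_i v_j>_pos = weights_pos_stats(i,j) / pos_count and the negative-
// phase average as <h_i v_j>_neg = weights_neg_stats(i,j) / neg_count, each
// weight moves as
//
//     w_ij += learning_rate * ( <h_i v_j>_neg - <h_i v_j>_pos )
//
// and the biases move analogously with the bias statistics. The minus sign
// is folded into pos_factor so that the inner loops reduce to a single
// multiply-add per element.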
void RBMLLParameters::update()
{
    // updates parameters
    // weights -= learning_rate * (weights_pos_stats/pos_count
    //                             - weights_neg_stats/neg_count)
    real pos_factor = -learning_rate / pos_count;
    real neg_factor = learning_rate / neg_count;

    int l = weights.length();
    int w = weights.width();

    real* w_i = weights.data();
    real* wps_i = weights_pos_stats.data();
    real* wns_i = weights_neg_stats.data();
    int w_mod = weights.mod();
    int wps_mod = weights_pos_stats.mod();
    int wns_mod = weights_neg_stats.mod();

    if( momentum == 0. )
    {
        // no need to use weights_inc
        for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += pos_factor * wps_i[j] + neg_factor * wns_i[j];
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * (weights_pos_stats/pos_count
        //                                  - weights_neg_stats/neg_count);
        // weights += weights_inc;
        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, wps_i += wps_mod,
                             wns_i += wns_mod, winc_i += winc_mod )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + pos_factor * wps_i[j] + neg_factor * wns_i[j];
                w_i[j] += winc_i[j];
            }
    }

    // down_units_bias -= learning_rate * (down_units_bias_pos_stats/pos_count
    //                                     - down_units_bias_neg_stats/neg_count)
    l = down_units_bias.length();
    real* dub = down_units_bias.data();
    real* dubps = down_units_bias_pos_stats.data();
    real* dubns = down_units_bias_neg_stats.data();

    if( momentum == 0. )
    {
        // no need to use down_units_bias_inc
        for( int i=0 ; i<l ; i++ )
            dub[i] += pos_factor * dubps[i] + neg_factor * dubns[i];
    }
    else
    {
        // ensure that down_units_bias_inc has the right size
        down_units_bias_inc.resize( l );

        // The update rule becomes:
        // down_units_bias_inc =
        //      momentum * down_units_bias_inc
        //      - learning_rate * (down_units_bias_pos_stats/pos_count
        //                         - down_units_bias_neg_stats/neg_count);
        // down_units_bias += down_units_bias_inc;
        real* dubinc = down_units_bias_inc.data();
        for( int i=0 ; i<l ; i++ )
        {
            dubinc[i] = momentum * dubinc[i]
                + pos_factor * dubps[i] + neg_factor * dubns[i];
            dub[i] += dubinc[i];
        }
    }

    // up_units_bias -= learning_rate * (up_units_bias_pos_stats/pos_count
    //                                   - up_units_bias_neg_stats/neg_count)
    l = up_units_bias.length();
    real* uub = up_units_bias.data();
    real* uubps = up_units_bias_pos_stats.data();
    real* uubns = up_units_bias_neg_stats.data();
    if( momentum == 0. )
    {
        // no need to use up_units_bias_inc
        for( int i=0 ; i<l ; i++ )
            uub[i] += pos_factor * uubps[i] + neg_factor * uubns[i];
    }
    else
    {
        // ensure that up_units_bias_inc has the right size
        up_units_bias_inc.resize( l );

        // The update rule becomes:
        // up_units_bias_inc =
        //      momentum * up_units_bias_inc
        //      - learning_rate * (up_units_bias_pos_stats/pos_count
        //                         - up_units_bias_neg_stats/neg_count);
        // up_units_bias += up_units_bias_inc;
        real* uubinc = up_units_bias_inc.data();
        for( int i=0 ; i<l ; i++ )
        {
            uubinc[i] = momentum * uubinc[i]
                + pos_factor * uubps[i] + neg_factor * uubns[i];
            uub[i] += uubinc[i];
        }
    }

    clearStats();
}

// Instead of using accumulated statistics, we assume that a single Markov
// chain has been run, and we update the parameters from the first four
// values of the chain (v_0, h_0, v_1, h_1).
void RBMLLParameters::update( const Vec& pos_down_values, // v_0
                              const Vec& pos_up_values,   // h_0
                              const Vec& neg_down_values, // v_1
                              const Vec& neg_up_values )  // h_1
{
    // weights -= learning_rate * ( h_0 v_0' - h_1 v_1' );
    // or:
    // weights[i][j] += learning_rate * (h_1[i] v_1[j] - h_0[i] v_0[j]);

    int l = weights.length();
    int w = weights.width();
    PLASSERT( pos_up_values.length() == l );
    PLASSERT( neg_up_values.length() == l );
    PLASSERT( pos_down_values.length() == w );
    PLASSERT( neg_down_values.length() == w );

    real* w_i = weights.data();
    real* puv_i = pos_up_values.data();
    real* nuv_i = neg_up_values.data();
    real* pdv = pos_down_values.data();
    real* ndv = neg_down_values.data();
    int w_mod = weights.mod();

    if( momentum == 0. )
    {
        for( int i=0 ; i<l ; i++, w_i += w_mod, puv_i++, nuv_i++ )
            for( int j=0 ; j<w ; j++ )
                w_i[j] += learning_rate * (*nuv_i * ndv[j] - *puv_i * pdv[j]);
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l, w );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               - learning_rate * ( h_0 v_0' - h_1 v_1' );
        // weights += weights_inc;

        real* winc_i = weights_inc.data();
        int winc_mod = weights_inc.mod();
        for( int i=0 ; i<l ; i++, w_i += w_mod, winc_i += winc_mod,
                             puv_i++, nuv_i++ )
            for( int j=0 ; j<w ; j++ )
            {
                winc_i[j] = momentum * winc_i[j]
                    + learning_rate * (*nuv_i * ndv[j] - *puv_i * pdv[j]);
                w_i[j] += winc_i[j];
            }
    }

    // down_units_bias -= learning_rate * ( v_0 - v_1 )

    real* dub = down_units_bias.data();
    // pdv and ndv didn't change since last time
    // real* pdv = pos_down_values.data();
    // real* ndv = neg_down_values.data();

    if( momentum == 0. )
    {
        // no need to use down_units_bias_inc
        for( int j=0 ; j<w ; j++ )
            dub[j] += learning_rate * ( ndv[j] - pdv[j] );
    }
    else
    {
        // ensure that down_units_bias_inc has the right size
        down_units_bias_inc.resize( w );

        // The update rule becomes:
        // down_units_bias_inc = momentum * down_units_bias_inc
        //                       - learning_rate * ( v_0 - v_1 );
        // down_units_bias += down_units_bias_inc;

        real* dubinc = down_units_bias_inc.data();
        for( int j=0 ; j<w ; j++ )
        {
            dubinc[j] = momentum * dubinc[j]
                + learning_rate * ( ndv[j] - pdv[j] );
            dub[j] += dubinc[j];
        }
    }

    // up_units_bias -= learning_rate * ( h_0 - h_1 )
    real* uub = up_units_bias.data();
    real* puv = pos_up_values.data();
    real* nuv = neg_up_values.data();

    if( momentum == 0. )
    {
        // no need to use up_units_bias_inc
        for( int i=0 ; i<l ; i++ )
            uub[i] += learning_rate * ( nuv[i] - puv[i] );
    }
    else
    {
        // ensure that up_units_bias_inc has the right size
        up_units_bias_inc.resize( l );

        // The update rule becomes:
        // up_units_bias_inc = momentum * up_units_bias_inc
        //                     - learning_rate * ( h_0 - h_1 );
        // up_units_bias += up_units_bias_inc;
        real* uubinc = up_units_bias_inc.data();
        for( int i=0 ; i<l ; i++ )
        {
            uubinc[i] = momentum * uubinc[i]
                + learning_rate * ( nuv[i] - puv[i] );
            uub[i] += uubinc[i];
        }
    }
}
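
// Hypothetical usage sketch (the variable names below are invented for
// illustration): a single CD-1 step with the overload above could look like
//
//     Vec v0 = ...;                     // observed visible values
//     Vec h0 = ...;                     // hidden values sampled given v0
//     Vec v1 = ...;                     // reconstruction sampled given h0
//     Vec h1 = ...;                     // hidden values given v1
//     params->update( v0, h0, v1, h1 );
//
// as opposed to accumulating statistics over many samples and then calling
// the parameterless update() above.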

void RBMLLParameters::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    down_units_bias_pos_stats.clear();
    down_units_bias_neg_stats.clear();

    up_units_bias_pos_stats.clear();
    up_units_bias_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

void RBMLLParameters::computeUnitActivations
    ( int start, int length, const Vec& activations ) const
{
    PLASSERT( activations.length() == length );
    if( going_up )
    {
        PLASSERT( start+length <= up_layer_size );
        // activations[i-start] = sum_j weights(i,j) input_vec[j] + b[i]
        product( activations, weights.subMatRows(start, length), input_vec );
        activations += up_units_bias.subVec(start, length);
    }
    else
    {
        PLASSERT( start+length <= down_layer_size );
        // activations[i-start] = sum_j weights(j,i) input_vec[j] + b[i]
        transposeProduct( activations, weights.subMatColumns(start, length),
                          input_vec );
        activations += down_units_bias.subVec(start, length);
    }
}
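
// Note on the two branches above: with W = weights, of size
// up_layer_size × down_layer_size, going up computes a slice of
// W * input_vec + up_units_bias, while going down computes a slice of
// W' * input_vec + down_units_bias; the same weight matrix is shared by
// both directions of the RBM.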

void RBMLLParameters::bpropUpdate(const Vec& input, const Vec& output,
                                  Vec& input_gradient,
                                  const Vec& output_gradient)
{
    PLASSERT( input.size() == down_layer_size );
    PLASSERT( output.size() == up_layer_size );
    PLASSERT( output_gradient.size() == up_layer_size );
    input_gradient.resize( down_layer_size );

    // input_gradient = weights' * output_gradient
    transposeProduct( input_gradient, weights, output_gradient );

    // weights -= learning_rate * output_gradient * input'
    externalProductScaleAcc( weights, output_gradient, input, -learning_rate );

    // (up) bias -= learning_rate * output_gradient
    multiplyAcc( up_units_bias, output_gradient, -learning_rate );
}
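
// Sketch of the gradients applied above: for the linear map y = W x + c
// (x = input, y = output, c = up_units_bias), the chain rule gives
//
//     dL/dx = W' dL/dy        -> input_gradient (transposeProduct)
//     dL/dW = dL/dy x'        -> externalProductScaleAcc on weights
//     dL/dc = dL/dy           -> multiplyAcc on up_units_bias
//
// with the last two scaled by -learning_rate for a plain SGD step. Note
// that input_gradient is computed before the weights are modified.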

void RBMLLParameters::forget()
{
    if( initialization_method == "zero" )
        weights.clear();
    else
    {
        if( !random_gen )
            random_gen = new PRandom();

        real d = 1. / max( down_layer_size, up_layer_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( weights, -d, d );
    }

    down_units_bias.clear();
    up_units_bias.clear();

    clearStats();
}
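
// Note: the default initialization above draws weights uniformly in [-d, d]
// with d = 1 / max(down_layer_size, up_layer_size), and "uniform_sqrt"
// widens this to d = sqrt(1 / max(...)), similar in spirit to common
// fan-based initialization schemes; biases always restart at zero.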


/* THIS METHOD IS OPTIONAL
void RBMLLParameters::finalize()
{
}
*/

int RBMLLParameters::nParameters(bool share_up_params,
                                 bool share_down_params) const
{
    return weights.size() + (share_up_params?up_units_bias.size():0) +
        (share_down_params?down_units_bias.size():0);
}

Vec RBMLLParameters::makeParametersPointHere(const Vec& global_parameters,
                                             bool share_up_params,
                                             bool share_down_params)
{
    int n1=weights.size();
    int n2=up_units_bias.size();
    int n3=down_units_bias.size();
    // should equal nParameters(share_up_params, share_down_params)
    int n = n1+(share_up_params?n2:0)+(share_down_params?n3:0);
    int m = global_parameters.size();
    if (m<n)
        PLERROR("RBMLLParameters::makeParametersPointHere: argument has"
                " length %d, should be longer than nParameters()=%d", m, n);
    real* p = global_parameters.data();
    weights.makeSharedValue(p,n1);
    p+=n1;
    if (share_up_params)
    {
        up_units_bias.makeSharedValue(p,n2);
        p+=n2;
    }
    if (share_down_params)
        down_units_bias.makeSharedValue(p,n3);
    return global_parameters.subVec(n,m-n);
}
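
// Hypothetical usage sketch (names invented for illustration): since the
// method returns the unused tail of global_parameters, several parameter
// objects can share one flat vector by chaining calls:
//
//     Vec rest = all_params;
//     rest = layer1->makeParametersPointHere( rest, true, true );
//     rest = layer2->makeParametersPointHere( rest, true, true );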


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :