// -*- C++ -*-

// RBMGenericParameters.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#include "RBMGenericParameters.h"
#include <plearn/math/TMat_maths.h>
//#include "RBMLayer.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMGenericParameters,
    "Stores and learns the parameters between two layers of an RBM",
    "");

RBMGenericParameters::RBMGenericParameters( real the_learning_rate ) :
    inherited(the_learning_rate)
{
}

RBMGenericParameters::RBMGenericParameters( string down_types, string up_types,
                                            real the_learning_rate ) :
    inherited( down_types, up_types, the_learning_rate )
{
    // We're not sure inherited::build() has been called
    build();
}

void RBMGenericParameters::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here.
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. If you don't provide one of these three,
    // ### this option will be ignored when loading values from a script.
    // ### You can also combine flags, for example with OptionBase::nosave:
    // ### (OptionBase::buildoption | OptionBase::nosave)

    declareOption(ol, "weights", &RBMGenericParameters::weights,
                  OptionBase::learntoption,
                  "Matrix containing unit-to-unit weights (output_size ×"
                  " input_size)");

    declareOption(ol, "up_units_params",
                  &RBMGenericParameters::up_units_params,
                  OptionBase::learntoption,
                  "Element i contains inner parameters (like the bias) of up"
                  " unit i");

    declareOption(ol, "down_units_params",
                  &RBMGenericParameters::down_units_params,
                  OptionBase::learntoption,
                  "Element i contains inner parameters (like the bias) of down"
                  " unit i");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

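//! Resizes the weight matrix and the per-unit parameter and statistics
//! vectors to the layer sizes, validates the unit type characters ('l' for
//! linear, 'q' for quadratic), computes output_size (one activation slot per
//! linear unit, two per quadratic unit), and reinitializes the parameters if
//! the weight matrix changed shape.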
void RBMGenericParameters::build_()
{
    if( up_layer_size == 0 || down_layer_size == 0 )
        return;

    output_size = 0;
    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights.length() != up_layer_size ||
        weights.width() != down_layer_size )
    {
        weights.resize( up_layer_size, down_layer_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_layer_size, down_layer_size );
    weights_neg_stats.resize( up_layer_size, down_layer_size );

    down_units_params.resize( down_layer_size );
    down_units_params_pos_stats.resize( down_layer_size );
    down_units_params_neg_stats.resize( down_layer_size );
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        char dut_i = down_units_types[i];
        if( dut_i == 'l' ) // linear activation unit
        {
            down_units_params[i].resize(1);
            down_units_params_pos_stats[i].resize(1);
            down_units_params_neg_stats[i].resize(1);
        }
        else if( dut_i == 'q' ) // quadratic
        {
            down_units_params[i].resize(2);
            down_units_params_pos_stats[i].resize(2);
            down_units_params_neg_stats[i].resize(2);
        }
        else
            PLERROR( "RBMGenericParameters::build_() - value '%c' for"
                     " down_units_types[%d]\n"
                     "is unknown. Supported values are 'l' and 'q'.\n",
                     dut_i, i );
    }

    up_units_params.resize( up_layer_size );
    up_units_params_pos_stats.resize( up_layer_size );
    up_units_params_neg_stats.resize( up_layer_size );
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        char uut_i = up_units_types[i];
        if( uut_i == 'l' ) // linear activation unit
        {
            up_units_params[i].resize(1);
            up_units_params_pos_stats[i].resize(1);
            up_units_params_neg_stats[i].resize(1);
            output_size += 1;
        }
        else if( uut_i == 'q' ) // quadratic
        {
            up_units_params[i].resize(2);
            up_units_params_pos_stats[i].resize(2);
            up_units_params_neg_stats[i].resize(2);
            output_size += 2;
        }
        else
            PLERROR( "RBMGenericParameters::build_() - value '%c' for"
                     " up_units_types[%d]\n"
                     "is unknown. Supported values are 'l' and 'q'.\n",
                     uut_i, i );
    }

    if( needs_forget )
        forget();

    clearStats();
}

void RBMGenericParameters::build()
{
    inherited::build();
    build_();
}


void RBMGenericParameters::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights, copies);
    deepCopyField(up_units_params, copies);
    deepCopyField(down_units_params, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(up_units_params_pos_stats, copies);
    deepCopyField(up_units_params_neg_stats, copies);
    deepCopyField(down_units_params_pos_stats, copies);
    deepCopyField(down_units_params_neg_stats, copies);
}

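//! Positive phase of contrastive divergence: given a sample pair
//! (down_values, up_values) from the data, accumulates the outer product
//! up_values * down_values' into weights_pos_stats, plus the per-unit bias
//! statistics and, for quadratic units, the quadratic-term statistics.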
void RBMGenericParameters::accumulatePosStats( const Vec& down_values,
                                               const Vec& up_values )
{
    // weights_pos_stats += up_values * down_values'
    externalProductAcc( weights_pos_stats, up_values, down_values );

    for( int i=0 ; i<down_layer_size ; i++ )
    {
        // the bias is updated the same way for 'l' and 'q' units
        down_units_params_pos_stats[i][0] += down_values[i];

        // also update 'q' units' quadratic term
        if( down_units_types[i] == 'q' )
            down_units_params_pos_stats[i][1] +=
                2 * down_units_params[i][1] * down_values[i] * down_values[i];
    }
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        // the bias is updated the same way for 'l' and 'q' units
        up_units_params_pos_stats[i][0] += up_values[i];

        // also update 'q' units' quadratic term
        if( up_units_types[i] == 'q' )
            up_units_params_pos_stats[i][1] +=
                2 * up_units_params[i][1] * up_values[i] * up_values[i];
    }

    pos_count++;
}

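//! Negative phase: accumulates the same statistics as accumulatePosStats(),
//! but into the *_neg_stats fields, typically from values sampled from the
//! model (e.g. after one or more Gibbs steps).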
void RBMGenericParameters::accumulateNegStats( const Vec& down_values,
                                               const Vec& up_values )
{
    // weights_neg_stats += up_values * down_values'
    externalProductAcc( weights_neg_stats, up_values, down_values );

    for( int i=0 ; i<down_layer_size ; i++ )
    {
        // the bias is updated the same way for 'l' and 'q' units
        down_units_params_neg_stats[i][0] += down_values[i];

        // also update 'q' units' quadratic term
        if( down_units_types[i] == 'q' )
            down_units_params_neg_stats[i][1] +=
                2 * down_units_params[i][1] * down_values[i] * down_values[i];
    }
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        // the bias is updated the same way for 'l' and 'q' units
        up_units_params_neg_stats[i][0] += up_values[i];

        // also update 'q' units' quadratic term
        if( up_units_types[i] == 'q' )
            up_units_params_neg_stats[i][1] +=
                2 * up_units_params[i][1] * up_values[i] * up_values[i];
    }

    neg_count++;
}

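//! Contrastive-divergence step on all parameters:
//!   theta -= learning_rate * ( pos_stats/pos_count - neg_stats/neg_count )
//! for the weights and every unit's inner parameters, then clears the stats.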
void RBMGenericParameters::update()
{
    // updates parameters
    // weights -= learning_rate * (weights_pos_stats/pos_count
    //                             - weights_neg_stats/neg_count)
    weights_pos_stats /= pos_count;
    weights_neg_stats /= neg_count;
    weights_pos_stats -= weights_neg_stats;
    weights_pos_stats *= learning_rate;
    weights -= weights_pos_stats;

    for( int i=0 ; i<up_layer_size ; i++ )
    {
        up_units_params[i] -=
            learning_rate * (up_units_params_pos_stats[i]/pos_count
                             - up_units_params_neg_stats[i]/neg_count);
    }

    for( int i=0 ; i<down_layer_size ; i++ )
    {
        down_units_params[i] -=
            learning_rate * (down_units_params_pos_stats[i]/pos_count
                             - down_units_params_neg_stats[i]/neg_count);
    }

    clearStats();
}

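//! Resets all accumulated positive and negative statistics and their counts.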
void RBMGenericParameters::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();
    for( int i=0 ; i<down_layer_size ; i++ )
    {
        down_units_params_pos_stats[i].clear();
        down_units_params_neg_stats[i].clear();
    }
    for( int i=0 ; i<up_layer_size ; i++ )
    {
        up_units_params_pos_stats[i].clear();
        up_units_params_neg_stats[i].clear();
    }
    pos_count = 0;
    neg_count = 0;
}

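//! Activation of a single linear unit i: w_i . input + b_i, where w_i is
//! row i of 'weights' when going up and column i when going down.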
void RBMGenericParameters::computeLinearUnitActivations
    ( int i, const Vec& activations ) const
{
    PLASSERT( activations.length() == 1 );

    if( going_up )
    {
        PLASSERT( up_units_types[i] == 'l' );

        // activations[0] = sum_j weights(i,j) input_vec[j] + b[i]
        product( activations, weights.subMatRows(i,1), input_vec );
        activations[0] += up_units_params[i][0];
    }
    else
    {
        PLASSERT( down_units_types[i] == 'l' );

        // activations[0] = sum_j weights(j,i) input_vec[j] + b[i]
        transposeProduct( activations, weights.subMatColumns(i,1), input_vec );
        activations[0] += down_units_params[i][0];
    }
}

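//! Activations of a single quadratic unit i, parametrized by a bias
//! b = *_units_params[i][0] and a quadratic coefficient a = *_units_params[i][1]:
//!   activations[0] = -(w_i . input + b) / (2 * a^2)
//!   activations[1] = 1 / (2 * a^2)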
void RBMGenericParameters::computeQuadraticUnitActivations
    ( int i, const Vec& activations ) const
{
    PLASSERT( activations.length() == 2 );

    if( going_up )
    {
        PLASSERT( up_units_types[i] == 'q' );

        // activations[0] = -(sum_j weights(i,j) input_vec[j] + b[i])
        //                    / (2 * up_units_params[i][1]^2)
        product( activations, weights.subMatRows(i,1), input_vec );
        real a_i = up_units_params[i][1];
        activations[0] = -(activations[0] + up_units_params[i][0])
                           / (2 * a_i * a_i);

        // activations[1] = 1 / (2 * up_units_params[i][1]^2)
        activations[1] = 1. / (2. * a_i * a_i);
    }
    else
    {
        PLASSERT( down_units_types[i] == 'q' );

        // activations[0] = -(sum_j weights(j,i) input_vec[j] + b[i])
        //                    / (2 * down_units_params[i][1]^2)
        transposeProduct( activations, weights.subMatColumns(i,1), input_vec );
        real a_i = down_units_params[i][1];
        activations[0] = -(activations[0] + down_units_params[i][0])
                           / (2 * a_i * a_i);

        // activations[1] = 1 / (2 * down_units_params[i][1]^2)
        activations[1] = 1. / (2. * a_i * a_i);
    }
}


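//! Computes the activations of units start .. start+length-1, dispatching
//! on each unit's type: a linear unit fills one slot of 'activations', a
//! quadratic unit fills two.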
void RBMGenericParameters::computeUnitActivations
    ( int start, int length, const Vec& activations ) const
{
    string units_types;
    if( going_up )
        units_types = up_units_types;
    else
        units_types = down_units_types;

    PLASSERT( start+length <= (int) units_types.length() );
    int cur_pos = 0; // position index inside activations

    for( int i=start ; i<start+length ; i++ )
    {
        char ut_i = units_types[i];
        if( ut_i == 'l' )
        {
            computeLinearUnitActivations( i, activations.subVec(cur_pos, 1) );
            cur_pos++;
        }
        else if( ut_i == 'q' )
        {
            computeQuadraticUnitActivations( i,
                                             activations.subVec(cur_pos, 2) );
            cur_pos += 2;
        }
        else
            PLERROR( "RBMGenericParameters::computeUnitActivations():\n"
                     "value '%c' for units_types[%d] is unknown.\n"
                     "Supported values are 'l' and 'q'.\n", ut_i, i );
    }
}

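//! Not implemented for generic unit types.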
void RBMGenericParameters::bpropUpdate(const Vec& input, const Vec& output,
                                       Vec& input_gradient,
                                       const Vec& output_gradient)
{
    PLERROR( "RBMGenericParameters::bpropUpdate() not implemented yet.\n"
             "If you only have linear units in the up and down layers, you\n"
             "should consider using RBMLLParameters instead.\n" );
}

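//! Reinitializes the weights according to 'initialization_method' ("zero",
//! or a uniform draw in [-d, d] with d = 1/max(layer sizes), square-rooted
//! for "uniform_sqrt"), then clears all unit parameters and statistics.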
void RBMGenericParameters::forget()
{
    if( initialization_method == "zero" )
        weights.clear();
    else
    {
        if( !random_gen )
            random_gen = new PRandom();

        real d = 1. / max( down_layer_size, up_layer_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( weights, -d, d );
    }

    for( int i=0 ; i<down_layer_size ; i++ )
        down_units_params[i].clear();

    for( int i=0 ; i<up_layer_size ; i++ )
        up_units_params[i].clear();

    clearStats();
}


/* THIS METHOD IS OPTIONAL
void RBMGenericParameters::finalize()
{
}
*/


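//! Number of scalar parameters: the weight matrix plus, optionally, the
//! inner parameters of the up and/or down units.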
int RBMGenericParameters::nParameters(bool share_up_params, bool share_down_params) const
{
    int m = weights.size();
    if (share_up_params)
        for (int i=0; i<up_units_params.length(); i++)
            m += up_units_params[i].size();
    if (share_down_params)
        for (int i=0; i<down_units_params.length(); i++)
            m += down_units_params[i].size();
    return m;
}

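//! Makes the parameter fields share storage with 'global_parameters', in
//! the order: weights, then up_units_params (if share_up_params), then
//! down_units_params (if share_down_params). Returns the unused remainder
//! of 'global_parameters'.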
Vec RBMGenericParameters::makeParametersPointHere(const Vec& global_parameters, bool share_up_params, bool share_down_params)
{
    int n = nParameters(share_up_params, share_down_params);
    int m = global_parameters.size();
    if (m < n)
        PLERROR("RBMGenericParameters::makeParametersPointHere: argument has"
                " length %d, should be at least nParameters()=%d", m, n);
    real* p = global_parameters.data();
    weights.makeSharedValue(p, weights.size());
    p += weights.size();
    if (share_up_params)
        for (int i=0; i<up_units_params.length(); i++)
        {
            up_units_params[i].makeSharedValue(p, up_units_params[i].size());
            p += up_units_params[i].size();
        }
    if (share_down_params)
        for (int i=0; i<down_units_params.length(); i++)
        {
            down_units_params[i].makeSharedValue(p, down_units_params[i].size());
            p += down_units_params[i].size();
        }
    return global_parameters.subVec(n, m-n);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :