// -*- C++ -*-

// RBMDiagonalMatrixConnection.cc
//
// Copyright (C) 2006 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#include "RBMDiagonalMatrixConnection.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMDiagonalMatrixConnection,
    "Stores and learns the diagonal weight matrix between two linear layers of an RBM",
    "The up and down layers must have the same size; only the diagonal of\n"
    "the weight matrix is stored and learned.");

RBMDiagonalMatrixConnection::RBMDiagonalMatrixConnection( real the_learning_rate ) :
    inherited(the_learning_rate)
{
}
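
// A minimal usage sketch (illustrative only; it assumes the RBMConnection
// base-class interface, where setAsDownInput() stores the vector that a
// subsequent computeProduct() call reads, and where 'down_size'/'up_size'
// are public build options):
//
//     PP<RBMDiagonalMatrixConnection> conn =
//         new RBMDiagonalMatrixConnection(0.01);
//     conn->down_size = 10;
//     conn->up_size = 10;       // must equal down_size (diagonal connection)
//     conn->build();
//     Vec v(10), act(10);
//     conn->setAsDownInput(v);          // make v the current input
//     conn->computeProduct(0, 10, act); // act[i] = weights_diag[i] * v[i]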

void RBMDiagonalMatrixConnection::declareOptions(OptionList& ol)
{
    declareOption(ol, "weights_diag", &RBMDiagonalMatrixConnection::weights_diag,
                  OptionBase::learntoption,
                  "Vector containing the diagonal of the weight matrix.\n");

    declareOption(ol, "L1_penalty_factor",
                  &RBMDiagonalMatrixConnection::L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of the L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_i |weights_diag[i]| "
                  "during training.\n");

    declareOption(ol, "L2_penalty_factor",
                  &RBMDiagonalMatrixConnection::L2_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of the L2 regularization term, i.e.\n"
                  "minimize 0.5 * L2_penalty_factor * sum_i weights_diag[i]^2 "
                  "during training.\n");

    declareOption(ol, "L2_decrease_constant",
                  &RBMDiagonalMatrixConnection::L2_decrease_constant,
                  OptionBase::buildoption,
        "Parameter of the L2 penalty decrease (see L2_decrease_type).",
        OptionBase::advanced_level);

    declareOption(ol, "L2_shift",
                  &RBMDiagonalMatrixConnection::L2_shift,
                  OptionBase::buildoption,
        "Parameter of the L2 penalty decrease (see L2_decrease_type).",
        OptionBase::advanced_level);

    declareOption(ol, "L2_decrease_type",
                  &RBMDiagonalMatrixConnection::L2_decrease_type,
                  OptionBase::buildoption,
        "The kind of L2 decrease that is being applied. The decrease\n"
        "consists in scaling the L2 penalty by a factor that depends on the\n"
        "number 't' of times this penalty has been used to modify the\n"
        "weights of the connection. It can be one of:\n"
        " - 'one_over_t': 1 / (1 + t * L2_decrease_constant)\n"
        " - 'sigmoid_like': sigmoid((L2_shift - t) * L2_decrease_constant)",
        OptionBase::advanced_level);
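
    // Worked example of the two schedules (illustrative numbers, not
    // defaults from this file): with L2_decrease_constant = 1e-3, after
    // t = 1000 penalized updates the 'one_over_t' factor is
    // 1 / (1 + 1000 * 1e-3) = 0.5, so the effective L2 penalty is halved;
    // with L2_shift = 1000 as well, the 'sigmoid_like' factor at t = 1000
    // is sigmoid(0) = 0.5, decaying smoothly towards 0 for larger t.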

    declareOption(ol, "L2_n_updates",
                  &RBMDiagonalMatrixConnection::L2_n_updates,
                  OptionBase::learntoption,
        "Number of times that weights have been changed by the L2 penalty\n"
        "update rule.");


    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMDiagonalMatrixConnection::build_()
{
    if( up_size <= 0 || down_size <= 0 )
        return;

    if( up_size != down_size )
        PLERROR("In RBMDiagonalMatrixConnection::build_(): up_size should be "
            "equal to down_size");

    bool needs_forget = false; // do we need to reinitialize the parameters?

    if( weights_diag.length() != up_size )
    {
        weights_diag.resize( up_size );
        needs_forget = true;
    }

    weights_pos_stats.resize( up_size );
    weights_neg_stats.resize( up_size );

    if( momentum != 0. )
        weights_inc.resize( up_size );

    if( needs_forget )
        forget();

    clearStats();
}

void RBMDiagonalMatrixConnection::build()
{
    inherited::build();
    build_();
}


void RBMDiagonalMatrixConnection::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(weights_diag, copies);
    deepCopyField(weights_pos_stats, copies);
    deepCopyField(weights_neg_stats, copies);
    deepCopyField(weights_inc, copies);
}

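// For a diagonal connection, only the diagonal of the positive-phase outer
// product up_values * down_values' is needed, so each call below adds
// up_values[i] * down_values[i] to weights_pos_stats[i].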
void RBMDiagonalMatrixConnection::accumulatePosStats( const Vec& down_values,
                                              const Vec& up_values )
{
    real* wps = weights_pos_stats.data();
    real* uv = up_values.data();
    real* dv = down_values.data();
    for( int i=0; i<up_size; i++ )
        wps[i] += uv[i]*dv[i];

    pos_count++;
}

void RBMDiagonalMatrixConnection::accumulatePosStats( const Mat& down_values,
                                              const Mat& up_values )
{
    int mbs=down_values.length();
    PLASSERT(up_values.length()==mbs);

    real* wps;
    real* uv;
    real* dv;
    for( int t=0; t<mbs; t++ )
    {
        wps = weights_pos_stats.data();
        uv = up_values[t];
        dv = down_values[t];
        for( int i=0; i<up_size; i++ )
            wps[i] += uv[i]*dv[i];
    }
    pos_count+=mbs;
}

////////////////////////
// accumulateNegStats //
////////////////////////
void RBMDiagonalMatrixConnection::accumulateNegStats( const Vec& down_values,
                                              const Vec& up_values )
{
    real* wns = weights_neg_stats.data();
    real* uv = up_values.data();
    real* dv = down_values.data();
    for( int i=0; i<up_size; i++ )
        wns[i] += uv[i]*dv[i];
    neg_count++;
}

void RBMDiagonalMatrixConnection::accumulateNegStats( const Mat& down_values,
                                              const Mat& up_values )
{
    int mbs=down_values.length();
    PLASSERT(up_values.length()==mbs);

    real* wns;
    real* uv;
    real* dv;
    for( int t=0; t<mbs; t++ )
    {
        wns = weights_neg_stats.data();
        uv = up_values[t];
        dv = down_values[t];
        for( int i=0; i<up_size; i++ )
            wns[i] += uv[i]*dv[i];
    }
    neg_count+=mbs;
}

////////////
// update //
////////////
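// Sketch of what the statistics-based update below computes, per diagonal
// element i (the standard contrastive-divergence step restricted to the
// diagonal):
//
//     weights_diag[i] += learning_rate *
//         ( weights_pos_stats[i] / pos_count
//           - weights_neg_stats[i] / neg_count );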
void RBMDiagonalMatrixConnection::update()
{
    // updates parameters
    //weights += learning_rate * (weights_pos_stats/pos_count
    //                              - weights_neg_stats/neg_count)
    real pos_factor = learning_rate / pos_count;
    real neg_factor = -learning_rate / neg_count;

    int l = weights_diag.length();

    real* w_i = weights_diag.data();
    real* wps_i = weights_pos_stats.data();
    real* wns_i = weights_neg_stats.data();

    if( momentum == 0. )
    {
        // no need to use weights_inc
        for( int i=0 ; i<l ; i++ )
            w_i[i] += pos_factor * wps_i[i] + neg_factor * wns_i[i];
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l );

        // The update rule becomes:
        // weights_inc = momentum * weights_inc
        //               + learning_rate * (weights_pos_stats/pos_count
        //                                  - weights_neg_stats/neg_count);
        // weights += weights_inc;
        real* winc_i = weights_inc.data();
        for( int i=0 ; i<l ; i++ )
        {
            winc_i[i] = momentum * winc_i[i]
                + pos_factor * wps_i[i] + neg_factor * wns_i[i];
            w_i[i] += winc_i[i];
        }
    }

    if( !fast_exact_is_equal(L1_penalty_factor, 0) ||
        !fast_exact_is_equal(L2_penalty_factor, 0) )
        applyWeightPenalty();

    clearStats();
}

// Instead of using the accumulated statistics, we assume a single Markov
// chain was run and update the parameters from the first four values of the
// chain (v_0, h_0, v_1, h_1), i.e. per diagonal element:
//     w_i += learning_rate * ( h_0[i] v_0[i] - h_1[i] v_1[i] )
void RBMDiagonalMatrixConnection::update( const Vec& pos_down_values, // v_0
                                  const Vec& pos_up_values,   // h_0
                                  const Vec& neg_down_values, // v_1
                                  const Vec& neg_up_values )  // h_1
{
    int l = weights_diag.length();
    PLASSERT( pos_up_values.length() == l );
    PLASSERT( neg_up_values.length() == l );
    PLASSERT( pos_down_values.length() == l );
    PLASSERT( neg_down_values.length() == l );

    real* w_i = weights_diag.data();
    real* pdv = pos_down_values.data();
    real* puv = pos_up_values.data();
    real* ndv = neg_down_values.data();
    real* nuv = neg_up_values.data();

    if( momentum == 0. )
    {
        for( int i=0 ; i<l ; i++)
            w_i[i] += learning_rate * (puv[i] * pdv[i] - nuv[i] * ndv[i]);
    }
    else
    {
        // ensure that weights_inc has the right size
        weights_inc.resize( l );

        real* winc_i = weights_inc.data();
        for( int i=0 ; i<l ; i++ )
        {
            winc_i[i] = momentum * winc_i[i]
                + learning_rate * (puv[i] * pdv[i] - nuv[i] * ndv[i]);
            w_i[i] += winc_i[i];
        }
    }

    if( !fast_exact_is_equal(L1_penalty_factor, 0) ||
        !fast_exact_is_equal(L2_penalty_factor, 0) )
        applyWeightPenalty();
}

void RBMDiagonalMatrixConnection::update( const Mat& pos_down_values, // v_0
                                  const Mat& pos_up_values,   // h_0
                                  const Mat& neg_down_values, // v_1
                                  const Mat& neg_up_values )  // h_1
{
    // Diagonal restriction of: weights += learning_rate * (h_0 v_0' - h_1 v_1');
    // i.e., for each mini-batch row t:
    // weights_diag[i] += learning_rate * (h_0[t][i] v_0[t][i] - h_1[t][i] v_1[t][i]);

    int l = weights_diag.length();

    PLASSERT( pos_up_values.width() == l );
    PLASSERT( neg_up_values.width() == l );
    PLASSERT( pos_down_values.width() == l );
    PLASSERT( neg_down_values.width() == l );

    real* w_i = weights_diag.data();
    real* pdv;
    real* puv;
    real* ndv;
    real* nuv;

    if( momentum == 0. )
    {
        // We use the average gradient over a mini-batch.
        real avg_lr = learning_rate / pos_down_values.length();

        for( int t=0; t<pos_up_values.length(); t++ )
        {
            pdv = pos_down_values[t];
            puv = pos_up_values[t];
            ndv = neg_down_values[t];
            nuv = neg_up_values[t];
            for( int i=0 ; i<l ; i++)
                w_i[i] += avg_lr * (puv[i] * pdv[i] - nuv[i] * ndv[i]);
        }
    }
    else
    {
        PLERROR("RBMDiagonalMatrixConnection::update minibatch with momentum - Not implemented");
    }

    if( !fast_exact_is_equal(L1_penalty_factor, 0) ||
        !fast_exact_is_equal(L2_penalty_factor, 0) )
        applyWeightPenalty();
}

////////////////
// clearStats //
////////////////
void RBMDiagonalMatrixConnection::clearStats()
{
    weights_pos_stats.clear();
    weights_neg_stats.clear();

    pos_count = 0;
    neg_count = 0;
}

////////////////////
// computeProduct //
////////////////////
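// Computes activations[i] = weights_diag[start+i] * input_vec[start+i] for
// i in [0, length); with 'accumulate', the products are added to the
// existing activations. input_vec is presumably set beforehand through the
// base class (e.g. by setAsDownInput()).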
void RBMDiagonalMatrixConnection::computeProduct( int start, int length,
                                          const Vec& activations,
                                          bool accumulate ) const
{
    PLASSERT( activations.length() == length );
    PLASSERT( start+length <= up_size );
    real* act = activations.data();
    real* w = weights_diag.data();
    real* iv = input_vec.data();
    if( accumulate )
        for( int i=0; i<length; i++ )
            act[i] += w[i+start] * iv[i+start];
    else
        for( int i=0; i<length; i++ )
            act[i] = w[i+start] * iv[i+start];
}

/////////////////////
// computeProducts //
/////////////////////
void RBMDiagonalMatrixConnection::computeProducts(int start, int length,
                                          Mat& activations,
                                          bool accumulate ) const
{
    PLASSERT( activations.width() == length );
    activations.resize(inputs_mat.length(), length);
    real* act;
    real* w = weights_diag.data();
    real* iv;
    if( accumulate )
        for( int t=0; t<inputs_mat.length(); t++ )
        {
            act = activations[t];
            iv = inputs_mat[t];
            for( int i=0; i<length; i++ )
                act[i] += w[i+start] * iv[i+start];
        }
    else
        for( int t=0; t<inputs_mat.length(); t++ )
        {
            act = activations[t];
            iv = inputs_mat[t];
            for( int i=0; i<length; i++ )
                act[i] = w[i+start] * iv[i+start];
        }
}

/////////////////
// bpropUpdate //
/////////////////
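// Since output[i] = weights_diag[i] * input[i], backprop through this
// connection gives, for a cost C:
//     dC/dinput[i]        = weights_diag[i] * dC/doutput[i]
//     dC/dweights_diag[i] = input[i]        * dC/doutput[i]
// The methods below compute the input gradient and immediately apply a
// stochastic gradient step on weights_diag.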
void RBMDiagonalMatrixConnection::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient,
                                      bool accumulate)
{
    PLASSERT( input.size() == down_size );
    PLASSERT( output.size() == up_size );
    PLASSERT( output_gradient.size() == up_size );

    real* w = weights_diag.data();
    real* in = input.data();
    real* outg = output_gradient.data();
    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == down_size,
                      "Cannot resize input_gradient AND accumulate into it" );

        real* ing = input_gradient.data();
        for( int i=0; i<down_size; i++ )
        {
            ing[i] += outg[i]*w[i];
            w[i] -= learning_rate * in[i] * outg[i];
        }
    }
    else
    {
        input_gradient.resize( down_size );
        // take the data pointer only after the resize, which may reallocate
        real* ing = input_gradient.data();
        for( int i=0; i<down_size; i++ )
        {
            ing[i] = outg[i]*w[i];
            w[i] -= learning_rate * in[i] * outg[i];
        }
    }

    if( !fast_exact_is_equal(L1_penalty_factor, 0) ||
        !fast_exact_is_equal(L2_penalty_factor, 0) )
        applyWeightPenalty();
}

void RBMDiagonalMatrixConnection::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                      Mat& input_gradients,
                                      const Mat& output_gradients,
                                      bool accumulate)
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );

    int mbatch = inputs.length();

    real* w = weights_diag.data();
    real* in;
    real* ing;
    real* outg;
    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );

        for( int t=0; t<mbatch; t++ )
        {
            ing = input_gradients[t];
            outg = output_gradients[t];
            for( int i=0; i<down_size; i++ )
                ing[i] += outg[i]*w[i];
        }
    }
    else
    {
        input_gradients.resize(inputs.length(), down_size);
        for( int t=0; t<mbatch; t++ )
        {
            ing = input_gradients[t];
            outg = output_gradients[t];
            for( int i=0; i<down_size; i++ )
                ing[i] = outg[i]*w[i];
        }
    }

    real avg_lr = learning_rate / mbatch;
    for( int t=0; t<mbatch; t++ )
    {
        in = inputs[t];
        outg = output_gradients[t];
        for( int i=0; i<down_size; i++ )
            w[i] -= avg_lr * in[i] * outg[i];
    }

    if( !fast_exact_is_equal(L1_penalty_factor, 0) ||
        !fast_exact_is_equal(L2_penalty_factor, 0) )
        applyWeightPenalty();
}

////////////////////////
// applyWeightPenalty //
////////////////////////
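// The L2 penalty is applied as a multiplicative decay of each weight, and
// the L1 penalty as soft-thresholding:
//     weights_diag[i] <- sign(weights_diag[i])
//                        * max( |weights_diag[i]| - delta_L1, 0 )
// which is the closed-form proximal step for the L1 term.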
void RBMDiagonalMatrixConnection::applyWeightPenalty()
{
    // Apply penalty (decay) on weights.
    real delta_L1 = learning_rate * L1_penalty_factor;
    real delta_L2 = learning_rate * L2_penalty_factor;
    if (L2_decrease_type == "one_over_t")
        delta_L2 /= (1 + L2_decrease_constant * L2_n_updates);
    else if (L2_decrease_type == "sigmoid_like")
        delta_L2 *= sigmoid((L2_shift - L2_n_updates) * L2_decrease_constant);
    else
        PLERROR("In RBMDiagonalMatrixConnection::applyWeightPenalty - Invalid value "
                "for L2_decrease_type: %s", L2_decrease_type.c_str());
    real* w_ = weights_diag.data();
    for( int i=0; i<down_size; i++ )
    {
        if( delta_L2 != 0. )
            w_[i] *= (1 - delta_L2);

        if( delta_L1 != 0. )
        {
            if( w_[i] > delta_L1 )
                w_[i] -= delta_L1;
            else if( w_[i] < -delta_L1 )
                w_[i] += delta_L1;
            else
                w_[i] = 0.;
        }
    }

    if (delta_L2 > 0)
        L2_n_updates++;
}

//////////////////////
// addWeightPenalty //
//////////////////////
void RBMDiagonalMatrixConnection::addWeightPenalty(Vec weights_diag, Vec weight_diag_gradients)
{
    // Add penalty (decay) gradient.
    real delta_L1 = L1_penalty_factor;
    real delta_L2 = L2_penalty_factor;
    PLASSERT_MSG( is_equal(L2_decrease_constant, 0) && is_equal(L2_shift, 100),
                  "L2 decrease not implemented in this method" );
    real* w_ = weights_diag.data();
    real* gw_ = weight_diag_gradients.data();
    for( int i=0; i<down_size; i++ )
    {
        if( delta_L2 != 0. )
            gw_[i] += delta_L2*w_[i];

        if( delta_L1 != 0. )
        {
            if( w_[i] > 0 )
                gw_[i] += delta_L1;
            else if( w_[i] < 0 )
                gw_[i] -= delta_L1;
        }
    }
}

////////////
// forget //
////////////
// Reset the parameters to the state they would be BEFORE starting training.
// Note that this method is necessarily called from build().
void RBMDiagonalMatrixConnection::forget()
{
    clearStats();
    if( initialization_method == "zero" )
        weights_diag.clear();
    else
    {
        if( !random_gen )
        {
            PLWARNING( "RBMDiagonalMatrixConnection: cannot forget() without"
                       " random_gen" );
            return;
        }

        //random_gen->manual_seed(1827);

        real d = 1. / max( down_size, up_size );
        if( initialization_method == "uniform_sqrt" )
            d = sqrt( d );

        random_gen->fill_random_uniform( weights_diag, -d, d );
    }
    L2_n_updates = 0;
}


/* THIS METHOD IS OPTIONAL
void RBMDiagonalMatrixConnection::finalize()
{
}
*/

int RBMDiagonalMatrixConnection::nParameters() const
{
    return weights_diag.size();
}

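// Makes weights_diag share the storage of the first nParameters() entries
// of global_parameters (via makeSharedValue) and returns the remaining
// sub-vector, so that successive modules can chain their parameters into
// one flat vector.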
Vec RBMDiagonalMatrixConnection::makeParametersPointHere(const Vec& global_parameters)
{
    int n = weights_diag.size();
    int m = global_parameters.size();
    if (m<n)
        PLERROR("RBMDiagonalMatrixConnection::makeParametersPointHere: argument "
                "has length %d, but it should be at least nParameters()=%d", m, n);
    real* p = global_parameters.data();
    weights_diag.makeSharedValue(p,n);

    return global_parameters.subVec(n,m-n);
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :