PLearn 0.1
RBMMatrixConnection.cc
00001 // -*- C++ -*-
00002 
00003 // RBMMatrixConnection.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00041 #include "RBMMatrixConnection.h"
00042 #include <plearn/math/TMat_maths.h>
00043 
00044 namespace PLearn {
00045 using namespace std;
00046 
00047 PLEARN_IMPLEMENT_OBJECT(
00048     RBMMatrixConnection,
00049     "Stores and learns the parameters between two linear layers of an RBM",
00050     "");
00051 
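// A minimal usage sketch (hedged: the wiring below follows the usual PLearn
// idiom; 'down_size', 'up_size' and 'random_gen' are options inherited from
// the parent connection/module classes, and the sizes 784/500 are only
// illustrative):
//
//   PP<RBMMatrixConnection> c = new RBMMatrixConnection(0.01 /* learning rate */);
//   c->down_size  = 784;
//   c->up_size    = 500;
//   c->random_gen = new PRandom();
//   c->build();    // resizes 'weights' and the statistics matrices
//   c->forget();   // uniform init in [-d, d], d = 1/max(down_size, up_size)
//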
00052 RBMMatrixConnection::RBMMatrixConnection( real the_learning_rate ) :
00053     inherited(the_learning_rate),
00054     gibbs_ma_increment(0.1),
00055     gibbs_initial_ma_coefficient(0.1),
00056     L1_penalty_factor(0),
00057     L2_penalty_factor(0),
00058     L2_decrease_constant(0),
00059     L2_shift(100),
00060     L2_decrease_type("one_over_t"),
00061     L2_n_updates(0)
00062 {
00063 }
00064 
00065 void RBMMatrixConnection::declareOptions(OptionList& ol)
00066 {
00067     declareOption(ol, "weights", &RBMMatrixConnection::weights,
00068                   OptionBase::learntoption,
00069                   "Matrix containing unit-to-unit weights (up_size x"
00070                   " down_size)");
00071 
00072     declareOption(ol, "gibbs_ma_schedule", &RBMMatrixConnection::gibbs_ma_schedule,
00073                   OptionBase::buildoption,
00074                   "Each element of this vector is a number of updates after which\n"
00075                   "the moving average coefficient is incremented (by incrementing\n"
00076                   "its inverse sigmoid by gibbs_ma_increment). After the last\n"
00077                   "increase has been made, the moving average coefficient stays constant.\n");
00078 
00079     declareOption(ol, "gibbs_ma_increment",
00080                   &RBMMatrixConnection::gibbs_ma_increment,
00081                   OptionBase::buildoption,
00082                   "The increment in the inverse sigmoid of the moving "
00083                   "average coefficient\n"
00084                   "to apply after the number of updates reaches an element "
00085                   "of the gibbs_ma_schedule.\n");
00086 
00087     declareOption(ol, "gibbs_initial_ma_coefficient",
00088                   &RBMMatrixConnection::gibbs_initial_ma_coefficient,
00089                   OptionBase::buildoption,
00090                   "Initial moving average coefficient for the negative phase "
00091                   "statistics in the Gibbs chain.\n");
00092 
00093     declareOption(ol, "L1_penalty_factor",
00094                   &RBMMatrixConnection::L1_penalty_factor,
00095                   OptionBase::buildoption,
00096                   "Optional (default=0) factor of L1 regularization term, i.e.\n"
00097                   "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| "
00098                   "during training.\n");
00099 
00100     declareOption(ol, "L2_penalty_factor",
00101                   &RBMMatrixConnection::L2_penalty_factor,
00102                   OptionBase::buildoption,
00103                   "Optional (default=0) factor of L2 regularization term, i.e.\n"
00104                   "minimize 0.5 * L2_penalty_factor * sum_{ij} weights(i,j)^2 "
00105                   "during training.\n");
00106 
00107     declareOption(ol, "L2_decrease_constant",
00108                   &RBMMatrixConnection::L2_decrease_constant,
00109                   OptionBase::buildoption,
00110         "Parameter of the L2 penalty decrease (see L2_decrease_type).",
00111         OptionBase::advanced_level);
00112 
00113     declareOption(ol, "L2_shift",
00114                   &RBMMatrixConnection::L2_shift,
00115                   OptionBase::buildoption,
00116         "Parameter of the L2 penalty decrease (see L2_decrease_type).",
00117         OptionBase::advanced_level);
00118 
00119     declareOption(ol, "L2_decrease_type",
00120                   &RBMMatrixConnection::L2_decrease_type,
00121                   OptionBase::buildoption,
00122         "The kind of L2 decrease that is being applied. The decrease\n"
00123         "consists in scaling the L2 penalty by a factor that depends on the\n"
00124         "number 't' of times this penalty has been used to modify the\n"
00125         "weights of the connection. It can be one of:\n"
00126         " - 'one_over_t': 1 / (1 + t * L2_decrease_constant)\n"
00127         " - 'sigmoid_like': sigmoid((L2_shift - t) * L2_decrease_constant)",
00128         OptionBase::advanced_level);
00129 
00130     declareOption(ol, "L2_n_updates",
00131                   &RBMMatrixConnection::L2_n_updates,
00132                   OptionBase::learntoption,
00133         "Number of times that weights have been changed by the L2 penalty\n"
00134         "update rule.");
00135 
00136 
00137 
00138     // Now call the parent class' declareOptions
00139     inherited::declareOptions(ol);
00140 }
00141 
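// build_() makes the internal storage consistent with the current sizes: the
// weight matrix and the positive/negative statistics are (re)allocated to
// (up_size x down_size), and if the weight matrix had to change shape the
// parameters are reinitialized through forget().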
00142 void RBMMatrixConnection::build_()
00143 {
00144     if( up_size <= 0 || down_size <= 0 )
00145         return;
00146 
00147     bool needs_forget = false; // do we need to reinitialize the parameters?
00148 
00149     if( weights.length() != up_size ||
00150         weights.width() != down_size )
00151     {
00152         weights.resize( up_size, down_size );
00153         needs_forget = true;
00154     }
00155 
00156     weights_pos_stats.resize( up_size, down_size );
00157     weights_neg_stats.resize( up_size, down_size );
00158 
00159     if( momentum != 0. )
00160         weights_inc.resize( up_size, down_size );
00161 
00162     if( needs_forget )
00163         forget();
00164 
00165     clearStats();
00166 }
00167 
00168 void RBMMatrixConnection::build()
00169 {
00170     inherited::build();
00171     build_();
00172 }
00173 
00174 
00175 void RBMMatrixConnection::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00176 {
00177     inherited::makeDeepCopyFromShallowCopy(copies);
00178 
00179     deepCopyField(weights, copies);
00180     deepCopyField(weights_pos_stats, copies);
00181     deepCopyField(weights_neg_stats, copies);
00182     deepCopyField(weights_inc, copies);
00183 }
00184 
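// Positive-phase sufficient statistics for contrastive divergence:
// weights_pos_stats accumulates the outer products h v' of up/down
// activations observed on data, and pos_count records how many examples
// contributed, so that update() can use the average pos_stats / pos_count.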
00185 void RBMMatrixConnection::accumulatePosStats( const Vec& down_values,
00186                                               const Vec& up_values )
00187 {
00188     // weights_pos_stats += up_values * down_values'
00189     externalProductAcc( weights_pos_stats, up_values, down_values );
00190 
00191     pos_count++;
00192 }
00193 
00194 void RBMMatrixConnection::accumulatePosStats( const Mat& down_values,
00195                                               const Mat& up_values )
00196 {
00197     int mbs=down_values.length();
00198     PLASSERT(up_values.length()==mbs);
00199     // weights_pos_stats += up_values * down_values'
00200     transposeProductAcc(weights_pos_stats, up_values, down_values);
00201     pos_count+=mbs;
00202 }
00203 
00205 // accumulateNegStats //
00207 void RBMMatrixConnection::accumulateNegStats( const Vec& down_values,
00208                                               const Vec& up_values )
00209 {
00210     // weights_neg_stats += up_values * down_values'
00211     externalProductAcc( weights_neg_stats, up_values, down_values );
00212 
00213     neg_count++;
00214 }
00215 
00216 void RBMMatrixConnection::accumulateNegStats( const Mat& down_values,
00217                                               const Mat& up_values )
00218 {
00219     int mbs=down_values.length();
00220     PLASSERT(up_values.length()==mbs);
00221     // weights_neg_stats += up_values * down_values'
00222     transposeProductAcc(weights_neg_stats, up_values, down_values);
00223     neg_count+=mbs;
00224 }
00225 
00227 // update //
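// The update below is a step of (stochastic) gradient ascent on the
// contrastive-divergence objective: delta_w = learning_rate *
// ( <h v'>_data - <h v'>_model ), where the two expectations are estimated
// by weights_pos_stats/pos_count and weights_neg_stats/neg_count.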
00229 void RBMMatrixConnection::update()
00230 {
00231     // updates parameters
00232     //weights += learning_rate * (weights_pos_stats/pos_count
00233     //                              - weights_neg_stats/neg_count)
00234     real pos_factor = learning_rate / pos_count;
00235     real neg_factor = -learning_rate / neg_count;
00236 
00237     int l = weights.length();
00238     int w = weights.width();
00239 
00240     real* w_i = weights.data();
00241     real* wps_i = weights_pos_stats.data();
00242     real* wns_i = weights_neg_stats.data();
00243     int w_mod = weights.mod();
00244     int wps_mod = weights_pos_stats.mod();
00245     int wns_mod = weights_neg_stats.mod();
00246 
00247     if( momentum == 0. )
00248     {
00249         // no need to use weights_inc
00250         for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
00251             for( int j=0 ; j<w ; j++ )
00252                 w_i[j] += pos_factor * wps_i[j] + neg_factor * wns_i[j];
00253     }
00254     else
00255     {
00256         // ensure that weights_inc has the right size
00257         weights_inc.resize( l, w );
00258 
00259         // The update rule becomes:
00260         // weights_inc = momentum * weights_inc
00261         //               - learning_rate * (weights_pos_stats/pos_count
00262         //                                  - weights_neg_stats/neg_count);
00263         // weights += weights_inc;
00264         real* winc_i = weights_inc.data();
00265         int winc_mod = weights_inc.mod();
00266         for( int i=0 ; i<l ; i++, w_i += w_mod, wps_i += wps_mod,
00267                              wns_i += wns_mod, winc_i += winc_mod )
00268             for( int j=0 ; j<w ; j++ )
00269             {
00270                 winc_i[j] = momentum * winc_i[j]
00271                     + pos_factor * wps_i[j] + neg_factor * wns_i[j];
00272                 w_i[j] += winc_i[j];
00273             }
00274     }
00275 
00276     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00277         applyWeightPenalty();
00278 
00279     clearStats();
00280 }
00281 
00282 // Instead of using the accumulated statistics, we assume a single Markov chain
00283 // has been run, and we update the parameters from its first four values.
00284 void RBMMatrixConnection::update( const Vec& pos_down_values, // v_0
00285                                   const Vec& pos_up_values,   // h_0
00286                                   const Vec& neg_down_values, // v_1
00287                                   const Vec& neg_up_values )  // h_1
00288 {
00289     // weights += learning_rate * ( h_0 v_0' - h_1 v_1' );
00290     // or:
00291     // weights[i][j] += learning_rate * (h_0[i] v_0[j] - h_1[i] v_1[j]);
00292 
00293     int l = weights.length();
00294     int w = weights.width();
00295     PLASSERT( pos_up_values.length() == l );
00296     PLASSERT( neg_up_values.length() == l );
00297     PLASSERT( pos_down_values.length() == w );
00298     PLASSERT( neg_down_values.length() == w );
00299 
00300     real* w_i = weights.data();
00301     real* puv_i = pos_up_values.data();
00302     real* nuv_i = neg_up_values.data();
00303     real* pdv = pos_down_values.data();
00304     real* ndv = neg_down_values.data();
00305     int w_mod = weights.mod();
00306 
00307     if( momentum == 0. )
00308     {
00309         for( int i=0 ; i<l ; i++, w_i += w_mod, puv_i++, nuv_i++ )
00310             for( int j=0 ; j<w ; j++ )
00311                 w_i[j] += learning_rate * (*puv_i * pdv[j] - *nuv_i * ndv[j]);
00312     }
00313     else
00314     {
00315         // ensure that weights_inc has the right size
00316         weights_inc.resize( l, w );
00317 
00318         // The update rule becomes:
00319         // weights_inc = momentum * weights_inc
00320         //               - learning_rate * ( h_0 v_0' - h_1 v_1' );
00321         // weights += weights_inc;
00322 
00323         real* winc_i = weights_inc.data();
00324         int winc_mod = weights_inc.mod();
00325         for( int i=0 ; i<l ; i++, w_i += w_mod, winc_i += winc_mod,
00326                              puv_i++, nuv_i++ )
00327             for( int j=0 ; j<w ; j++ )
00328             {
00329                 winc_i[j] = momentum * winc_i[j]
00330                     + learning_rate * (*puv_i * pdv[j] - *nuv_i * ndv[j]);
00331                 w_i[j] += winc_i[j];
00332             }
00333     }
00334 
00335     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00336         applyWeightPenalty();
00337 }
00338 
00339 void RBMMatrixConnection::update( const Mat& pos_down_values, // v_0
00340                                   const Mat& pos_up_values,   // h_0
00341                                   const Mat& neg_down_values, // v_1
00342                                   const Mat& neg_up_values )  // h_1
00343 {
00344     // weights += learning_rate * ( h_0 v_0' - h_1 v_1' );
00345     // or:
00346     // weights[i][j] += learning_rate * (h_0[i] v_0[j] - h_1[i] v_1[j]);
00347 
00348     PLASSERT( pos_up_values.width() == weights.length() );
00349     PLASSERT( neg_up_values.width() == weights.length() );
00350     PLASSERT( pos_down_values.width() == weights.width() );
00351     PLASSERT( neg_down_values.width() == weights.width() );
00352 
00353     if( momentum == 0. )
00354     {
00355         // We use the average gradient over a mini-batch.
00356         real avg_lr = learning_rate / pos_down_values.length();
00357 
00358         transposeProductScaleAcc(weights, pos_up_values, pos_down_values,
00359                                  avg_lr, real(1));
00360 
00361         transposeProductScaleAcc(weights, neg_up_values, neg_down_values,
00362                                  -avg_lr, real(1));
00363     }
00364     else
00365     {
00366         PLERROR("RBMMatrixConnection::update minibatch with momentum - Not implemented");
00367         /*
00368         // ensure that weights_inc has the right size
00369         weights_inc.resize( l, w );
00370 
00371         // The update rule becomes:
00372         // weights_inc = momentum * weights_inc
00373         //               + learning_rate * ( h_0 v_0' - h_1 v_1' );
00374         // weights += weights_inc;
00375 
00376         real* winc_i = weights_inc.data();
00377         int winc_mod = weights_inc.mod();
00378         for( int i=0 ; i<l ; i++, w_i += w_mod, winc_i += winc_mod,
00379                              puv_i++, nuv_i++ )
00380             for( int j=0 ; j<w ; j++ )
00381             {
00382                 winc_i[j] = momentum * winc_i[j]
00383                     + learning_rate * (*puv_i * pdv[j] - *nuv_i * ndv[j]);
00384                 w_i[j] += winc_i[j];
00385             }
00386          */
00387     }
00388 
00389     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00390         applyWeightPenalty();
00391 }
00392 
00393 
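// updateCDandGibbs blends two estimators of the negative phase: the usual CD
// statistics (cd_neg_*) and statistics from a persistent Gibbs chain
// (gibbs_neg_*), the latter kept as a moving average in weights_neg_stats.
// The mixing weight between the two is background_gibbs_update_ratio.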
00394 void RBMMatrixConnection::updateCDandGibbs( const Mat& pos_down_values,
00395                                             const Mat& pos_up_values,
00396                                             const Mat& cd_neg_down_values,
00397                                             const Mat& cd_neg_up_values,
00398                                             const Mat& gibbs_neg_down_values,
00399                                             const Mat& gibbs_neg_up_values,
00400                                             real background_gibbs_update_ratio)
00401 {
00402     real normalize_factor = 1.0/pos_down_values.length();
00403     // neg_stats <-- gibbs_chain_statistics_forgetting_factor * neg_stats
00404     //              +(1-gibbs_chain_statistics_forgetting_factor)
00405     //               * gibbs_neg_up_values'*gibbs_neg_down_values/minibatch_size
00406     if (neg_count==0)
00407         transposeProductScaleAcc(weights_neg_stats, gibbs_neg_up_values,
00408                                  gibbs_neg_down_values,
00409                                  normalize_factor, real(0));
00410     else
00411         transposeProductScaleAcc(weights_neg_stats,
00412                                  gibbs_neg_up_values,
00413                                  gibbs_neg_down_values,
00414                                  normalize_factor*(1-gibbs_ma_coefficient),
00415                                  gibbs_ma_coefficient);
00416     neg_count++;
00417 
00418     // delta w = lrate * ( pos_up_values'*pos_down_values
00419     //                   - ( background_gibbs_update_ratio*neg_stats
00420     //                      +(1-background_gibbs_update_ratio)
00421     //                       * cd_neg_up_values'*cd_neg_down_values/minibatch_size))
00422     transposeProductScaleAcc(weights, pos_up_values, pos_down_values,
00423                              learning_rate*normalize_factor, real(1));
00424     multiplyAcc(weights, weights_neg_stats,
00425                 -learning_rate*background_gibbs_update_ratio);
00426     transposeProductScaleAcc(weights, cd_neg_up_values, cd_neg_down_values,
00427         -learning_rate*(1-background_gibbs_update_ratio)*normalize_factor,
00428         real(1));
00429 
00430     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00431         applyWeightPenalty();
00432 }
00433 
00434 void RBMMatrixConnection::updateGibbs( const Mat& pos_down_values,
00435                                        const Mat& pos_up_values,
00436                                        const Mat& gibbs_neg_down_values,
00437                                        const Mat& gibbs_neg_up_values)
00438 {
00439     int minibatch_size = pos_down_values.length();
00440     real normalize_factor = 1.0/minibatch_size;
00441     // neg_stats <-- gibbs_chain_statistics_forgetting_factor * neg_stats
00442     //              +(1-gibbs_chain_statistics_forgetting_factor)
00443     //               * gibbs_neg_up_values'*gibbs_neg_down_values
00444     static Mat tmp;
00445     tmp.resize(weights.length(),weights.width());
00446     transposeProduct(tmp, gibbs_neg_up_values, gibbs_neg_down_values);
00447 
00448     if (neg_count==0)
00449         multiply(weights_neg_stats,tmp,normalize_factor);
00450     else
00451         multiplyScaledAdd(tmp,gibbs_ma_coefficient,
00452                           normalize_factor*(1-gibbs_ma_coefficient),
00453                           weights_neg_stats);
00454 
00455     neg_count++;
00456 
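    // The moving-average coefficient follows gibbs_ma_schedule: once the
    // number of examples seen (neg_count * minibatch_size) reaches an entry
    // of the schedule, the coefficient is pushed up by gibbs_ma_increment in
    // inverse-sigmoid space. For instance (illustrative numbers only), a
    // coefficient of 0.1 with an increment of 0.1 becomes
    // sigmoid(inverse_sigmoid(0.1) + 0.1) ~= 0.109.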
00457     bool increase_ma=false;
00458     for (int i=0;i<gibbs_ma_schedule.length();i++)
00459         if (gibbs_ma_schedule[i]==neg_count*minibatch_size)
00460         {
00461             increase_ma=true;
00462             break;
00463         }
00464     if (increase_ma)
00465     {
00466         gibbs_ma_coefficient = sigmoid(gibbs_ma_increment + inverse_sigmoid(gibbs_ma_coefficient));
00467         cout << "new coefficient = " << gibbs_ma_coefficient << " at example " << neg_count*minibatch_size << endl;
00468     }
00469 
00470     // delta w = lrate * ( pos_up_values'*pos_down_values/minibatch_size - neg_stats )
00471     transposeProductScaleAcc(weights, pos_up_values, pos_down_values,
00472                              learning_rate*normalize_factor, real(1));
00473     multiplyAcc(weights, weights_neg_stats, -learning_rate);
00474 
00475     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00476         applyWeightPenalty();
00477 }
00478 
00480 // clearStats //
00482 void RBMMatrixConnection::clearStats()
00483 {
00484     weights_pos_stats.clear();
00485     weights_neg_stats.clear();
00486 
00487     pos_count = 0;
00488     neg_count = 0;
00489 
00490     gibbs_ma_coefficient = gibbs_initial_ma_coefficient;
00491 }
00492 
00494 // computeProduct //
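// computeProduct propagates the stored input vector (input_vec) through the
// weight matrix. The 'going_up' flag selects the direction: upward, rows
// [start, start+length) of W multiply the down-layer input; downward, the
// corresponding columns of W are used, i.e. the transpose is applied.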
00496 void RBMMatrixConnection::computeProduct( int start, int length,
00497                                           const Vec& activations,
00498                                           bool accumulate ) const
00499 {
00500     PLASSERT( activations.length() == length );
00501     if( going_up )
00502     {
00503         PLASSERT( start+length <= up_size );
00504         // activations[i-start] += sum_j weights(i,j) input_vec[j]
00505 
00506         if( accumulate )
00507             productAcc( activations,
00508                         weights.subMatRows(start,length),
00509                         input_vec );
00510         else
00511             product( activations,
00512                      weights.subMatRows(start,length),
00513                      input_vec );
00514     }
00515     else
00516     {
00517         PLASSERT( start+length <= down_size );
00518         // activations[i-start] += sum_j weights(j,i) input_vec[j]
00519         if( accumulate )
00520             transposeProductAcc( activations,
00521                                  weights.subMatColumns(start,length),
00522                                  input_vec );
00523         else
00524             transposeProduct( activations,
00525                               weights.subMatColumns(start,length),
00526                               input_vec );
00527     }
00528 }
00529 
00531 // computeProducts //
00533 void RBMMatrixConnection::computeProducts(int start, int length,
00534                                           Mat& activations,
00535                                           bool accumulate ) const
00536 {
00537     PLASSERT( activations.width() == length );
00538     activations.resize(inputs_mat.length(), length);
00539     if( going_up )
00540     {
00541         PLASSERT( start+length <= up_size );
00542         // activations(k, i-start) += sum_j weights(i,j) inputs_mat(k, j)
00543 
00544         if( accumulate )
00545             productTransposeAcc(activations,
00546                     inputs_mat,
00547                     weights.subMatRows(start,length));
00548         else
00549             productTranspose(activations,
00550                     inputs_mat,
00551                     weights.subMatRows(start,length));
00552     }
00553     else
00554     {
00555         PLASSERT( start+length <= down_size );
00556         // activations(k, i-start) += sum_j weights(j,i) inputs_mat(k, j)
00557         if( accumulate )
00558             productAcc(activations,
00559                     inputs_mat,
00560                     weights.subMatColumns(start,length) );
00561         else
00562             product(activations,
00563                     inputs_mat,
00564                     weights.subMatColumns(start,length) );
00565     }
00566 }
00567 
00569 // fprop //
00571 void RBMMatrixConnection::fprop(const Vec& input, const Mat& rbm_weights,
00572                           Vec& output) const
00573 {
00574     product( output, rbm_weights, input );
00575 }
00576 
00578 // getAllWeights //
00580 void RBMMatrixConnection::getAllWeights(Mat& rbm_weights) const
00581 {
00582     rbm_weights = weights;
00583 }
00584 
00586 // setAllWeights //
00588 void RBMMatrixConnection::setAllWeights(const Mat& rbm_weights)
00589 {
00590     weights = rbm_weights;
00591 }
00592 
00594 // bpropUpdate //
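// bpropUpdate back-propagates output_gradient through the linear map
// (input_gradient = W' * output_gradient) and immediately takes a gradient
// step on the weights (W -= learning_rate * output_gradient * input'),
// followed by the optional L1/L2 penalty. Momentum is not used here.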
00596 void RBMMatrixConnection::bpropUpdate(const Vec& input, const Vec& output,
00597                                       Vec& input_gradient,
00598                                       const Vec& output_gradient,
00599                                       bool accumulate)
00600 {
00601     PLASSERT( input.size() == down_size );
00602     PLASSERT( output.size() == up_size );
00603     PLASSERT( output_gradient.size() == up_size );
00604 
00605     if( accumulate )
00606     {
00607         PLASSERT_MSG( input_gradient.size() == down_size,
00608                       "Cannot resize input_gradient AND accumulate into it" );
00609 
00610         // input_gradient += weights' * output_gradient
00611         transposeProductAcc( input_gradient, weights, output_gradient );
00612     }
00613     else
00614     {
00615         input_gradient.resize( down_size );
00616 
00617         // input_gradient = weights' * output_gradient
00618         transposeProduct( input_gradient, weights, output_gradient );
00619     }
00620 
00621     // weights -= learning_rate * output_gradient * input'
00622     externalProductScaleAcc( weights, output_gradient, input, -learning_rate );
00623 
00624     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00625         applyWeightPenalty();
00626 }
00627 
00628 void RBMMatrixConnection::bpropUpdate(const Mat& inputs, const Mat& outputs,
00629                                       Mat& input_gradients,
00630                                       const Mat& output_gradients,
00631                                       bool accumulate)
00632 {
00633     PLASSERT( inputs.width() == down_size );
00634     PLASSERT( outputs.width() == up_size );
00635     PLASSERT( output_gradients.width() == up_size );
00636 
00637     if( accumulate )
00638     {
00639         PLASSERT_MSG( input_gradients.width() == down_size &&
00640                       input_gradients.length() == inputs.length(),
00641                       "Cannot resize input_gradients and accumulate into it" );
00642 
00643         // input_gradients += output_gradient * weights
00644         productAcc(input_gradients, output_gradients, weights);
00645     }
00646     else
00647     {
00648         input_gradients.resize(inputs.length(), down_size);
00649         // input_gradients = output_gradient * weights
00650         product(input_gradients, output_gradients, weights);
00651     }
00652 
00653     // weights -= learning_rate/n * output_gradients' * inputs
00654     transposeProductScaleAcc(weights, output_gradients, inputs,
00655                              -learning_rate / inputs.length(), real(1));
00656 
00657     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00658         applyWeightPenalty();
00659 }
00660 
00661 void RBMMatrixConnection::petiteCulotteOlivierUpdate(
00662     const Vec& input, const Mat& rbm_weights,
00663     const Vec& output,
00664     Vec& input_gradient,
00665     Mat& rbm_weights_gradient,
00666     const Vec& output_gradient,
00667     bool accumulate)
00668 {
00669     PLASSERT( input.size() == down_size );
00670     PLASSERT( output.size() == up_size );
00671     PLASSERT( output_gradient.size() == up_size );
00672 
00673     if( accumulate )
00674     {
00675         PLASSERT_MSG( input_gradient.size() == down_size,
00676                       "Cannot resize input_gradient AND accumulate into it" );
00677 
00678         // input_gradient += rbm_weights' * output_gradient
00679         transposeProductAcc( input_gradient, rbm_weights, output_gradient );
00680 
00681         // rbm_weights_gradient += output_gradient' * input
00682         externalProductAcc( rbm_weights_gradient, output_gradient,
00683                             input);
00684 
00685     }
00686     else
00687     {
00688         input_gradient.resize( down_size );
00689 
00690         // input_gradient = rbm_weights' * output_gradient
00691         transposeProduct( input_gradient, rbm_weights, output_gradient );
00692 
00693         // rbm_weights_gradient = output_gradient' * input
00694         externalProduct( rbm_weights_gradient, output_gradient,
00695                          input);
00696     }
00697 
00698     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00699         addWeightPenalty(rbm_weights, rbm_weights_gradient);
00700 }
00701 
00702 
00704 // bpropAccUpdate //
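// Port convention (as used below): ports_value[0]/ports_gradient[0] refer to
// the 'down' layer and ports_value[1]/ports_gradient[1] to the 'up' layer.
// A non-null but empty gradient matrix means the caller wants that gradient
// to be computed; a non-empty one means it is provided as input.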
00706 void RBMMatrixConnection::bpropAccUpdate(const TVec<Mat*>& ports_value,
00707                                          const TVec<Mat*>& ports_gradient)
00708 {
00709     //TODO: add weights as port?
00710     PLASSERT( ports_value.length() == nPorts()
00711               && ports_gradient.length() == nPorts() );
00712 
00713     Mat* down = ports_value[0];
00714     Mat* up = ports_value[1];
00715     Mat* down_grad = ports_gradient[0];
00716     Mat* up_grad = ports_gradient[1];
00717 
00718     PLASSERT( down && !down->isEmpty() );
00719     PLASSERT( up && !up->isEmpty() );
00720 
00721     int batch_size = down->length();
00722     PLASSERT( up->length() == batch_size );
00723 
00724     // If we have up_grad
00725     if( up_grad && !up_grad->isEmpty() )
00726     {
00727         // down_grad should not be provided
00728         PLASSERT( !down_grad || down_grad->isEmpty() );
00729         PLASSERT( up_grad->length() == batch_size );
00730         PLASSERT( up_grad->width() == up_size );
00731 
00732         // If we want down_grad
00733         if( down_grad && down_grad->isEmpty() )
00734         {
00735             PLASSERT( down_grad->width() == down_size );
00736             down_grad->resize(batch_size, down_size);
00737 
00738             // down_grad = up_grad * weights
00739             product(*down_grad, *up_grad, weights);
00740         }
00741 
00742         // weights -= learning_rate/n * up_grad' * down
00743         transposeProductScaleAcc(weights, *up_grad, *down,
00744                                  -learning_rate/batch_size, real(1));
00745     }
00746     else if( down_grad && !down_grad->isEmpty() )
00747     {
00748         PLASSERT( down_grad->length() == batch_size );
00749         PLASSERT( down_grad->width() == down_size );
00750 
00751         // If we want up_grad
00752         if( up_grad && up_grad->isEmpty() )
00753         {
00754             PLASSERT( up_grad->width() == up_size );
00755             up_grad->resize(batch_size, up_size);
00756 
00757             // up_grad = down_grad * weights'
00758             productTranspose(*up_grad, *down_grad, weights);
00759         }
00760 
00761         // weights = -learning_rate/n * up' * down_grad
00762         transposeProductScaleAcc(weights, *up, *down_grad,
00763                                  -learning_rate/batch_size, real(1));
00764     }
00765     else
00766         PLCHECK_MSG( false,
00767                      "Unknown port configuration" );
00768 
00769     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00770         applyWeightPenalty();
00771 }
00772 
00773 
00775 // bpropCD //
00777 void RBMMatrixConnection::petiteCulotteOlivierCD(Mat& weights_gradient,
00778                                                  bool accumulate)
00779 {
00780     int l = weights_gradient.length();
00781     int w = weights_gradient.width();
00782 
00783     real* w_i = weights_gradient.data();
00784     real* wps_i = weights_pos_stats.data();
00785     real* wns_i = weights_neg_stats.data();
00786     int w_mod = weights_gradient.mod();
00787     int wps_mod = weights_pos_stats.mod();
00788     int wns_mod = weights_neg_stats.mod();
00789 
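    // Gradient of the CD objective for minimization:
    // dC/dw = <h v'>_model - <h v'>_data
    //       = neg_stats/neg_count - pos_stats/pos_count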
00790     if(accumulate)
00791     {
00792         for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
00793             for( int j=0 ; j<w ; j++ )
00794                 w_i[j] += wns_i[j]/neg_count - wps_i[j]/pos_count;
00795     }
00796     else
00797     {
00798         for( int i=0 ; i<l ; i++, w_i+=w_mod, wps_i+=wps_mod, wns_i+=wns_mod )
00799             for( int j=0 ; j<w ; j++ )
00800                 w_i[j] = wns_i[j]/neg_count - wps_i[j]/pos_count;
00801     }
00802 
00803     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00804         addWeightPenalty(weights, weights_gradient);
00805 }
00806 
00807 // Instead of using the accumulated statistics, we assume a single Markov chain
00808 // has been run, and we update the parameters from its first four values.
00809 void RBMMatrixConnection::petiteCulotteOlivierCD(
00810     const Vec& pos_down_values, // v_0
00811     const Vec& pos_up_values,   // h_0
00812     const Vec& neg_down_values, // v_1
00813     const Vec& neg_up_values, // h_1
00814     Mat& weights_gradient,
00815     bool accumulate)
00816 {
00817     int l = weights.length();
00818     int w = weights.width();
00819     PLASSERT( pos_up_values.length() == l );
00820     PLASSERT( neg_up_values.length() == l );
00821     PLASSERT( pos_down_values.length() == w );
00822     PLASSERT( neg_down_values.length() == w );
00823 
00824     real* w_i = weights_gradient.data();
00825     real* puv_i = pos_up_values.data();
00826     real* nuv_i = neg_up_values.data();
00827     real* pdv = pos_down_values.data();
00828     real* ndv = neg_down_values.data();
00829     int w_mod = weights_gradient.mod();
00830 
00831     if(accumulate)
00832     {
00833         for( int i=0 ; i<l ; i++, w_i += w_mod, puv_i++, nuv_i++ )
00834             for( int j=0 ; j<w ; j++ )
00835                 w_i[j] +=  *nuv_i * ndv[j] - *puv_i * pdv[j] ;
00836     }
00837     else
00838     {
00839         for( int i=0 ; i<l ; i++, w_i += w_mod, puv_i++, nuv_i++ )
00840             for( int j=0 ; j<w ; j++ )
00841                 w_i[j] =  *nuv_i * ndv[j] - *puv_i * pdv[j] ;
00842     }
00843 
00844     if(!fast_exact_is_equal(L1_penalty_factor,0) || !fast_exact_is_equal(L2_penalty_factor,0))
00845         addWeightPenalty(weights, weights_gradient);
00846 }
00847 
00849 // applyWeightPenalty //
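// applyWeightPenalty shrinks the weights in place: the L2 term multiplies
// each weight by (1 - learning_rate * L2_penalty_factor), possibly scaled
// down over time according to L2_decrease_type, and the L1 term applies
// soft-thresholding (weights within +/- delta_L1 of zero are set to zero).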
00851 void RBMMatrixConnection::applyWeightPenalty()
00852 {
00853     // Apply penalty (decay) on weights.
00854     real delta_L1 = learning_rate * L1_penalty_factor;
00855     real delta_L2 = learning_rate * L2_penalty_factor;
00856     if (L2_decrease_type == "one_over_t")
00857         delta_L2 /= (1 + L2_decrease_constant * L2_n_updates);
00858     else if (L2_decrease_type == "sigmoid_like")
00859         delta_L2 *= sigmoid((L2_shift - L2_n_updates) * L2_decrease_constant);
00860     else
00861         PLERROR("In RBMMatrixConnection::applyWeightPenalty - Invalid value "
00862                 "for L2_decrease_type: %s", L2_decrease_type.c_str());
00863     for( int i=0; i<up_size; i++)
00864     {
00865         real* w_ = weights[i];
00866         for( int j=0; j<down_size; j++ )
00867         {
00868             if( delta_L2 != 0. )
00869                 w_[j] *= (1 - delta_L2);
00870 
00871             if( delta_L1 != 0. )
00872             {
00873                 if( w_[j] > delta_L1 )
00874                     w_[j] -= delta_L1;
00875                 else if( w_[j] < -delta_L1 )
00876                     w_[j] += delta_L1;
00877                 else
00878                     w_[j] = 0.;
00879             }
00880         }
00881     }
00882     if (delta_L2 > 0)
00883         L2_n_updates++;
00884 }
00885 
00887 // addWeightPenalty //
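// Unlike applyWeightPenalty, this variant does not modify the weights: it
// adds the penalty gradient (L2_penalty_factor * w plus the sign-based L1
// term) to an externally provided gradient matrix.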
00889 void RBMMatrixConnection::addWeightPenalty(Mat weights, Mat weight_gradients)
00890 {
00891     // Add penalty (decay) gradient.
00892     real delta_L1 = L1_penalty_factor;
00893     real delta_L2 = L2_penalty_factor;
00894     PLASSERT_MSG( is_equal(L2_decrease_constant, 0) && is_equal(L2_shift, 100),
00895                   "L2 decrease not implemented in this method" );
00896     for( int i=0; i<weights.length(); i++)
00897     {
00898         real* w_ = weights[i];
00899         real* gw_ = weight_gradients[i];
00900         for( int j=0; j<weights.width(); j++ )
00901         {
00902             if( delta_L2 != 0. )
00903                 gw_[j] += delta_L2*w_[j];
00904 
00905             if( delta_L1 != 0. )
00906             {
00907                 if( w_[j] > 0 )
00908                     gw_[j] += delta_L1;
00909                 else if( w_[j] < 0 )
00910                     gw_[j] -= delta_L1;
00911             }
00912         }
00913     }
00914 }
00915 
00917 // forget //
00919 // Reset the parameters to the state they would be BEFORE starting training.
00920 // Note that this method is necessarily called from build().
00921 void RBMMatrixConnection::forget()
00922 {
00923     clearStats();
00924     if( initialization_method == "zero" )
00925         weights.clear();
00926     else
00927     {
00928         if( !random_gen )
00929         {
00930             PLWARNING( "RBMMatrixConnection: cannot forget() without"
00931                        " random_gen" );
00932             return;
00933         }
00934 
00935         //random_gen->manual_seed(1827);
00936 
00937         real d = 1. / max( down_size, up_size );
00938         if( initialization_method == "uniform_sqrt" )
00939             d = sqrt( d );
00940 
00941         random_gen->fill_random_uniform( weights, -d, d );
00942     }
00943     L2_n_updates = 0;
00944 }
00945 
00946 
00947 /* THIS METHOD IS OPTIONAL
00952 void RBMMatrixConnection::finalize()
00953 {
00954 }
00955 */
00956 
00958 int RBMMatrixConnection::nParameters() const
00959 {
00960     return weights.size();
00961 }
00962 
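// makeParametersPointHere re-maps the weight storage onto the caller's flat
// parameter vector (via makeSharedValue), so external optimizers can
// manipulate all parameters through 'global_parameters'. It returns the
// unused remainder of that vector.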
00968 Vec RBMMatrixConnection::makeParametersPointHere(const Vec& global_parameters)
00969 {
00970     int n=weights.size();
00971     int m = global_parameters.size();
00972     if (m<n)
00973         PLERROR("RBMMatrixConnection::makeParametersPointHere: argument has length %d, should be at least nParameters()=%d",m,n);
00974     real* p = global_parameters.data();
00975     weights.makeSharedValue(p,n);
00976 
00977     return global_parameters.subVec(n,m-n);
00978 }
00979 
00980 
00981 
00982 } // end of namespace PLearn
00983 
00984 
00985 /*
00986   Local Variables:
00987   mode:c++
00988   c-basic-offset:4
00989   c-file-style:"stroustrup"
00990   c-file-offsets:((innamespace . 0)(inline-open . 0))
00991   indent-tabs-mode:nil
00992   fill-column:79
00993   End:
00994 */
00995 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :