// -*- C++ -*-

// RBMLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici
#include "RBMLayer.h"
#include <plearn/math/TMat_maths.h>
#include <plearn/math/PRandom.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_ABSTRACT_OBJECT(
    RBMLayer,
    "Virtual class for a layer of an RBM",
    "");

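// Usage sketch (not part of the original file): RBMLayer is abstract, so in
// practice one instantiates a concrete subclass such as RBMBinomialLayer.
// The subclass name and exact call sequence below are an assumption for
// illustration, based only on the methods declared in this file:
//
//     PP<RBMLayer> layer = new RBMBinomialLayer();
//     layer->size = 100;                               // number of units (build option)
//     layer->build();
//     layer->getAllActivations(connection, 0, false);  // connection: a PP<RBMConnection>
//     layer->computeExpectation();
//     layer->generateSample();
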
RBMLayer::RBMLayer( real the_learning_rate ) :
    learning_rate(the_learning_rate),
    momentum(0.),
    size(-1),
    bias_decay_type("none"),
    bias_decay_parameter(0),
    gibbs_ma_increment(0.1),
    gibbs_initial_ma_coefficient(0.1),
    batch_size(0),
    expectation_is_up_to_date(false),
    expectations_are_up_to_date(false),
    pos_count(0),
    neg_count(0)
{
}

void RBMLayer::reset()
{
    activation.clear();
    sample.clear();
    expectation.clear();
    bias_inc.clear();
    expectation_is_up_to_date = false;
    expectations_are_up_to_date = false;
}

void RBMLayer::clearStats()
{
    bias_pos_stats.clear();
    bias_neg_stats.clear();
    pos_count = 0;
    neg_count = 0;
    gibbs_ma_coefficient = gibbs_initial_ma_coefficient;
}

void RBMLayer::forget()
{
    bias.clear();
    reset();
    clearStats();
}

void RBMLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "units_types", &RBMLayer::units_types,
                  OptionBase::nosave,
                  "Obsolete option.");

    declareOption(ol, "size", &RBMLayer::size,
                  OptionBase::buildoption,
                  "Number of units.");

    declareOption(ol, "learning_rate", &RBMLayer::learning_rate,
                  OptionBase::buildoption,
                  "Learning rate.");

    declareOption(ol, "momentum", &RBMLayer::momentum,
                  OptionBase::buildoption,
                  "Momentum.");

    declareOption(ol, "bias_decay_type", &RBMLayer::bias_decay_type,
                  OptionBase::buildoption,
                  "Bias decay type:\n"
                  " - none: no decay applied\n"
                  " - negative: pushes the biases towards -\\infty\n"
                  " - l2: applies an l2 penalty");

    declareOption(ol, "bias_decay_parameter", &RBMLayer::bias_decay_parameter,
                  OptionBase::buildoption,
                  "Bias decay parameter.");

    declareOption(ol, "gibbs_ma_schedule", &RBMLayer::gibbs_ma_schedule,
                  OptionBase::buildoption,
                  "Each element of this vector is a number of updates after which\n"
                  "the moving average coefficient is incremented (by incrementing\n"
                  "its inverse sigmoid by gibbs_ma_increment). After the last\n"
                  "increase has been made, the moving average coefficient stays constant.\n");

    declareOption(ol, "gibbs_ma_increment", &RBMLayer::gibbs_ma_increment,
                  OptionBase::buildoption,
                  "The increment in the inverse sigmoid of the moving average coefficient\n"
                  "to apply after the number of updates reaches an element of the gibbs_ma_schedule.\n");

    declareOption(ol, "gibbs_initial_ma_coefficient", &RBMLayer::gibbs_initial_ma_coefficient,
                  OptionBase::buildoption,
                  "Initial moving average coefficient for the negative phase statistics in the Gibbs chain.\n");

    declareOption(ol, "bias", &RBMLayer::bias,
                  OptionBase::learntoption,
                  "Biases of the units.");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);

    redeclareOption(ol, "input_size", &RBMLayer::input_size,
                    OptionBase::learntoption,
                    "input_size = size");

    redeclareOption(ol, "output_size", &RBMLayer::output_size,
                    OptionBase::learntoption,
                    "output_size = size");
}

void RBMLayer::declareMethods(RemoteMethodMap& rmm)
{
    // Make sure that inherited methods are declared
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(rmm, "setAllBias", &RBMLayer::setAllBias,
                  (BodyDoc("Set the bias values"),
                   ArgDoc ("bias", "the vector of biases")));

    declareMethod(rmm, "generateSample", &RBMLayer::generateSample,
                  (BodyDoc("Generate a sample, and update the sample field")));
    declareMethod(rmm, "getAllActivations", &RBMLayer::getAllActivations,
                  (BodyDoc("Uses 'rbmc' to obtain the activations of all units in this layer. \n"
                           "Unit 0 of this layer corresponds to unit 'offset' of 'rbmc'."),
                   ArgDoc("PP<RBMConnection> rbmc", "RBM Connection"),
                   ArgDoc("int offset", "Offset"),
                   ArgDoc("bool minibatch", "Use minibatch")));
    declareMethod(rmm, "computeExpectation", &RBMLayer::computeExpectation,
                  (BodyDoc("Compute expectation.")));
}

// build_ //
void RBMLayer::build_()
{
    if( size <= 0 )
        return;

    input_size = size;
    output_size = size;

    activation.resize( size );
    activations.resize( 0, size );
    sample.resize( size );
    samples.resize( 0, size );
    expectation.resize( size );
    expectations.resize( 0, size );
    expectation_is_up_to_date = false;
    expectations_are_up_to_date = false;

    bias.resize( size );
    bias_pos_stats.resize( size );
    bias_neg_stats.resize( size );
}

// build //
void RBMLayer::build()
{
    inherited::build();
    build_();
}


// makeDeepCopyFromShallowCopy //
void RBMLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(gibbs_ma_schedule,    copies);
    deepCopyField(bias,                 copies);
    deepCopyField(activation,           copies);
    deepCopyField(activations,          copies);
    deepCopyField(sample,               copies);
    deepCopyField(samples,              copies);
    deepCopyField(expectation,          copies);
    deepCopyField(bias_pos_stats,       copies);
    deepCopyField(bias_neg_stats,       copies);
    deepCopyField(bias_inc,             copies);
    deepCopyField(ones,                 copies);
    deepCopyField(expectations,         copies);
    deepCopyField(tmp,                  copies);
}


// setLearningRate //
void RBMLayer::setLearningRate( real the_learning_rate )
{
    learning_rate = the_learning_rate;
}

// setMomentum //
void RBMLayer::setMomentum( real the_momentum )
{
    momentum = the_momentum;
}

// setBatchSize //
void RBMLayer::setBatchSize( int the_batch_size )
{
    batch_size = the_batch_size;
    PLASSERT( activations.width() == size );
    activations.resize( batch_size, size );
    PLASSERT( expectations.width() == size );
    expectations.resize( batch_size, size );
    PLASSERT( samples.width() == size );
    samples.resize( batch_size, size );
}


// getUnitActivation //
void RBMLayer::getUnitActivation( int i, PP<RBMConnection> rbmc, int offset )
{
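    // Refresh unit i only: ask the connection for this single unit's input
    // (unit i+offset on the connection side), then add the unit's own bias.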
    Vec act = activation.subVec(i,1);
    rbmc->computeProduct( i+offset, 1, act );
    act[0] += bias[i];
    expectation_is_up_to_date = false;
    expectations_are_up_to_date = false;
}

// getAllActivations //
void RBMLayer::getAllActivations( PP<RBMConnection> rbmc, int offset,
                                  bool minibatch)
{
    if (minibatch) {
        rbmc->computeProducts( offset, size, activations );
        activations += bias;
        setBatchSize(activations.length());
    } else {
        rbmc->computeProduct( offset, size, activation );
        activation += bias;
    }
    expectation_is_up_to_date = false;
    expectations_are_up_to_date = false;
}

void RBMLayer::expectation_is_not_up_to_date()
{
    expectation_is_up_to_date = false;
}


// getExpectations //
const Mat& RBMLayer::getExpectations() {
    return this->expectations;
}

// fprop //
void RBMLayer::fprop( const Vec& input, Vec& output ) const
{
    // Note: inefficient.

    // Yes it's ugly, blame the const plague
    RBMLayer* This = const_cast<RBMLayer*>(this);

    PLASSERT( input.size() == This->input_size );
    output.resize( This->output_size );

    This->activation << input;
    This->activation += bias;
    This->expectation_is_up_to_date = false;

    This->computeExpectation();

    output << This->expectation;
}

void RBMLayer::fprop(const Mat& inputs, Mat& outputs)
{
    // Note: inefficient.
    PLASSERT( inputs.width() == input_size );
    int mbatch_size = inputs.length();
    outputs.resize(mbatch_size, output_size);

    setBatchSize(mbatch_size);
    activations << inputs;
    for (int k = 0; k < mbatch_size; k++)
        activations(k) += bias;

    expectations_are_up_to_date = false;
    computeExpectations();
    outputs << expectations;
}

void RBMLayer::fprop( const Vec& input, const Vec& rbm_bias,
                      Vec& output ) const
{
    PLERROR("In RBMLayer::fprop(): not implemented in subclass %s",
            this->classname().c_str());
}

void RBMLayer::bpropUpdate(const Vec& input, const Vec& rbm_bias,
                           const Vec& output,
                           Vec& input_gradient, Vec& rbm_bias_gradient,
                           const Vec& output_gradient)
{
    PLERROR("In RBMLayer::bpropUpdate(): not implemented in subclass %s",
            this->classname().c_str());
}

real RBMLayer::fpropNLL(const Vec& target)
{
    PLERROR("In RBMLayer::fpropNLL(): not implemented in subclass %s",
            this->classname().c_str());
    return REAL_MAX;
}

real RBMLayer::fpropNLL(const Vec& target, const Vec& cost_weights)
{
    PLERROR("weighted version of RBMLayer::fpropNLL not implemented in subclass %s",
            this->classname().c_str());
    return REAL_MAX;
}


void RBMLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    PLWARNING("batch version of RBMLayer::fpropNLL may not be optimized in subclass %s",
              this->classname().c_str());
    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    Mat tmp;
    tmp.resize(1,input_size);
    Vec target;
    target.resize(input_size);

    computeExpectations();
    expectation_is_up_to_date = false;
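    // Fall back on the per-sample fpropNLL(): copy each row of the minibatch
    // into the single-sample buffers (expectation, activation, target) and
    // delegate to the Vec version.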
    for (int k=0;k<batch_size;k++) // loop over minibatch
    {
        selectRows(expectations, TVec<int>(1, k), tmp );
        expectation << tmp;
        selectRows( activations, TVec<int>(1, k), tmp );
        activation << tmp;
        selectRows( targets, TVec<int>(1, k), tmp );
        target << tmp;
        costs_column(k,0) = fpropNLL( target );
    }
}

void RBMLayer::bpropNLL(const Vec& target, real nll, Vec& bias_gradient)
{
    PLERROR("In RBMLayer::bpropNLL(): not implemented in subclass %s",
            this->classname().c_str());
}

void RBMLayer::bpropNLL(const Mat& targets,  const Mat& costs_column,
                        Mat& bias_gradients)
{
    PLERROR("In RBMLayer::bpropNLL(): not implemented in subclass %s",
            this->classname().c_str());
}

// accumulatePosStats //
void RBMLayer::accumulatePosStats( const Vec& pos_values )
{
    bias_pos_stats += pos_values;
    pos_count++;
}
void RBMLayer::accumulatePosStats( const Mat& pos_values )
{
    for (int i=0;i<pos_values.length();i++)
        bias_pos_stats += pos_values(i);
    pos_count+=pos_values.length();
}

// accumulateNegStats //
void RBMLayer::accumulateNegStats( const Vec& neg_values )
{
    bias_neg_stats += neg_values;
    neg_count++;
}
void RBMLayer::accumulateNegStats( const Mat& neg_values )
{
    for (int i=0;i<neg_values.length();i++)
        bias_neg_stats += neg_values(i);
    neg_count+=neg_values.length();
}

// update //
void RBMLayer::update()
{
    // bias += learning_rate * (bias_pos_stats/pos_count
    //                          - bias_neg_stats/neg_count)
    real pos_factor = learning_rate / pos_count;
    real neg_factor = -learning_rate / neg_count;

    real* b = bias.data();
    real* bps = bias_pos_stats.data();
    real* bns = bias_neg_stats.data();

    if( fast_is_equal( momentum, 0.) )
    {
        // no need to use bias_inc
        for( int i=0 ; i<size ; i++ )
            b[i] += pos_factor * bps[i] + neg_factor * bns[i];
    }
    else
    {
        // ensure that bias_inc has the right size
        bias_inc.resize( size );

        // The update rule becomes:
        // bias_inc = momentum * bias_inc
        //              + learning_rate * (bias_pos_stats/pos_count
        //                                  - bias_neg_stats/neg_count)
        // bias += bias_inc
        real* binc = bias_inc.data();
        for( int i=0 ; i<size ; i++ )
        {
            binc[i] = momentum*binc[i] + pos_factor*bps[i] + neg_factor*bns[i];
            b[i] += binc[i];
        }
    }

    applyBiasDecay();

    clearStats();
}

void RBMLayer::update( const Vec& grad )
{
    real* b = bias.data();
    real* gb = grad.data();
    // ensure that bias_inc has the right size before using its storage
    if( !fast_is_equal( momentum, 0.) )
        bias_inc.resize( size );
    real* binc = momentum==0?0:bias_inc.data();

    for( int i=0 ; i<size ; i++ )
    {
        if( fast_is_equal( momentum, 0.) )
        {
            // update the bias: bias -= learning_rate * input_gradient
            b[i] -= learning_rate * gb[i];
        }
        else
        {
            // The update rule becomes:
            // bias_inc = momentum * bias_inc - learning_rate * input_gradient
            // bias += bias_inc
            binc[i] = momentum * binc[i] - learning_rate * gb[i];
            b[i] += binc[i];
        }
    }

    applyBiasDecay();
}

void RBMLayer::update( const Mat& grad )
{
    int batch_size = grad.length();
    real* b = bias.data();
    // ensure that bias_inc has the right size before using its storage
    if( !fast_is_equal( momentum, 0.) )
        bias_inc.resize( size );
    real* binc = momentum==0?0:bias_inc.data();
    real avg_lr = learning_rate / (real)batch_size;

    for( int isample=0; isample<batch_size; isample++)
        for( int i=0 ; i<size ; i++ )
        {
            if( fast_is_equal( momentum, 0.) )
            {
                // update the bias: bias -= learning_rate * input_gradient
                b[i] -= avg_lr * grad(isample,i);
            }
            else
            {
                // The update rule becomes:
                // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                // bias += bias_inc
                binc[i] = momentum * binc[i] - avg_lr * grad(isample,i);
                b[i] += binc[i];
            }
        }
}

void RBMLayer::update( const Vec& pos_values, const Vec& neg_values)
{
    // bias += learning_rate * (pos_values - neg_values)
    real* b = bias.data();
    real* pv = pos_values.data();
    real* nv = neg_values.data();

    if( fast_is_equal( momentum, 0.) )
    {
        for( int i=0 ; i<size ; i++ )
            b[i] += learning_rate * ( pv[i] - nv[i] );
    }
    else
    {
        bias_inc.resize( size );
        real* binc = bias_inc.data();
        for( int i=0 ; i<size ; i++ )
        {
            binc[i] = momentum*binc[i] + learning_rate*( pv[i] - nv[i] );
            b[i] += binc[i];
        }
    }

    applyBiasDecay();

}

void RBMLayer::update( const Mat& pos_values, const Mat& neg_values)
{
    // bias += learning_rate * (pos_values - neg_values)

    int n = pos_values.length();
    PLASSERT( neg_values.length() == n );
    if (ones.length() < n) {
        ones.resize(n);
        ones.fill(1);
    } else if (ones.length() > n)
        // No need to fill with ones since we are only shrinking the vector.
        ones.resize(n);

    // We take the average gradient over the mini-batch.
    real avg_lr = learning_rate / n;

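    // 'ones' is a vector of n ones, so pos_values^T * ones is the column-wise
    // sum of pos_values: each call below adds (or subtracts) avg_lr times the
    // sum of the corresponding values over the minibatch.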
    if( fast_is_equal( momentum, 0.) )
    {
        transposeProductScaleAcc(bias, pos_values, ones,  avg_lr, real(1));
        transposeProductScaleAcc(bias, neg_values, ones, -avg_lr, real(1));
    }
    else
    {
        PLERROR("RBMLayer::update - Not implemented yet with momentum");
        /*
        bias_inc.resize( size );
        real* binc = bias_inc.data();
        for( int i=0 ; i<size ; i++ )
        {
            binc[i] = momentum*binc[i] + learning_rate*( pv[i] - nv[i] );
            b[i] += binc[i];
        }
        */
    }

    applyBiasDecay();

}

// updateCDandGibbs //
void RBMLayer::updateCDandGibbs( const Mat& pos_values,
                                 const Mat& cd_neg_values,
                                 const Mat& gibbs_neg_values,
                                 real background_gibbs_update_ratio )
{
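    // Mixes two sources of negative-phase statistics: the usual CD samples
    // (cd_neg_values) and a moving average of background Gibbs chain samples
    // (gibbs_neg_values), weighted by background_gibbs_update_ratio.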
    PLASSERT(pos_values.width()==size);
    PLASSERT(cd_neg_values.width()==size);
    PLASSERT(gibbs_neg_values.width()==size);
    int minibatch_size=gibbs_neg_values.length();
    PLASSERT(pos_values.length()==minibatch_size);
    PLASSERT(cd_neg_values.length()==minibatch_size);
    real normalize_factor=1.0/minibatch_size;

    // neg_stats <-- gibbs_chain_statistics_forgetting_factor * neg_stats
    //              +(1-gibbs_chain_statistics_forgetting_factor)
    //               * sumoverrows(gibbs_neg_values)
    tmp.resize(size);
    columnSum(gibbs_neg_values,tmp);
    if (neg_count==0)
        multiply(tmp,normalize_factor,bias_neg_stats);
    else
        multiplyScaledAdd(tmp,gibbs_ma_coefficient,
                          normalize_factor*(1-gibbs_ma_coefficient),
                          bias_neg_stats);
    neg_count++;

    // delta w = lrate * ( sumoverrows(pos_values)
    //                   - ( background_gibbs_update_ratio*neg_stats
    //                      +(1-background_gibbs_update_ratio)
    //                       * sumoverrows(cd_neg_values) ) )
    columnSum(pos_values,tmp);
    multiplyAcc(bias, tmp, learning_rate*normalize_factor);
    multiplyAcc(bias, bias_neg_stats,
                -learning_rate*background_gibbs_update_ratio);
    columnSum(cd_neg_values, tmp);
    multiplyAcc(bias, tmp,
                -learning_rate*(1-background_gibbs_update_ratio)*normalize_factor);

    applyBiasDecay();

}

// updateGibbs //
void RBMLayer::updateGibbs( const Mat& pos_values,
                            const Mat& gibbs_neg_values)
{
    int minibatch_size = pos_values.length();
    PLASSERT(pos_values.width()==size);
    PLASSERT(gibbs_neg_values.width()==size);
    PLASSERT(minibatch_size==gibbs_neg_values.length());
    // neg_stats <-- gibbs_chain_statistics_forgetting_factor * neg_stats
    //              +(1-gibbs_chain_statistics_forgetting_factor)
    //               * meanoverrows(gibbs_neg_values)
    tmp.resize(size);
    real normalize_factor=1.0/minibatch_size;
    columnSum(gibbs_neg_values,tmp);
    if (neg_count==0)
        multiply(tmp, normalize_factor, bias_neg_stats);
    else // bias_neg_stats <-- tmp*(1-gibbs_chain_statistics_forgetting_factor)/minibatch_size
        //                    +gibbs_chain_statistics_forgetting_factor*bias_neg_stats
        multiplyScaledAdd(tmp,gibbs_ma_coefficient,
                          normalize_factor*(1-gibbs_ma_coefficient),
                          bias_neg_stats);
    neg_count++;

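    // neg_count*minibatch_size is the total number of examples seen so far;
    // each time it reaches a milestone listed in gibbs_ma_schedule, the
    // moving-average coefficient is pushed up in inverse-sigmoid space
    // (by gibbs_ma_increment), so it smoothly approaches 1.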
    bool increase_ma=false;
    for (int i=0;i<gibbs_ma_schedule.length();i++)
        if (gibbs_ma_schedule[i]==neg_count*minibatch_size)
        {
            increase_ma=true;
            break;
        }
    if (increase_ma)
        gibbs_ma_coefficient = sigmoid(gibbs_ma_increment + inverse_sigmoid(gibbs_ma_coefficient));


    // delta w = lrate * ( meanoverrows(pos_values) - neg_stats )
    columnSum(pos_values,tmp);
    multiplyAcc(bias, tmp, learning_rate*normalize_factor);
    multiplyAcc(bias, bias_neg_stats, -learning_rate);

    applyBiasDecay();

}

// setAllBias //
void RBMLayer::setAllBias(const Vec& rbm_bias)
{
    PLASSERT( rbm_bias.size() == size );
    bias << rbm_bias;
}

// setExpectation //
void RBMLayer::setExpectation(const Vec& the_expectation)
{
    expectation << the_expectation;
    expectation_is_up_to_date=true;
}

// setExpectationByRef //
void RBMLayer::setExpectationByRef(const Vec& the_expectation)
{
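    // Vec assignment is shallow in PLearn: 'expectation' now shares the
    // caller's storage instead of copying it (unlike setExpectation above).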
    expectation = the_expectation;
    expectation_is_up_to_date=true;
}

// setExpectations //
void RBMLayer::setExpectations(const Mat& the_expectations)
{
    batch_size = the_expectations.length();
    setBatchSize( batch_size );
    expectations << the_expectations;
    expectations_are_up_to_date=true;
}

// setExpectationsByRef //
void RBMLayer::setExpectationsByRef(const Mat& the_expectations)
{
    batch_size = the_expectations.length();
    setBatchSize( batch_size );
    expectations = the_expectations;
    expectations_are_up_to_date=true;
}

// bpropCD //
void RBMLayer::bpropCD(Vec& bias_gradient)
{
    // grad = -bias_pos_stats/pos_count + bias_neg_stats/neg_count

    real* bg = bias_gradient.data();
    real* bps = bias_pos_stats.data();
    real* bns = bias_neg_stats.data();

    for( int i=0 ; i<size ; i++ )
        bg[i] = -bps[i]/pos_count + bns[i]/neg_count;

    addBiasDecay(bias_gradient);

}

void RBMLayer::bpropCD(const Vec& pos_values, const Vec& neg_values,
                       Vec& bias_gradient)
{
    // grad = -pos_values + neg_values

    real* bg = bias_gradient.data();
    real* bps = pos_values.data();
    real* bns = neg_values.data();

    for( int i=0 ; i<size ; i++ )
        bg[i] = -bps[i] + bns[i];

    addBiasDecay(bias_gradient);

}

real RBMLayer::energy(const Vec& unit_values) const
{
    PLERROR("RBMLayer::energy(Vec) not implemented in subclass %s\n",classname().c_str());
    return 0;
}

real RBMLayer::freeEnergyContribution(const Vec& unit_activations) const
{
    PLERROR("RBMLayer::freeEnergyContribution(Vec) not implemented in subclass %s\n",classname().c_str());
    return 0;
}

void RBMLayer::freeEnergyContributionGradient(const Vec& unit_activations,
                                              Vec& unit_activations_gradient,
                                              real output_gradient,
                                              bool accumulate ) const
{
    PLERROR("RBMLayer::freeEnergyContributionGradient(Vec, Vec) not implemented in subclass %s\n",classname().c_str());
}

int RBMLayer::getConfigurationCount()
{
    PLERROR("RBMLayer::getConfigurationCount() not implemented in subclass %s\n",classname().c_str());
    return 0;
}

void RBMLayer::getConfiguration(int conf_index, Vec& output)
{
    PLERROR("RBMLayer::getConfiguration(int, Vec) not implemented in subclass %s\n",classname().c_str());
}

void RBMLayer::addBiasDecay(Vec& bias_gradient)
{
    PLASSERT(bias_gradient.size()==size);

    real *bg = bias_gradient.data();
    real *b = bias.data();
    bias_decay_type = lowerstring(bias_decay_type);

    if (bias_decay_type=="none")
        {}
    else if (bias_decay_type=="negative")  // Pushes the biases towards -\infty
        for( int i=0 ; i<size ; i++ )
            bg[i] += learning_rate * bias_decay_parameter;
    else if (bias_decay_type=="l2")  // L2 penalty on the biases
        for (int i=0 ; i<size ; i++ )
            bg[i] += learning_rate * bias_decay_parameter * b[i];
    else
        PLERROR("RBMLayer::addBiasDecay(Vec&): unknown bias_decay_type '%s'"
                " in subclass %s\n", bias_decay_type.c_str(), classname().c_str());

}

void RBMLayer::addBiasDecay(Mat& bias_gradients)
{
    PLASSERT(bias_gradients.width()==size);
    bias_decay_type = lowerstring(bias_decay_type);
    if (bias_decay_type=="none")
        return;

    real avg_lr = learning_rate / bias_gradients.length();
    real *bv = bias.data();

    for(int k=0; k<bias_gradients.length(); k++)
    {
        real *bg = bias_gradients[k];

        if (bias_decay_type=="negative")  // Pushes the biases towards -\infty
            for( int i=0 ; i<size ; i++ )
                bg[i] += avg_lr * bias_decay_parameter;
        else if (bias_decay_type=="l2")  // L2 penalty on the biases
            for (int i=0 ; i<size ; i++ )
                bg[i] += avg_lr * bias_decay_parameter * bv[i];
        else
            PLERROR("RBMLayer::addBiasDecay(Mat&): unknown bias_decay_type '%s'"
                    " in subclass %s\n", bias_decay_type.c_str(), classname().c_str());
    }
}

void RBMLayer::applyBiasDecay()
{
    PLASSERT(bias.size()==size);

    real* b = bias.data();
    bias_decay_type = lowerstring(bias_decay_type);

    if (bias_decay_type=="none")
        {}
    else if (bias_decay_type=="negative") // Pushes the biases towards -\infty
        for( int i=0 ; i<size ; i++ )
            b[i] -= learning_rate * bias_decay_parameter;
    else if (bias_decay_type=="l2") // L2 penalty on the biases
        bias *= (1 - learning_rate * bias_decay_parameter);
    else
        PLERROR("RBMLayer::applyBiasDecay(): unknown bias_decay_type '%s'"
                " in subclass %s\n", bias_decay_type.c_str(), classname().c_str());

}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :