// -*- C++ -*-

// RBMGaussianLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici

#include "RBMGaussianLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMGaussianLayer,
    "Layer in an RBM, consisting of Gaussian units",
    "");

RBMGaussianLayer::RBMGaussianLayer( real the_learning_rate ) :
    inherited( the_learning_rate ),
    min_quad_coeff( 0. ),
    share_quad_coeff( false ),
    size_quad_coeff( 0 ),
    fixed_std_deviation( -1 ),
    compute_mse_instead_of_nll( false ),
    sigma_is_up_to_date( false )
{
}

RBMGaussianLayer::RBMGaussianLayer( int the_size, real the_learning_rate ) :
    inherited( the_learning_rate ),
    min_quad_coeff( 0. ),
    share_quad_coeff( false ),
    size_quad_coeff( 0 ),
    fixed_std_deviation( -1 ),
    compute_mse_instead_of_nll( false ),
    quad_coeff( the_size, 1. ), // or 1./M_SQRT2 ?
    quad_coeff_pos_stats( the_size ),
    quad_coeff_neg_stats( the_size ),
    sigma( the_size ),
    sigma_is_up_to_date( false )
{
    size = the_size;
    activation.resize( the_size );
    sample.resize( the_size );
    expectation.resize( the_size );
    bias.resize( the_size );
    bias_pos_stats.resize( the_size );
    bias_neg_stats.resize( the_size );
}

RBMGaussianLayer::RBMGaussianLayer( int the_size, real the_learning_rate,
                                    bool do_share_quad_coeff ) :
    inherited( the_learning_rate ),
    min_quad_coeff( 0. ),
    fixed_std_deviation( -1 ),
    compute_mse_instead_of_nll( false ),
    quad_coeff_pos_stats( the_size ),
    quad_coeff_neg_stats( the_size ),
    sigma_is_up_to_date( false )
{
    size = the_size;
    activation.resize( the_size );
    sample.resize( the_size );
    expectation.resize( the_size );
    bias.resize( the_size );
    bias_pos_stats.resize( the_size );
    bias_neg_stats.resize( the_size );
    share_quad_coeff = do_share_quad_coeff;
    if( share_quad_coeff )
        size_quad_coeff = 1;
    else
        size_quad_coeff = size;
    quad_coeff = Vec( size_quad_coeff, 1. );
    sigma = Vec( size_quad_coeff );
}

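// Parameterization used throughout this file: a unit with bias b_i and
// quadratic coefficient a_i contributes (a_i x_i)^2 - b_i x_i to the energy
// (see energy() below). Given the activation act_i provided by the
// connection, the unit's conditional distribution is Gaussian with
//     mean    mu_i    = act_i / (2 a_i^2)      (computeExpectation)
//     stddev  sigma_i = 1 / (sqrt(2) a_i)      (computeStdDeviation)
// When share_quad_coeff is true, a single coefficient quad_coeff[0] (and a
// single sigma[0]) is shared by all units.
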
void RBMGaussianLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
            "before calling generateSample()");

    computeStdDeviation();
    if( share_quad_coeff )
        for( int i=0 ; i<size ; i++ )
            sample[i] = random_gen->gaussian_mu_sigma( expectation[i], sigma[0] );
    else
        for( int i=0 ; i<size ; i++ )
            sample[i] = random_gen->gaussian_mu_sigma( expectation[i], sigma[i] );
}

void RBMGaussianLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
            "before calling generateSamples()");

    computeStdDeviation();
    PLASSERT( samples.width() == size && samples.length() == batch_size );

    if( share_quad_coeff )
        for (int k = 0; k < batch_size; k++)
            for (int i=0 ; i<size ; i++)
                samples(k, i) = random_gen->gaussian_mu_sigma( expectations(k, i), sigma[0] );
    else
        for (int k = 0; k < batch_size; k++)
            for (int i=0 ; i<size ; i++)
                samples(k, i) = random_gen->gaussian_mu_sigma( expectations(k, i), sigma[i] );
}


void RBMGaussianLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    // mu_i = activation[i] / (2 * quad_coeff[i]^2)
    if( share_quad_coeff )
    {
        real a_i = quad_coeff[0];
        for( int i=0 ; i<size ; i++ )
            expectation[i] = activation[i] / (2 * a_i * a_i);
    }
    else
        for( int i=0 ; i<size ; i++ )
        {
            real a_i = quad_coeff[i];
            expectation[i] = activation[i] / (2 * a_i * a_i);
        }

    expectation_is_up_to_date = true;
}

void RBMGaussianLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );

    if( share_quad_coeff )
    {
        real a_i = quad_coeff[0];
        for (int k = 0; k < batch_size; k++)
            for (int i = 0 ; i < size ; i++)
                expectations(k, i) = activations(k, i) / (2 * a_i * a_i);
    }
    else
        for (int k = 0; k < batch_size; k++)
            for (int i = 0 ; i < size ; i++)
            {
                real a_i = quad_coeff[i];
                expectations(k, i) = activations(k, i) / (2 * a_i * a_i);
            }
    expectations_are_up_to_date = true;
}


void RBMGaussianLayer::computeStdDeviation()
{
    if( sigma_is_up_to_date )
        return;

    // sigma[i] = 1 / (sqrt(2) * quad_coeff[i])
    if( share_quad_coeff )
        sigma[0] = 1 / (M_SQRT2 * quad_coeff[0]);
    else
        for( int i=0 ; i<size ; i++ )
            sigma[i] = 1 / (M_SQRT2 * quad_coeff[i]);

    sigma_is_up_to_date = true;
}

void RBMGaussianLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    if( share_quad_coeff )
    {
        real a_i = quad_coeff[0];
        for( int i=0 ; i<size ; i++ )
            output[i] = (input[i] + bias[i]) / (2 * a_i * a_i);
    }
    else
        for( int i=0 ; i<size ; i++ )
        {
            real a_i = quad_coeff[i];
            output[i] = (input[i] + bias[i]) / (2 * a_i * a_i);
        }
}

void RBMGaussianLayer::bpropUpdate(const Vec& input, const Vec& output,
                                   Vec& input_gradient,
                                   const Vec& output_gradient,
                                   bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    if( momentum != 0. )
    {
        bias_inc.resize( size );
        // quad_coeff_inc.resize( size ); // or quad_coeff_inc.resize( 1 ) if shared
    }

    // real two_lr = 2 * learning_rate;
    real a_i = quad_coeff[0];
    for( int i=0 ; i<size ; ++i )
    {
        if( !share_quad_coeff )
            a_i = quad_coeff[i];
        real in_grad_i = output_gradient[i] / (2 * a_i * a_i);
        input_gradient[i] += in_grad_i;

        if( momentum == 0. )
        {
            // bias -= learning_rate * input_gradient
            bias[i] -= learning_rate * in_grad_i;

            /* For the moment, we do not want to change the quadratic
               coefficient during the gradient descent phase.

            // update the quadratic coefficient:
            // a_i -= learning_rate * out_grad_i * (b_i + input_i) / a_i^3
            // (or a_i -= 2 * learning_rate * in_grad_i * (b_i + input_i) / a_i)
            a_i -= two_lr * in_grad_i * (bias[i] + input[i])
                                                    / a_i;
            if( a_i < min_quad_coeff )
                a_i = min_quad_coeff;
            */
        }
        else
        {
            // bias_inc = momentum * bias_inc - learning_rate * input_gradient
            // bias += bias_inc
            bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
            bias[i] += bias_inc[i];

            /*
            // The update rule becomes:
            // a_inc_i = momentum * a_inc_i - learning_rate * out_grad_i
            //                                  * (b_i + input_i) / a_i^3
            // a_i += a_inc_i
            quad_coeff_inc[i] = momentum * quad_coeff_inc[i]
                - two_lr * in_grad_i * (bias[i] + input[i])
                                         / a_i;
            a_i += quad_coeff_inc[i];
            if( a_i < min_quad_coeff )
                a_i = min_quad_coeff;
            */
        }
    }

    applyBiasDecay();
}

void RBMGaussianLayer::reset()
{
    inherited::reset();
    sigma.clear();
    sigma_is_up_to_date = false;
}

void RBMGaussianLayer::clearStats()
{
    quad_coeff_pos_stats.clear();
    quad_coeff_neg_stats.clear();

    inherited::clearStats();
}

void RBMGaussianLayer::forget()
{
    clearStats();

    if( fixed_std_deviation > 0 )
        quad_coeff.fill( 1 / ( M_SQRT2 * fixed_std_deviation ) );
    else
        quad_coeff.fill( 1. );
    inherited::forget();
}

////////////////////
// declareOptions //
////////////////////
void RBMGaussianLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "min_quad_coeff", &RBMGaussianLayer::min_quad_coeff,
                  OptionBase::buildoption,
                  "Minimum bound on the value of the quadratic coefficients.");

    declareOption(ol, "quad_coeff", &RBMGaussianLayer::quad_coeff,
                  OptionBase::learntoption,
                  "Quadratic coefficients of the units.");

    declareOption(ol, "sigma", &RBMGaussianLayer::sigma,
                  OptionBase::learntoption,
                  "Standard deviations.");

    declareOption(ol, "share_quad_coeff", &RBMGaussianLayer::share_quad_coeff,
                  OptionBase::buildoption,
                  "Should all the units share the same quadratic coefficient?\n"
                  "Suitable to avoid instability (overfitting) in cases where\n"
                  "all the units have the same 'meaning' (e.g. pixels of an image).");

    declareOption(ol, "fixed_std_deviation", &RBMGaussianLayer::fixed_std_deviation,
                  OptionBase::buildoption,
                  "Value for the usually learned standard deviation, "
                  "if it should not be learned.\n"
                  "This will fix the value of the quad coeffs to the "
                  "appropriate value.\n"
                  "If <= 0, then this option is ignored.\n");

    declareOption(ol, "compute_mse_instead_of_nll", &RBMGaussianLayer::compute_mse_instead_of_nll,
                  OptionBase::buildoption,
                  "Indication that fpropNLL should compute the MSE instead of the NLL.\n"
                  "bpropNLL will also give the appropriate gradient. You might want\n"
                  "to set fixed_std_deviation to 1 in this case.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
void RBMGaussianLayer::build_()
{
    bool needs_forget = false;

    if( share_quad_coeff )
        size_quad_coeff = 1;
    else
        size_quad_coeff = size;

    if (sigma.size() != size_quad_coeff)
    {
        sigma.resize( size_quad_coeff );
        sigma_is_up_to_date = false;
        quad_coeff.resize( size_quad_coeff );
        needs_forget = true;
    }

    if (fixed_std_deviation > 0)
    {
        if (share_quad_coeff)
            PLERROR("In RBMGaussianLayer::build_(): fixed_std_deviation should not "
                    "be > 0 when share_quad_coeff is true.");

        quad_coeff.fill( 1 / ( M_SQRT2 * fixed_std_deviation ) );
    }

    quad_coeff_pos_stats.resize( size );
    quad_coeff_neg_stats.resize( size );

    if (needs_forget)
        forget();

    clearStats();
}

///////////
// build //
///////////
void RBMGaussianLayer::build()
{
    inherited::build();
    build_();
}

void RBMGaussianLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(quad_coeff, copies);
    deepCopyField(quad_coeff_pos_stats, copies);
    deepCopyField(quad_coeff_neg_stats, copies);
    deepCopyField(quad_coeff_inc, copies);
    deepCopyField(sigma, copies);
}

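// For the quadratic coefficients, the gradient of the energy is
//     dE/da_i = 2 a_i x_i^2,
// so the positive- and negative-phase statistics accumulated below are
// 2 * quad_coeff * x_i^2; update() then applies the contrastive divergence
// step using their averages.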
void RBMGaussianLayer::accumulatePosStats( const Vec& pos_values )
{
    if ( fixed_std_deviation <= 0 )
    {
        if (share_quad_coeff)
            for( int i=0 ; i<size ; i++ )
            {
                real x_i = pos_values[i];
                quad_coeff_pos_stats[i] += 2 * quad_coeff[0] * x_i * x_i;
            }
        else
            for( int i=0 ; i<size ; i++ )
            {
                real x_i = pos_values[i];
                quad_coeff_pos_stats[i] += 2 * quad_coeff[i] * x_i * x_i;
            }
    }

    inherited::accumulatePosStats( pos_values );
}

void RBMGaussianLayer::accumulateNegStats( const Vec& neg_values )
{
    if ( fixed_std_deviation <= 0 )
    {
        if (share_quad_coeff)
            for( int i=0 ; i<size ; i++ )
            {
                real x_i = neg_values[i];
                quad_coeff_neg_stats[i] += 2 * quad_coeff[0] * x_i * x_i;
            }
        else
            for( int i=0 ; i<size ; i++ )
            {
                real x_i = neg_values[i];
                quad_coeff_neg_stats[i] += 2 * quad_coeff[i] * x_i * x_i;
            }
    }
    inherited::accumulateNegStats( neg_values );
}

void RBMGaussianLayer::update()
{
    // quad_coeff -= learning_rate * (quad_coeff_pos_stats/pos_count
    //                                - quad_coeff_neg_stats/neg_count)
    if ( fixed_std_deviation <= 0 )
    {
        real pos_factor = -learning_rate / pos_count;
        real neg_factor = learning_rate / neg_count;

        real* a = quad_coeff.data();
        real* aps = quad_coeff_pos_stats.data();
        real* ans = quad_coeff_neg_stats.data();

        if( momentum == 0. )
        {
            if( share_quad_coeff )
            {
                real update = 0;
                for( int i=0 ; i<size ; i++ )
                    update += pos_factor * aps[i] + neg_factor * ans[i];
                a[0] += update/(real)size;
                if( a[0] < min_quad_coeff )
                    a[0] = min_quad_coeff;
            }
            else
                for( int i=0 ; i<size ; i++ )
                {
                    a[i] += pos_factor * aps[i] + neg_factor * ans[i];
                    if( a[i] < min_quad_coeff )
                        a[i] = min_quad_coeff;
                }
        }
        else
        {
            if( share_quad_coeff )
            {
                quad_coeff_inc.resize( 1 );
                real* ainc = quad_coeff_inc.data();
                for( int i=0 ; i<size ; i++ )
                {
                    ainc[0] = momentum*ainc[0] + pos_factor*aps[i] + neg_factor*ans[i];
                    ainc[0] /= (real)size;
                    a[0] += ainc[0];
                }
                if( a[0] < min_quad_coeff )
                    a[0] = min_quad_coeff;
            }
            else
            {
                quad_coeff_inc.resize( size );
                real* ainc = quad_coeff_inc.data();
                for( int i=0 ; i<size ; i++ )
                {
                    ainc[i] = momentum*ainc[i] + pos_factor*aps[i] + neg_factor*ans[i];
                    a[i] += ainc[i];
                    if( a[i] < min_quad_coeff )
                        a[i] = min_quad_coeff;
                }
            }
        }

        // We will need to recompute sigma
        sigma_is_up_to_date = false;
    }

    // will update the bias, and clear the statistics
    inherited::update();
}

void RBMGaussianLayer::update( const Vec& pos_values, const Vec& neg_values )
{
    // quad_coeff[i] -= learning_rate * 2 * quad_coeff[i] * (pos_values[i]^2
    //                                                       - neg_values[i]^2)
    if ( fixed_std_deviation <= 0 )
    {
        real two_lr = 2 * learning_rate;
        real* a = quad_coeff.data();
        real* pv = pos_values.data();
        real* nv = neg_values.data();

        if( momentum == 0. )
        {
            if (share_quad_coeff)
            {
                real update = 0;
                for( int i=0 ; i<size ; i++ )
                    update += two_lr * a[0] * (nv[i]*nv[i] - pv[i]*pv[i]);
                a[0] += update/(real)size;
                if( a[0] < min_quad_coeff )
                    a[0] = min_quad_coeff;
            }
            else
                for( int i=0 ; i<size ; i++ )
                {
                    a[i] += two_lr * a[i] * (nv[i]*nv[i] - pv[i]*pv[i]);
                    if( a[i] < min_quad_coeff )
                        a[i] = min_quad_coeff;
                }
        }
        else
        {
            if( share_quad_coeff )
            {
                quad_coeff_inc.resize( 1 );
                real* ainc = quad_coeff_inc.data();
                for( int i=0 ; i<size ; i++ )
                {
                    ainc[0] = momentum*ainc[0]
                        + two_lr * a[0] * (nv[i]*nv[i] - pv[i]*pv[i]);
                    ainc[0] /= (real)size;
                    a[0] += ainc[0];
                }
                if( a[0] < min_quad_coeff )
                    a[0] = min_quad_coeff;
            }
            else
            {
                quad_coeff_inc.resize( size );
                real* ainc = quad_coeff_inc.data();
                for( int i=0 ; i<size ; i++ )
                {
                    ainc[i] = momentum*ainc[i]
                        + two_lr * a[i] * (nv[i]*nv[i] - pv[i]*pv[i]);
                    a[i] += ainc[i];
                    if( a[i] < min_quad_coeff )
                        a[i] = min_quad_coeff;
                }
            }
        }

        // We will need to recompute sigma
        sigma_is_up_to_date = false;
    }

    // update the bias
    inherited::update( pos_values, neg_values );
}

void RBMGaussianLayer::update( const Mat& pos_values, const Mat& neg_values )
{
    PLASSERT( pos_values.width() == size );
    PLASSERT( neg_values.width() == size );

    int batch_size = pos_values.length();
    PLASSERT( neg_values.length() == batch_size );

    // quad_coeff[i] -= learning_rate * 2 * quad_coeff[i] * (pos_values[i]^2
    //                                                       - neg_values[i]^2)
    if ( fixed_std_deviation <= 0 )
    {
        real two_lr = 2 * learning_rate / batch_size;
        real* a = quad_coeff.data();

        if( momentum == 0. )
        {
            if (share_quad_coeff)
                for( int k=0; k<batch_size; k++ )
                {
                    real *pv_k = pos_values[k];
                    real *nv_k = neg_values[k];
                    real update = 0;
                    for( int i=0; i<size; i++ )
                        update += two_lr * a[0] * (nv_k[i]*nv_k[i] - pv_k[i]*pv_k[i]);
                    a[0] += update/(real)size;
                    if( a[0] < min_quad_coeff )
                        a[0] = min_quad_coeff;
                }
            else
                for( int k=0; k<batch_size; k++ )
                {
                    real *pv_k = pos_values[k];
                    real *nv_k = neg_values[k];
                    for( int i=0; i<size; i++ )
                    {
                        a[i] += two_lr * a[i] * (nv_k[i]*nv_k[i] - pv_k[i]*pv_k[i]);
                        if( a[i] < min_quad_coeff )
                            a[i] = min_quad_coeff;
                    }
                }
        }
        else
            PLCHECK_MSG( false,
                         "momentum and minibatch are not compatible yet" );

        // We will need to recompute sigma
        sigma_is_up_to_date = false;
    }

    // Update the bias
    inherited::update( pos_values, neg_values );
}

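// Energy of a configuration of the units:
//     E(v) = sum_i ( (a_i v_i)^2 - b_i v_i ).
// Since sigma_i = 1/(sqrt(2) a_i), the quadratic term equals
// v_i^2 / (2 sigma_i^2), the usual Gaussian energy up to the linear bias term.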
real RBMGaussianLayer::energy(const Vec& unit_values) const
{
    PLASSERT( unit_values.length() == size );

    real en = 0.;
    real tmp;
    if (size > 0)
    {
        real* v = unit_values.data();
        real* a = quad_coeff.data();
        real* b = bias.data();
        if( share_quad_coeff )
            for(register int i=0; i<size; i++)
            {
                tmp = a[0]*v[i];
                en += tmp*tmp - b[i]*v[i];
            }
        else
            for(register int i=0; i<size; i++)
            {
                tmp = a[i]*v[i];
                en += tmp*tmp - b[i]*v[i];
            }
    }
    return en;
}

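// Free-energy contribution of one unit: integrating exp( act_i x - (q_i x)^2 )
// over x gives sqrt(pi)/q_i * exp( act_i^2 / (4 q_i^2) ), whose negative log is
//     log(q_i) - act_i^2 / (4 q_i^2) - (1/2) log(pi),
// summed over the units below.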
real RBMGaussianLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    PLASSERT( unit_activations.size() == size );

    // result = \sum_{i=0}^{size-1} (-(a_i/(2 q_i))^2 + log(q_i)) - n/2 log(Pi)
    real result = -0.5 * size * LogPi;
    for (int i=0; i<size; i++)
    {
        real a_i = unit_activations[i];
        real q_i = share_quad_coeff ? quad_coeff[0] : quad_coeff[i];
        result += pl_log(q_i);
        result -= a_i * a_i / (4 * q_i * q_i);
    }
    return result;
}

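// NLL of a target t under a unit's Gaussian:
//     nll_i = (t_i - mu_i)^2 / (2 sigma_i^2) + log(sigma_i) + (1/2) log(2 Pi).
// Since quad_coeff[i] = 1 / (sqrt(2) sigma[i]), the squared term is computed
// below as ((t_i - mu_i) * quad_coeff[i])^2.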
real RBMGaussianLayer::fpropNLL(const Vec& target)
{
    PLASSERT( target.size() == input_size );
    computeExpectation();
    computeStdDeviation();

    real ret = 0;
    if( compute_mse_instead_of_nll )
    {
        real r;
        for( int i=0 ; i<size ; i++ )
        {
            r = (target[i] - expectation[i]);
            ret += r * r;
        }
    }
    else
    {
        if( share_quad_coeff )
            for( int i=0 ; i<size ; i++ )
            {
                real r = (target[i] - expectation[i]) * quad_coeff[0];
                ret += r * r + pl_log(sigma[0]);
            }
        else
            for( int i=0 ; i<size ; i++ )
            {
                // ret += (target[i]-expectation[i])^2/(2 sigma[i]^2)
                //      + log(sqrt(2*Pi) * sigma[i])
                real r = (target[i] - expectation[i]) * quad_coeff[i];
                ret += r * r + pl_log(sigma[i]);
            }
        ret += 0.5*size*Log2Pi;
    }
    return ret;
}

void RBMGaussianLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    computeExpectations();
    computeStdDeviation();

    real nll;
    real *expectation, *target;

    if( compute_mse_instead_of_nll )
    {
        for (int k=0;k<batch_size;k++) // loop over minibatch
        {
            nll = 0;
            expectation = expectations[k];
            target = targets[k];
            real r;
            for( register int i=0 ; i<size ; i++ ) // loop over outputs
            {
                r = (target[i] - expectation[i]);
                nll += r * r;
            }
            costs_column(k,0) = nll;
        }
    }
    else
    {
        if( share_quad_coeff )
            for (int k=0;k<batch_size;k++) // loop over minibatch
            {
                nll = 0;
                expectation = expectations[k];
                target = targets[k];
                for( register int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    real r = (target[i] - expectation[i]) * quad_coeff[0];
                    nll += r * r + pl_log(sigma[0]);
                }
                nll += 0.5*size*Log2Pi;
                costs_column(k,0) = nll;
            }
        else
            for (int k=0;k<batch_size;k++) // loop over minibatch
            {
                nll = 0;
                expectation = expectations[k];
                target = targets[k];
                for( register int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    // nll += (target[i]-expectation[i])^2/(2 sigma[i]^2)
                    //      + log(sqrt(2*Pi) * sigma[i])
                    real r = (target[i] - expectation[i]) * quad_coeff[i];
                    nll += r * r + pl_log(sigma[i]);
                }
                nll += 0.5*size*Log2Pi;
                costs_column(k,0) = nll;
            }
    }
}

void RBMGaussianLayer::bpropNLL(const Vec& target, real nll, Vec& bias_gradient)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);

    if( compute_mse_instead_of_nll )
        bias_gradient *= 2.;
    addBiasDecay(bias_gradient);
}

void RBMGaussianLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                                Mat& bias_gradients)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);

    if( compute_mse_instead_of_nll )
        bias_gradients *= 2.;
    addBiasDecay(bias_gradients);
}


} // end of namespace PLearn
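
/* Minimal usage sketch (illustrative only, not part of the library; it
   assumes PRandom as the random number generator class, and sets the
   `activation` field directly in place of the input normally provided by an
   RBMConnection):

    PP<RBMGaussianLayer> layer = new RBMGaussianLayer( 10, 0.01 );
    layer->random_gen = new PRandom( 42 );
    layer->build();

    layer->activation.fill( 0.5 );   // stand-in for the connection's output
    layer->computeExpectation();     // mu_i = activation[i] / (2 a_i^2)
    layer->generateSample();         // sample[i] ~ N( mu_i, sigma_i )
*/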


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :