RBMMultinomialLayer.cc
// -*- C++ -*-

// RBMMultinomialLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici

#include "RBMMultinomialLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMMultinomialLayer,
    "Layer in an RBM, consisting of one multinomial unit",
    "");

RBMMultinomialLayer::RBMMultinomialLayer( real the_learning_rate ) :
    inherited( the_learning_rate )
{
}

RBMMultinomialLayer::RBMMultinomialLayer( int the_size,
                                          real the_learning_rate ) :
    inherited( the_learning_rate )
{
    size = the_size;
    activation.resize( the_size );
    sample.resize( the_size );
    expectation.resize( the_size );
    bias.resize( the_size );
    bias_pos_stats.resize( the_size );
    bias_neg_stats.resize( the_size );
}

void RBMMultinomialLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
            "before calling generateSample()");

    int i = random_gen->multinomial_sample( expectation );
    fill_one_hot( sample, i, real(0.), real(1.) );
}
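
// generateSample() draws one index i with probability expectation[i] and
// writes the corresponding one-hot vector into 'sample'.  A minimal
// standalone sketch of the same operation, using only the C++ standard
// library in place of PLearn's random_gen->multinomial_sample() and
// fill_one_hot() (the helper below is purely illustrative and not part of
// this file):
//
//     #include <random>
//     #include <vector>
//
//     std::vector<double> one_hot_sample(const std::vector<double>& probs,
//                                        std::mt19937& rng)
//     {
//         // pick index i with probability probs[i]
//         std::discrete_distribution<int> pick(probs.begin(), probs.end());
//         std::vector<double> s(probs.size(), 0.0);
//         s[pick(rng)] = 1.0;   // exactly one unit is active
//         return s;
//     }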

void RBMMultinomialLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
                        "before calling generateSamples()");

    PLASSERT( samples.width() == size && samples.length() == batch_size );

    for (int k = 0; k < batch_size; k++)
    {
        int i = random_gen->multinomial_sample( expectations(k) );
        fill_one_hot( samples(k), i, real(0.), real(1.) );
    }
}

void RBMMultinomialLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    // expectation = softmax(activation)
    softmax(activation, expectation);
    expectation_is_up_to_date = true;
}
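
// The conditional expectation of a multinomial unit is the softmax of its
// activation vector.  A numerically stable standalone sketch of that mapping
// (std::vector and the max-shift trick are illustrative choices here, not a
// reproduction of PLearn's softmax()):
//
//     #include <algorithm>
//     #include <cmath>
//     #include <cstddef>
//     #include <vector>
//
//     std::vector<double> softmax_of(const std::vector<double>& act)
//     {
//         double m = *std::max_element(act.begin(), act.end());
//         std::vector<double> out(act.size());
//         double z = 0.0;
//         for (std::size_t i = 0; i < act.size(); ++i) {
//             out[i] = std::exp(act[i] - m);   // shift by max to avoid overflow
//             z += out[i];
//         }
//         for (std::size_t i = 0; i < act.size(); ++i)
//             out[i] /= z;                     // probabilities sum to 1
//         return out;
//     }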

void RBMMultinomialLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );

    // expectations(k) = softmax(activations(k))
    for (int k = 0; k < batch_size; k++)
        softmax(activations(k), expectations(k));

    expectations_are_up_to_date = true;
}


void RBMMultinomialLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    // inefficient
    softmax( input+bias, output );
}

///////////
// fprop //
///////////
void RBMMultinomialLayer::fprop( const Vec& input, const Vec& rbm_bias,
                                 Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    // inefficient
    softmax( input+rbm_bias, output );
}
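
// Both fprop() overloads are marked "inefficient" because input+bias
// (resp. input+rbm_bias) allocates a temporary Vec before softmax() runs.
// A sketch of one way to avoid the temporary, working directly on raw
// arrays (this is only an illustration of the idea, not how PLearn
// implements it):
//
//     #include <algorithm>
//     #include <cmath>
//
//     void softmax_of_sum(const double* input, const double* bias,
//                         double* out, int n)
//     {
//         double m = input[0] + bias[0];
//         for (int i = 1; i < n; ++i)              // max of input[i]+bias[i]
//             m = std::max(m, input[i] + bias[i]);
//         double z = 0.0;
//         for (int i = 0; i < n; ++i) {
//             out[i] = std::exp(input[i] + bias[i] - m);
//             z += out[i];
//         }
//         for (int i = 0; i < n; ++i)
//             out[i] /= z;                          // normalize in place
//     }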

/////////////////
// bpropUpdate //
/////////////////
void RBMMultinomialLayer::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient,
                                      bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

    // input_gradient[i] =
    //      (output_gradient[i] - output_gradient . output) output[i]
    real outg_dot_out = dot( output_gradient, output );
    real* out = output.data();
    real* outg = output_gradient.data();
    real* ing = input_gradient.data();
    real* b = bias.data();
    real* binc = momentum==0?0:bias_inc.data();

    for( int i=0 ; i<size ; i++ )
    {
        real ing_i = (outg[i] - outg_dot_out) * out[i];
        ing[i] += ing_i;

        if( momentum == 0. )
        {
            // update the bias: bias -= learning_rate * input_gradient
            b[i] -= learning_rate * ing_i;
        }
        else
        {
            // The update rule becomes:
            // bias_inc = momentum * bias_inc - learning_rate * input_gradient
            // bias += bias_inc
            binc[i] = momentum * binc[i] - learning_rate * ing_i;
            b[i] += binc[i];
        }
    }
    applyBiasDecay();
}
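
// Where the gradient formula above comes from (standard softmax
// backpropagation, restated for reference): with y = softmax(x), the Jacobian
// is dy_j/dx_i = y_j * (delta_ij - y_i), so for a loss L with g = dL/dy,
//
//     dL/dx_i = sum_j g_j * y_j * (delta_ij - y_i)
//             = y_i * (g_i - sum_j g_j * y_j)
//             = (g_i - g . y) * y_i,
//
// which is exactly (output_gradient[i] - output_gradient . output) * output[i]
// as computed in the loop above.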

void RBMMultinomialLayer::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                      Mat& input_gradients,
                                      const Mat& output_gradients,
                                      bool accumulate)
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int mbatch_size = inputs.length();
    PLASSERT( outputs.length() == mbatch_size );
    PLASSERT( output_gradients.length() == mbatch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                input_gradients.length() == inputs.length(),
                "Cannot resize input_gradient and accumulate into it." );
    }
    else
    {
        input_gradients.resize(inputs.length(), size);
        input_gradients.clear();
    }


    if( momentum != 0. )
        bias_inc.resize( size );

    // TODO see if we can have a speed-up by reorganizing the different steps

    // input_gradients[k][i] =
    //   (output_gradients[k][i]-output_gradients[k].outputs[k]) outputs[k][i]
    real mean_lr = learning_rate / mbatch_size;
    for( int k=0; k<mbatch_size; k++ )
    {
        real outg_dot_out = dot( output_gradients(k), outputs(k) );
        real* out = outputs(k).data();
        real* outg = output_gradients(k).data();
        real* ing = input_gradients(k).data();
        real* b = bias.data();
        real* binc = momentum==0?0:bias_inc.data();

        for( int i=0 ; i<size ; i++ )
        {
            real ing_ki = (outg[i] - outg_dot_out) * out[i];
            ing[i] += ing_ki;

            if( momentum == 0. )
            {
                // update the bias: bias -= mean_lr * input_gradient
                // (learning_rate averaged over the minibatch)
                b[i] -= mean_lr * ing_ki;
            }
            else
            {
                PLCHECK_MSG(false,
                            "Momentum not correctly implemented with batch");
                // The update rule becomes:
                // bias_inc = momentum*bias_inc - learning_rate*input_gradient
                // bias += bias_inc
                binc[i] = momentum * binc[i] - mean_lr * ing_ki;
                b[i] += binc[i];
            }
        }
    }
    applyBiasDecay();
}

void RBMMultinomialLayer::bpropUpdate(const Vec& input, const Vec& rbm_bias,
                                      const Vec& output,
                                      Vec& input_gradient,
                                      Vec& rbm_bias_gradient,
                                      const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );
    rbm_bias_gradient.resize( size );

    // input_gradient[i] =
    //      (output_gradient[i] - output_gradient . output) output[i]
    real outg_dot_out = dot( output_gradient, output );
    real* out = output.data();
    real* outg = output_gradient.data();
    real* ing = input_gradient.data();
    for( int i=0 ; i<size ; i++ )
        ing[i] = (outg[i] - outg_dot_out) * out[i];

    rbm_bias_gradient << input_gradient;
}

//////////////
// fpropNLL //
//////////////
real RBMMultinomialLayer::fpropNLL(const Vec& target)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );

#ifdef BOUNDCHECK
    if (!target.hasMissing())
    {
        PLASSERT_MSG( min(target) >= 0.,
                      "Elements of \"target\" should be non-negative" );
        // Ensure the distribution probabilities sum to 1. We relax the
        // default tolerance a bit, as probabilities computed through
        // exponentials can suffer from numerical imprecision.
        if (!is_equal( sum(target), 1., 1., 1e-5, 1e-5 ))
            PLERROR("In RBMMultinomialLayer::fpropNLL - Elements of \"target\""
                    " should sum to 1 (found a sum = %f)",
                    sum(target));
    }
#endif

    real nll = 0;
    real target_i, expectation_i;
    for (int i=0; i<size; i++)
    {
        target_i = target[i];
        expectation_i = expectation[i];
        if(!fast_exact_is_equal(target_i, 0.0))
            nll -= target_i * pl_log(expectation_i);
    }
    return nll;
}
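
// For a one-hot target (the usual case for a single multinomial unit), the
// sum above reduces to one term: nll = -log(expectation[c]), where c is the
// index of the active unit.  For instance, if expectation were
// [0.1, 0.2, 0.6, 0.1] and target were [0, 0, 1, 0], fpropNLL would return
// -log(0.6), about 0.51 (numbers made up purely for illustration).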

void RBMMultinomialLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

    real target_i, expectation_i;
    for (int k=0; k<batch_size; k++) // loop over minibatch
    {
#ifdef BOUNDCHECK
        if (!targets(k).hasMissing())
        {
            PLASSERT_MSG( min(targets(k)) >= 0.,
                          "Elements of \"targets\" should be non-negative" );
            // Ensure the distribution probabilities sum to 1. We relax the
            // default tolerance a bit, as probabilities computed through
            // exponentials can suffer from numerical imprecision.
            if (!is_equal( sum(targets(k)), 1., 1., 1e-5, 1e-5 ))
                PLERROR("In RBMMultinomialLayer::fpropNLL - Elements of"
                        " \"targets\" should sum to 1 (found a sum = %f at row"
                        " %d)",
                        sum(targets(k)), k);
        }
#endif
        real nll = 0;
        real* expectation = expectations[k];
        real* target = targets[k];
        for(int i=0; i<size; i++)
        {
            target_i = target[i];
            expectation_i = expectation[i];
            if(!fast_exact_is_equal(target_i, 0.0))
                nll -= target_i * pl_log(expectation_i);
        }
        costs_column(k, 0) = nll;
    }
}

void RBMMultinomialLayer::bpropNLL(const Vec& target, real nll,
                                   Vec& bias_gradient)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}
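
// The shortcut above is the standard softmax + negative log-likelihood
// identity: with p = softmax(a) and a target distribution t (summing to 1),
//
//     d/da_i [ -sum_j t_j * log(p_j) ] = p_i - t_i,
//
// so the gradient with respect to the activation (and hence the bias) is
// simply expectation - target, which is what substract() computes here.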

void RBMMultinomialLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                                   Mat& bias_gradients)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);
}

void RBMMultinomialLayer::declareOptions(OptionList& ol)
{
/*
    declareOption(ol, "size", &RBMMultinomialLayer::size,
                  OptionBase::buildoption,
                  "Number of units.");
*/
    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMMultinomialLayer::build_()
{
}

void RBMMultinomialLayer::build()
{
    inherited::build();
    build_();
}


void RBMMultinomialLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(tmp_softmax, copies);
}

real RBMMultinomialLayer::energy(const Vec& unit_values) const
{
    return -dot(unit_values, bias);
}

real RBMMultinomialLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    // result = -log(\sum_{i=0}^{size-1} exp(a_i))
    return -logadd(unit_activations);
}
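
// Why this is the free energy contribution: a multinomial unit occupies
// exactly one of 'size' one-hot configurations, and configuration i has
// unnormalized log-probability a_i (its activation).  Summing exp(a . v)
// over all admissible one-hot vectors v therefore gives sum_i exp(a_i), and
// the contribution to the free energy is minus the log of that sum, which
// logadd() evaluates as log(sum_i exp(a_i)).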

void RBMMultinomialLayer::freeEnergyContributionGradient(
    const Vec& unit_activations,
    Vec& unit_activations_gradient,
    real output_gradient, bool accumulate) const
{
    PLASSERT( unit_activations.size() == size );
    unit_activations_gradient.resize( size );
    if( !accumulate ) unit_activations_gradient.clear();
    tmp_softmax.resize( size );
    softmax(unit_activations, tmp_softmax);
    real* ga = unit_activations_gradient.data();
    real* s = tmp_softmax.data();
    for (int i=0; i<size; i++)
        ga[i] -= output_gradient * s[i];
}

int RBMMultinomialLayer::getConfigurationCount()
{
    return size;
}

void RBMMultinomialLayer::getConfiguration(int conf_index, Vec& output)
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

    for ( int i = 0; i < size; ++i ) {
        output[i] = i == conf_index ? 1 : 0;
    }
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :