RBMRateLayer.cc
// -*- C++ -*-

// RBMRateLayer.cc
//
// Copyright (C) 2008 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#include "RBMRateLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMRateLayer,
    "Layer in an RBM consisting of rate-coded units",
    "");

RBMRateLayer::RBMRateLayer( real the_learning_rate ) :
    inherited( the_learning_rate ),
    n_spikes( 10 )
{
}
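
// A rate-coded unit can be seen as n_spikes identical binary units sharing
// the same activation a_i: its value is a spike count in {0, ..., n_spikes},
// with mean n_spikes*sigmoid(a_i) and binomial variance
// n_spikes*sigmoid(a_i)*(1-sigmoid(a_i)). Writing E_i = n_spikes*sigmoid(a_i)
// for the expectation, this variance equals E_i*(1 - E_i/n_spikes), which is
// the quantity used by the Gaussian sampling approximation below.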

void RBMRateLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
            "before calling generateSample()");

    real exp_i = 0;
    for( int i=0; i<size; i++)
    {
        exp_i = expectation[i];
        // exp_i*(1 - exp_i/n_spikes) is the variance of the binomial spike
        // count; gaussian_mu_sigma() expects a standard deviation, hence the
        // sqrt.
        sample[i] = round( random_gen->gaussian_mu_sigma(
                               exp_i, sqrt( exp_i*(1 - exp_i/n_spikes) ) ) );
    }
}

void RBMRateLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
                "before calling generateSamples()");

    PLASSERT( samples.width() == size && samples.length() == batch_size );

    real exp_i = 0;
    for (int k = 0; k < batch_size; k++)
    {
        for( int i=0; i<size; i++)
        {
            exp_i = expectations(k,i);
            // As in generateSample(): pass the standard deviation, not the
            // variance, of the binomial spike count.
            samples(k,i) = round( random_gen->gaussian_mu_sigma(
                                      exp_i, sqrt( exp_i*(1 - exp_i/n_spikes) ) ) );
        }
    }
}

void RBMRateLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

    if (use_fast_approximations)
        for(int i=0; i<size; i++)
            expectation[i] = n_spikes*fastsigmoid(activation[i]);
    else
        for(int i=0; i<size; i++)
            expectation[i] = n_spikes*sigmoid(activation[i]);
    expectation_is_up_to_date = true;
}

void RBMRateLayer::computeExpectations()
{
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );

    if (use_fast_approximations)
        for (int k = 0; k < batch_size; k++)
            for(int i=0; i<size; i++)
                expectations(k,i) = n_spikes*fastsigmoid(activations(k,i));
    else
        for (int k = 0; k < batch_size; k++)
            for(int i=0; i<size; i++)
                expectations(k,i) = n_spikes*sigmoid(activations(k,i));
    expectations_are_up_to_date = true;
}

void RBMRateLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );
    if (use_fast_approximations)
        for(int i=0; i<size; i++)
            output[i] = n_spikes*fastsigmoid(input[i]+bias[i]);
    else
        for(int i=0; i<size; i++)
            output[i] = n_spikes*sigmoid(input[i]+bias[i]);
}
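
// Usage sketch (hypothetical; "size" and build() come from the inherited
// RBMLayer interface, "n_spikes" is declared in declareOptions() below):
//
//     PP<RBMRateLayer> layer = new RBMRateLayer();
//     layer->size = 100;       // number of rate-coded units
//     layer->n_spikes = 10;    // each unit's output lies in (0, 10)
//     layer->build();
//
//     Vec input( 100 );        // e.g. activations coming from a connection
//     Vec output;
//     layer->fprop( input, output );   // output[i] = n_spikes*sigmoid(...)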

/////////////////
// bpropUpdate //
/////////////////
void RBMRateLayer::bpropUpdate(const Vec& input, const Vec& output,
                               Vec& input_gradient,
                               const Vec& output_gradient,
                               bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

    for( int i=0 ; i<size ; i++ )
    {
        real output_i = output[i];
        // output_i = n_spikes * sigmoid(x_i), so by the chain rule
        // d output_i / d x_i = n_spikes * s * (1-s)
        //                    = output_i * (1 - output_i/n_spikes)
        real in_grad_i = output_i * (1 - output_i/n_spikes)
            * output_gradient[i];
        input_gradient[i] += in_grad_i;

        if( momentum == 0. )
        {
            // update the bias: bias -= learning_rate * input_gradient
            bias[i] -= learning_rate * in_grad_i;
        }
        else
        {
            // The update rule becomes:
            // bias_inc = momentum * bias_inc - learning_rate * input_gradient
            // bias += bias_inc
            bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
            bias[i] += bias_inc[i];
        }
    }
    applyBiasDecay();
}

void RBMRateLayer::bpropUpdate(const Mat& inputs, const Mat& outputs,
                               Mat& input_gradients,
                               const Mat& output_gradients,
                               bool accumulate)
{
    PLERROR("In RBMRateLayer::bpropUpdate(): mini-batch version of "
            "bpropUpdate is not implemented yet");
}

//////////////
// fpropNLL //
//////////////
real RBMRateLayer::fpropNLL(const Vec& target)
{
    PLERROR("In RBMRateLayer::fpropNLL(): not implemented");
    // The code below is unreachable (the PLERROR above throws); it is kept
    // as reference code for the binary-unit NLL.
    PLASSERT( target.size() == input_size );
    real ret = 0;
    real target_i, activation_i;
    if(use_fast_approximations){
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += tabulated_softplus(activation_i) - target_i * activation_i;
            // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
            // but it is numerically unstable, so use instead the following identity:
            //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
            //     = act + softplus(-act) - target*act
            //     = softplus(act) - target*act
        }
    } else {
        for( int i=0 ; i<size ; i++ )
        {
            target_i = target[i];
            activation_i = activation[i];
            ret += softplus(activation_i) - target_i * activation_i;
        }
    }

    return ret;
}

void RBMRateLayer::bpropNLL(const Vec& target, real nll,
                            Vec& bias_gradient)
{
    PLERROR("In RBMRateLayer::bpropNLL(): not implemented");
    // Unreachable reference code (see fpropNLL() above).
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}

void RBMRateLayer::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_spikes", &RBMRateLayer::n_spikes,
                  OptionBase::buildoption,
                  "Maximum number of spikes for each neuron.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
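
// In a PLearn script, the layer would be specified along these lines
// (sketch; the exact set of available fields depends on the inherited
// RBMLayer options):
//
//     RBMRateLayer(
//         size = 100;
//         n_spikes = 10;
//     )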

void RBMRateLayer::build_()
{
    if( n_spikes < 1 )
        PLERROR("In RBMRateLayer::build_(): n_spikes should be positive");
}

void RBMRateLayer::build()
{
    inherited::build();
    build_();
}


void RBMRateLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    //deepCopyField(tmp_softmax, copies);
}

real RBMRateLayer::energy(const Vec& unit_values) const
{
    return -dot(unit_values, bias);
}

real RBMRateLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    PLASSERT( unit_activations.size() == size );

    // result = - n_spikes * \sum_{i=0}^{size-1} softplus(a_i)
    real result = 0;
    real* a = unit_activations.data();
    for (int i=0; i<size; i++)
    {
        if (use_fast_approximations)
            result -= n_spikes*tabulated_softplus(a[i]);
        else
            result -= n_spikes*softplus(a[i]);
    }
    return result;
}
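
// Derivation sketch (under the replicated-binary-units view of a rate-coded
// unit): summing out the n_spikes binary replicas of unit i, each with
// activation a_i, contributes a factor (1 + e^{a_i})^{n_spikes} to the
// partition sum, i.e. n_spikes * softplus(a_i) in the log domain. The free
// energy contribution is minus the sum of these terms, and its gradient
// w.r.t. a_i is -n_spikes * sigmoid(a_i), which is what
// freeEnergyContribution() and freeEnergyContributionGradient() compute.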

void RBMRateLayer::freeEnergyContributionGradient(
    const Vec& unit_activations,
    Vec& unit_activations_gradient,
    real output_gradient, bool accumulate) const
{
    PLASSERT( unit_activations.size() == size );
    unit_activations_gradient.resize( size );
    if( !accumulate ) unit_activations_gradient.clear();
    real* a = unit_activations.data();
    real* ga = unit_activations_gradient.data();
    for (int i=0; i<size; i++)
    {
        if (use_fast_approximations)
            ga[i] -= output_gradient * n_spikes * fastsigmoid( a[i] );
        else
            ga[i] -= output_gradient * n_spikes * sigmoid( a[i] );
    }
}

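// Each unit takes an integer value in {0, ..., n_spikes}, so the number of
// joint configurations is (n_spikes+1)^size; presumably it is reported as
// infinite because enumerating it is intractable.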
int RBMRateLayer::getConfigurationCount()
{
    return INFINITE_CONFIGURATIONS;
}

void RBMRateLayer::getConfiguration(int conf_index, Vec& output)
{
    PLERROR("In RBMRateLayer::getConfiguration(): not implemented");
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :