// -*- C++ -*-

// RBMBinomialLayer.cc
//
// Copyright (C) 2006 Pascal Lamblin & Dan Popovici
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin & Dan Popovici

#include "RBMBinomialLayer.h"
#include <plearn/math/TMat_maths.h>
#include "RBMConnection.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMBinomialLayer,
    "Layer in an RBM formed with binomial units.",
    "");

RBMBinomialLayer::RBMBinomialLayer( real the_learning_rate ) :
    inherited( the_learning_rate ),
    use_signed_samples( false )
{
}

RBMBinomialLayer::RBMBinomialLayer( int the_size, real the_learning_rate ) :
    inherited( the_learning_rate ),
    use_signed_samples( false )
{
    size = the_size;
    activation.resize( the_size );
    sample.resize( the_size );
    expectation.resize( the_size );
    bias.resize( the_size );
    bias_pos_stats.resize( the_size );
    bias_neg_stats.resize( the_size );
}

////////////////////
// generateSample //
////////////////////
void RBMBinomialLayer::generateSample()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectation_is_up_to_date, "Expectation should be computed "
            "before calling generateSample()");

    //random_gen->manual_seed(1827);

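    // For signed units, the expectation lies in [-1,1]: map it to a
    // probability (e+1)/2, draw a {0,1} Bernoulli sample, then map that
    // sample back to {-1,1}.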
    if( use_signed_samples )
        for( int i=0 ; i<size ; i++ )
            sample[i] = 2*random_gen->binomial_sample( (expectation[i]+1)/2 )-1;
    else
        for( int i=0 ; i<size ; i++ )
            sample[i] = random_gen->binomial_sample( expectation[i] );
}

/////////////////////
// generateSamples //
/////////////////////
void RBMBinomialLayer::generateSamples()
{
    PLASSERT_MSG(random_gen,
                 "random_gen should be initialized before generating samples");

    PLCHECK_MSG(expectations_are_up_to_date, "Expectations should be computed "
            "before calling generateSamples()");

    PLASSERT( samples.width() == size && samples.length() == batch_size );

    //random_gen->manual_seed(1827);

    if( use_signed_samples )
        for (int k = 0; k < batch_size; k++) {
            for (int i=0 ; i<size ; i++)
                samples(k, i) = 2*random_gen->binomial_sample( (expectations(k, i)+1)/2 )-1;
        }
    else
        for (int k = 0; k < batch_size; k++) {
            for (int i=0 ; i<size ; i++)
                samples(k, i) = random_gen->binomial_sample( expectations(k, i) );
        }

}

////////////////////////
// computeExpectation //
////////////////////////
void RBMBinomialLayer::computeExpectation()
{
    if( expectation_is_up_to_date )
        return;

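    // A signed unit takes value +1 with probability sigmoid(2a) (see
    // generateSample), so its expectation is 2*sigmoid(2a) - 1 = tanh(a),
    // hence the tanh below; an ordinary {0,1} unit has expectation sigmoid(a).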
    if( use_signed_samples )
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                expectation[i] = fasttanh( activation[i] );
        else
            for( int i=0 ; i<size ; i++ )
                expectation[i] = tanh( activation[i] );
    else
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                expectation[i] = fastsigmoid( activation[i] );
        else
            for( int i=0 ; i<size ; i++ )
                expectation[i] = sigmoid( activation[i] );

    expectation_is_up_to_date = true;
}

/////////////////////////
// computeExpectations //
/////////////////////////
void RBMBinomialLayer::computeExpectations()
{
    PLASSERT( activations.length() == batch_size );
    if( expectations_are_up_to_date )
        return;

    PLASSERT( expectations.width() == size
              && expectations.length() == batch_size );
    if( use_signed_samples )
        if (use_fast_approximations)
            for (int k = 0; k < batch_size; k++)
                for (int i = 0 ; i < size ; i++)
                    expectations(k, i) = fasttanh(activations(k, i));
        else
            for (int k = 0; k < batch_size; k++)
                for (int i = 0 ; i < size ; i++)
                    expectations(k, i) = tanh(activations(k, i));
    else
        if (use_fast_approximations)
            for (int k = 0; k < batch_size; k++)
                for (int i = 0 ; i < size ; i++)
                    expectations(k, i) = fastsigmoid(activations(k, i));
        else
            for (int k = 0; k < batch_size; k++)
                for (int i = 0 ; i < size ; i++)
                    expectations(k, i) = sigmoid(activations(k, i));

    expectations_are_up_to_date = true;
}

///////////
// fprop //
///////////
void RBMBinomialLayer::fprop( const Vec& input, Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    if( use_signed_samples )
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                output[i] = fasttanh( input[i] + bias[i] );
        else
            for( int i=0 ; i<size ; i++ )
                output[i] = tanh( input[i] + bias[i] );
    else
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                output[i] = fastsigmoid( input[i] + bias[i] );
        else
            for( int i=0 ; i<size ; i++ )
                output[i] = sigmoid( input[i] + bias[i] );
}

void RBMBinomialLayer::fprop( const Mat& inputs, Mat& outputs )
{
    int mbatch_size = inputs.length();
    PLASSERT( inputs.width() == size );
    outputs.resize( mbatch_size, size );

    if( use_signed_samples )
        if (use_fast_approximations)
            for( int k = 0; k < mbatch_size; k++ )
                for( int i = 0; i < size; i++ )
                    outputs(k,i) = fasttanh( inputs(k,i) + bias[i] );
        else
            for( int k = 0; k < mbatch_size; k++ )
                for( int i = 0; i < size; i++ )
                    outputs(k,i) = tanh( inputs(k,i) + bias[i] );
    else
        if (use_fast_approximations)
            for( int k = 0; k < mbatch_size; k++ )
                for( int i = 0; i < size; i++ )
                    outputs(k,i) = fastsigmoid( inputs(k,i) + bias[i] );
        else
            for( int k = 0; k < mbatch_size; k++ )
                for( int i = 0; i < size; i++ )
                    outputs(k,i) = sigmoid( inputs(k,i) + bias[i] );

}

void RBMBinomialLayer::fprop( const Vec& input, const Vec& rbm_bias,
                              Vec& output ) const
{
    PLASSERT( input.size() == input_size );
    PLASSERT( rbm_bias.size() == input_size );
    output.resize( output_size );

    if( use_signed_samples )
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                output[i] = fasttanh( input[i] + rbm_bias[i] );
        else
            for( int i=0 ; i<size ; i++ )
                output[i] = tanh( input[i] + rbm_bias[i] );
    else
        if (use_fast_approximations)
            for( int i=0 ; i<size ; i++ )
                output[i] = fastsigmoid( input[i] + rbm_bias[i] );
        else
            for( int i=0 ; i<size ; i++ )
                output[i] = sigmoid( input[i] + rbm_bias[i] );
}

/////////////////
// bpropUpdate //
/////////////////
void RBMBinomialLayer::bpropUpdate(const Vec& input, const Vec& output,
                                   Vec& input_gradient,
                                   const Vec& output_gradient,
                                   bool accumulate)
{
    PLASSERT( input.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }
    else
    {
        input_gradient.resize( size );
        input_gradient.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

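    // For signed units, output = tanh(activation), so
    // d(output)/d(activation) = 1 - output^2; for {0,1} units,
    // output = sigmoid(activation), so d(output)/d(activation) = output*(1-output).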
    if( use_signed_samples )
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = output[i];
            real in_grad_i;
            in_grad_i = (1 - output_i * output_i) * output_gradient[i];
            input_gradient[i] += in_grad_i;

            if( momentum == 0. )
            {
                // update the bias: bias -= learning_rate * input_gradient
                bias[i] -= learning_rate * in_grad_i;
            }
            else
            {
                // The update rule becomes:
                // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                // bias += bias_inc
                bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                bias[i] += bias_inc[i];
            }
        }
    }
    else
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = output[i];
            real in_grad_i;
            in_grad_i = output_i * (1-output_i) * output_gradient[i];
            input_gradient[i] += in_grad_i;

            if( momentum == 0. )
            {
                // update the bias: bias -= learning_rate * input_gradient
                bias[i] -= learning_rate * in_grad_i;
            }
            else
            {
                // The update rule becomes:
                // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                // bias += bias_inc
                bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                bias[i] += bias_inc[i];
            }
        }
    }
    applyBiasDecay();
}

void RBMBinomialLayer::bpropUpdate(const Mat& inputs, const Mat& outputs,
                                   Mat& input_gradients,
                                   const Mat& output_gradients,
                                   bool accumulate)
{
    PLASSERT( inputs.width() == size );
    PLASSERT( outputs.width() == size );
    PLASSERT( output_gradients.width() == size );

    int mbatch_size = inputs.length();
    PLASSERT( outputs.length() == mbatch_size );
    PLASSERT( output_gradients.length() == mbatch_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == size &&
                input_gradients.length() == mbatch_size,
                "Cannot resize input_gradients and accumulate into it" );
    }
    else
    {
        input_gradients.resize(mbatch_size, size);
        input_gradients.clear();
    }

    if( momentum != 0. )
        bias_inc.resize( size );

    // TODO Can we do this more efficiently? (using BLAS)

    // We use the average gradient over the mini-batch.
    real avg_lr = learning_rate / inputs.length();

    if( use_signed_samples )
    {
        for (int j = 0; j < mbatch_size; j++)
        {
            for( int i=0 ; i<size ; i++ )
            {
                real output_i = outputs(j, i);
                real in_grad_i;
                in_grad_i = (1 - output_i * output_i) * output_gradients(j, i);
                input_gradients(j, i) += in_grad_i;

                if( momentum == 0. )
                {
                    // update the bias: bias -= avg_lr * input_gradient
                    // (learning rate divided by the mini-batch size)
                    bias[i] -= avg_lr * in_grad_i;
                }
                else
                {
                    PLERROR("In RBMBinomialLayer:bpropUpdate - Not implemented for "
                            "momentum with mini-batches");
                    // The update rule becomes:
                    // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                    // bias += bias_inc
                    bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                    bias[i] += bias_inc[i];
                }
            }
        }
    }
    else
    {
        for (int j = 0; j < mbatch_size; j++)
        {
            for( int i=0 ; i<size ; i++ )
            {
                real output_i = outputs(j, i);
                real in_grad_i;
                in_grad_i = output_i * (1-output_i) * output_gradients(j, i);
                input_gradients(j, i) += in_grad_i;

                if( momentum == 0. )
                {
                    // update the bias: bias -= avg_lr * input_gradient
                    // (learning rate divided by the mini-batch size)
                    bias[i] -= avg_lr * in_grad_i;
                }
                else
                {
                    PLERROR("In RBMBinomialLayer:bpropUpdate - Not implemented for "
                            "momentum with mini-batches");
                    // The update rule becomes:
                    // bias_inc = momentum * bias_inc - learning_rate * input_gradient
                    // bias += bias_inc
                    bias_inc[i] = momentum * bias_inc[i] - learning_rate * in_grad_i;
                    bias[i] += bias_inc[i];
                }
            }
        }
    }
    applyBiasDecay();
}


void RBMBinomialLayer::bpropUpdate(const Vec& input, const Vec& rbm_bias,
                                   const Vec& output,
                                   Vec& input_gradient, Vec& rbm_bias_gradient,
                                   const Vec& output_gradient)
{
    PLASSERT( input.size() == size );
    PLASSERT( rbm_bias.size() == size );
    PLASSERT( output.size() == size );
    PLASSERT( output_gradient.size() == size );
    input_gradient.resize( size );
    rbm_bias_gradient.resize( size );

    if( use_signed_samples )
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = output[i];

            input_gradient[i] = ( 1 - output_i * output_i ) * output_gradient[i];
        }
    }
    else
    {
        for( int i=0 ; i<size ; i++ )
        {
            real output_i = output[i];
            input_gradient[i] = output_i * (1-output_i) * output_gradient[i];
        }
    }

    rbm_bias_gradient << input_gradient;
}

real RBMBinomialLayer::fpropNLL(const Vec& target)
{
    PLASSERT( target.size() == input_size );
    real ret = 0;
    real target_i, activation_i;
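    // For signed units, the NLL of a target s in {-1,1} equals that of a
    // {0,1} unit with target (s+1)/2 and activation 2*a, so the signed branch
    // just rewrites target and activation before applying the usual
    // softplus(act) - target*act formula.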
    if( use_signed_samples )
    {
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ )
            {
                target_i = (target[i]+1)/2;
                activation_i = 2*activation[i];

                ret += tabulated_softplus(activation_i) - target_i * activation_i;
                // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
                // but it is numerically unstable, so use instead the following identity:
                //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
                //     = act + softplus(-act) - target*act
                //     = softplus(act) - target*act
            }
        } else {
            for( int i=0 ; i<size ; i++ )
            {
                target_i = (target[i]+1)/2;
                activation_i = 2*activation[i];
                ret += softplus(activation_i) - target_i * activation_i;
            }
        }
    }
    else
    {
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ )
            {
                target_i = target[i];
                activation_i = activation[i];
                ret += tabulated_softplus(activation_i) - target_i * activation_i;
                // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
                // but it is numerically unstable, so use instead the following identity:
                //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
                //     = act + softplus(-act) - target*act
                //     = softplus(act) - target*act
            }
        } else {
            for( int i=0 ; i<size ; i++ )
            {
                target_i = target[i];
                activation_i = activation[i];
                ret += softplus(activation_i) - target_i * activation_i;
            }
        }
    }

    return ret;
}

real RBMBinomialLayer::fpropNLL(const Vec& target, const Vec& cost_weights)
{
    PLASSERT( target.size() == input_size );
    PLASSERT( target.size() == cost_weights.size() );
    PLASSERT( cost_weights.size() == size );

    real ret = 0;
    real target_i, activation_i;
    if( use_signed_samples )
    {
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ )
            {
                if(cost_weights[i] != 0)
                {
                    target_i = (target[i]+1)/2;
                    activation_i = 2*activation[i];

                    ret += cost_weights[i]*(tabulated_softplus(activation_i) - target_i * activation_i);
                }
                // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
                // but it is numerically unstable, so use instead the following identity:
                //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
                //     = act + softplus(-act) - target*act
                //     = softplus(act) - target*act
            }
        } else {
            for( int i=0 ; i<size ; i++ )
            {
                if(cost_weights[i] != 0)
                {
                    target_i = (target[i]+1)/2;
                    activation_i = 2*activation[i];
                    ret += cost_weights[i]*(softplus(activation_i) - target_i * activation_i);
                }
            }
        }
    }
    else
    {
        if(use_fast_approximations){
            for( int i=0 ; i<size ; i++ )
            {
                if(cost_weights[i] != 0)
                {
                    target_i = target[i];
                    activation_i = activation[i];
                    ret += cost_weights[i]*(tabulated_softplus(activation_i) - target_i * activation_i);
                }
                // nll = - target*log(sigmoid(act)) -(1-target)*log(1-sigmoid(act))
                // but it is numerically unstable, so use instead the following identity:
                //     = target*softplus(-act) +(1-target)*(act+softplus(-act))
                //     = act + softplus(-act) - target*act
                //     = softplus(act) - target*act
            }
        } else {
            for( int i=0 ; i<size ; i++ )
            {
                if(cost_weights[i] != 0)
                {
                    target_i = target[i];
                    activation_i = activation[i];
                    ret += cost_weights[i]*(softplus(activation_i) - target_i * activation_i);
                }
            }
        }
    }

    return ret;
}


void RBMBinomialLayer::fpropNLL(const Mat& targets, const Mat& costs_column)
{
    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );

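    // Same cost as the per-example fpropNLL above; for signed units the term
    // softplus(2a) - ((t+1)/2)*(2a) is written out directly as
    // softplus(2a) - (t+1)*a.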
    if( use_signed_samples )
    {
        for (int k=0;k<batch_size;k++) // loop over minibatch
        {
            real nll = 0;
            real* activation = activations[k];
            real* target = targets[k];
            if(use_fast_approximations){
                for( int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    nll += tabulated_softplus(2*activation[i])
                        - (target[i]+1) * activation[i] ;
                }
            } else {
                for( int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    nll += softplus(2*activation[i]) - (target[i]+1)*activation[i] ;
                }
            }
            costs_column(k,0) = nll;
        }
    }
    else
    {
        for (int k=0;k<batch_size;k++) // loop over minibatch
        {
            real nll = 0;
            real* activation = activations[k];
            real* target = targets[k];
            if(use_fast_approximations){
                for( int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    nll += tabulated_softplus(activation[i])
                        -target[i] * activation[i] ;
                }
            } else {
                for( int i=0 ; i<size ; i++ ) // loop over outputs
                {
                    nll += softplus(activation[i]) - target[i] * activation[i] ;
                }
            }
            costs_column(k,0) = nll;
        }
    }
}

void RBMBinomialLayer::bpropNLL(const Vec& target, real nll, Vec& bias_gradient)
{
    computeExpectation();

    PLASSERT( target.size() == input_size );
    bias_gradient.resize( size );

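    // d/da [softplus(a) - t*a] = sigmoid(a) - t, and in the signed
    // parametrization d/da [softplus(2a) - (t+1)*a] = tanh(a) - t, so in both
    // cases the gradient of the NLL w.r.t. the activation is expectation - target.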
    // bias_gradient = expectation - target
    substract(expectation, target, bias_gradient);
}

void RBMBinomialLayer::bpropNLL(const Mat& targets, const Mat& costs_column,
                                Mat& bias_gradients)
{
    computeExpectations();

    PLASSERT( targets.width() == input_size );
    PLASSERT( targets.length() == batch_size );
    PLASSERT( costs_column.width() == 1 );
    PLASSERT( costs_column.length() == batch_size );
    bias_gradients.resize( batch_size, size );

    // bias_gradients = expectations - targets
    substract(expectations, targets, bias_gradients);
}

void RBMBinomialLayer::declareOptions(OptionList& ol)
{

    declareOption(ol, "use_signed_samples", &RBMBinomialLayer::use_signed_samples,
                  OptionBase::buildoption,
                  "Indication that samples should be in {-1,1}, not {0,1}.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMBinomialLayer::build_()
{
}

void RBMBinomialLayer::build()
{
    inherited::build();
    build_();
}


void RBMBinomialLayer::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

real RBMBinomialLayer::energy(const Vec& unit_values) const
{
    return -dot(unit_values, bias);
}

real RBMBinomialLayer::freeEnergyContribution(const Vec& unit_activations)
    const
{
    PLASSERT( unit_activations.size() == size );

    // For {0,1} units:  result = -\sum_{i=0}^{size-1} softplus(a_i)
    // For signed units: result = -\sum_{i=0}^{size-1} ( softplus(2*a_i) - a_i )
    //                          = -\sum_i log( exp(a_i) + exp(-a_i) )
    real result = 0;
    real* a = unit_activations.data();
    if( use_signed_samples )
    {
        for (int i=0; i<size; i++)
        {
            if (use_fast_approximations)
                result -= tabulated_softplus(2*a[i]) - a[i];
            else
                result -= softplus(2*a[i]) - a[i];
        }
    }
    else
    {
        for (int i=0; i<size; i++)
        {
            if (use_fast_approximations)
                result -= tabulated_softplus(a[i]);
            else
                result -= softplus(a[i]);
        }
    }
    return result;
}

void RBMBinomialLayer::freeEnergyContributionGradient(
    const Vec& unit_activations,
    Vec& unit_activations_gradient,
    real output_gradient, bool accumulate) const
{
    PLASSERT( unit_activations.size() == size );
    unit_activations_gradient.resize( size );
    if( !accumulate ) unit_activations_gradient.clear();
    real* a = unit_activations.data();
    real* ga = unit_activations_gradient.data();
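    // Gradient of the free-energy contribution w.r.t. each activation:
    // d/da [ -softplus(a) ] = -sigmoid(a) for {0,1} units, and
    // d/da [ -(softplus(2a) - a) ] = -tanh(a) for signed units.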
    if( use_signed_samples )
    {
        for (int i=0; i<size; i++)
        {
            if (use_fast_approximations)
                ga[i] -= output_gradient *
                    ( fasttanh( a[i] ) );
            else
                ga[i] -= output_gradient *
                    ( tanh( a[i] ) );
        }
    }
    else
    {
        for (int i=0; i<size; i++)
        {
            if (use_fast_approximations)
                ga[i] -= output_gradient *
                    fastsigmoid( a[i] );
            else
                ga[i] -= output_gradient *
                    sigmoid( a[i] );
        }
    }
}

int RBMBinomialLayer::getConfigurationCount()
{
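    // A layer of n binary units has 2^n configurations; beyond 30 units this
    // no longer fits in an int, so the count is reported as infinite.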
    return size < 31 ? 1<<size : INFINITE_CONFIGURATIONS;
}

void RBMBinomialLayer::getConfiguration(int conf_index, Vec& output)
{
    PLASSERT( output.length() == size );
    PLASSERT( conf_index >= 0 && conf_index < getConfigurationCount() );

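    // Read conf_index bit by bit, least significant first; bit b gives
    // unit i the value b, or 2*b - 1 for signed units.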
    if( use_signed_samples )
    {
        for ( int i = 0; i < size; ++i ) {
            output[i] = 2 * (conf_index & 1) - 1;
            conf_index >>= 1;
        }
    }
    else
    {
        for ( int i = 0; i < size; ++i ) {
            output[i] = conf_index & 1;
            conf_index >>= 1;
        }
    }
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :