// -*- C++ -*-

// TopDownAsymetricDeepNetwork.cc
//
// Copyright (C) 2008 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#define PL_LOG_MODULE_NAME "TopDownAsymetricDeepNetwork"
#include <plearn/io/pl_log.h>

#include "TopDownAsymetricDeepNetwork.h"
#include <plearn/vmat/VMat_computeNearestNeighbors.h>
#include <plearn/vmat/GetInputVMatrix.h>
#include <plearn_learners/online/RBMMixedLayer.h>
#include <plearn_learners/online/RBMMixedConnection.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    TopDownAsymetricDeepNetwork,
    "Neural net, trained layer-wise in a greedy but focused fashion using autoassociators/RBMs and a supervised non-parametric gradient.",
    "It is highly inspired by the StackedFocusedAutoassociators class,\n"
    "and can use the same RBMLayer and RBMConnection components.\n"
    );
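// The options declared below (layers, connections, n_classes, learning rates,
// training_schedule, etc.) are what a user typically sets before build().
// As a rough, illustrative sketch only -- the values and option types used
// here are assumptions, not taken from this file -- such a learner might be
// configured along these lines:
//
//     PP<TopDownAsymetricDeepNetwork> net = new TopDownAsymetricDeepNetwork();
//     net->n_classes = 10;                          // e.g. digit classification
//     net->cd_learning_rate = 0.01;                 // contrastive divergence phase
//     net->greedy_learning_rate = 0.01;             // autoassociator phase
//     net->fine_tuning_learning_rate = 0.005;       // supervised fine-tuning
//     net->training_schedule = TVec<int>(2, 10000); // examples per greedy phase
//     net->build();
//
// The layers, top_down_layers, connections and reconstruction_connections
// vectors must also be filled with compatible RBMLayer / RBMConnection
// objects before build() is called.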

TopDownAsymetricDeepNetwork::TopDownAsymetricDeepNetwork() :
    cd_learning_rate( 0. ),
    cd_decrease_ct( 0. ),
    greedy_learning_rate( 0. ),
    greedy_decrease_ct( 0. ),
    fine_tuning_learning_rate( 0. ),
    fine_tuning_decrease_ct( 0. ),
    n_classes( -1 ),
    output_weights_l1_penalty_factor(0),
    output_weights_l2_penalty_factor(0),
    fraction_of_masked_inputs( 0 ),
    n_layers( 0 ),
    currently_trained_layer( 0 )
{
    // random_gen will be initialized in PLearner::build_()
    random_gen = new PRandom();
    nstages = 0;
}

void TopDownAsymetricDeepNetwork::declareOptions(OptionList& ol)
{
    declareOption(ol, "cd_learning_rate", 
                  &TopDownAsymetricDeepNetwork::cd_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the RBM "
                  "contrastive divergence training.\n");

    declareOption(ol, "cd_decrease_ct", 
                  &TopDownAsymetricDeepNetwork::cd_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the RBMs contrastive\n"
                  "divergence training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");

    declareOption(ol, "greedy_learning_rate", 
                  &TopDownAsymetricDeepNetwork::greedy_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the autoassociator "
                  "gradient descent training.\n");

    declareOption(ol, "greedy_decrease_ct", 
                  &TopDownAsymetricDeepNetwork::greedy_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the autoassociator\n"
                  "gradient descent training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");

    declareOption(ol, "fine_tuning_learning_rate", 
                  &TopDownAsymetricDeepNetwork::fine_tuning_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the fine tuning "
                  "gradient descent.\n");

    declareOption(ol, "fine_tuning_decrease_ct", 
                  &TopDownAsymetricDeepNetwork::fine_tuning_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "fine tuning\n"
                  "gradient descent.\n");

    declareOption(ol, "training_schedule", 
                  &TopDownAsymetricDeepNetwork::training_schedule,
                  OptionBase::buildoption,
                  "Number of examples to use during each phase "
                  "of greedy pre-training.\n"
                  "The number of fine-tuning steps is defined by nstages.\n"
        );

    declareOption(ol, "layers", &TopDownAsymetricDeepNetwork::layers,
                  OptionBase::buildoption,
                  "The layers of units in the network. The first element\n"
                  "of this vector should be the input layer and the\n"
                  "subsequent elements should be the hidden layers. The\n"
                  "output layer should not be included in layers.\n"
                  "These layers will be used only for bottom up inference.\n");

    declareOption(ol, "top_down_layers", 
                  &TopDownAsymetricDeepNetwork::top_down_layers,
                  OptionBase::buildoption,
                  "The layers of units used for top down inference during\n"
                  "greedy training of an RBM/autoencoder.");

    declareOption(ol, "connections", &TopDownAsymetricDeepNetwork::connections,
                  OptionBase::buildoption,
                  "The weights of the connections between the layers");

    declareOption(ol, "reconstruction_connections", 
                  &TopDownAsymetricDeepNetwork::reconstruction_connections,
                  OptionBase::buildoption,
                  "The reconstruction weights of the autoassociators");

    declareOption(ol, "n_classes", 
                  &TopDownAsymetricDeepNetwork::n_classes,
                  OptionBase::buildoption,
                  "Number of classes.");

    declareOption(ol, "output_weights_l1_penalty_factor", 
                  &TopDownAsymetricDeepNetwork::output_weights_l1_penalty_factor,
                  OptionBase::buildoption,
                  "Output weights l1_penalty_factor.\n");

    declareOption(ol, "output_weights_l2_penalty_factor", 
                  &TopDownAsymetricDeepNetwork::output_weights_l2_penalty_factor,
                  OptionBase::buildoption,
                  "Output weights l2_penalty_factor.\n");

    declareOption(ol, "fraction_of_masked_inputs", 
                  &TopDownAsymetricDeepNetwork::fraction_of_masked_inputs,
                  OptionBase::buildoption,
                  "Fraction of the autoassociators' random input components "
                  "that are\n"
                  "masked, i.e. not used to reconstruct the input.\n");

    declareOption(ol, "greedy_stages", 
                  &TopDownAsymetricDeepNetwork::greedy_stages,
                  OptionBase::learntoption,
                  "Number of training samples seen in the different greedy "
                  "phases.\n"
        );

    declareOption(ol, "n_layers", &TopDownAsymetricDeepNetwork::n_layers,
                  OptionBase::learntoption,
                  "Number of layers"
        );

    declareOption(ol, "final_module", 
                  &TopDownAsymetricDeepNetwork::final_module,
                  OptionBase::learntoption,
                  "Output layer of neural net"
        );

    declareOption(ol, "final_cost", 
                  &TopDownAsymetricDeepNetwork::final_cost,
                  OptionBase::learntoption,
                  "Cost on output layer of neural net"
        );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void TopDownAsymetricDeepNetwork::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    MODULE_LOG << "build_() called" << endl;

    if(inputsize_ > 0 && targetsize_ > 0)
    {
        // Initialize some learnt variables
        n_layers = layers.length();
        
        if( n_classes <= 0 )
            PLERROR("TopDownAsymetricDeepNetwork::build_() - \n"
                    "n_classes should be > 0.\n");

        if( weightsize_ > 0 )
            PLERROR("TopDownAsymetricDeepNetwork::build_() - \n"
                    "usage of weighted samples (weight size > 0) is not\n"
                    "implemented yet.\n");

        if( training_schedule.length() != n_layers-1 )        
            PLERROR("TopDownAsymetricDeepNetwork::build_() - \n"
                    "training_schedule should have %d elements.\n",
                    n_layers-1);
        
        if(greedy_stages.length() == 0)
        {
            greedy_stages.resize(n_layers-1);
            greedy_stages.clear();
        }

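        // Infer which layer is currently being trained greedily: once
        // supervised fine-tuning has started (stage > 0) all layers are
        // considered trained; otherwise, the deepest layer whose greedy
        // phase has seen at least one example is the one in progress.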
        if(stage > 0)
            currently_trained_layer = n_layers;
        else
        {            
            currently_trained_layer = n_layers-1;
            while(currently_trained_layer>1
                  && greedy_stages[currently_trained_layer-1] <= 0)
                currently_trained_layer--;
        }

        build_layers_and_connections();

        if( !final_module || !final_cost )
            build_output_layer_and_cost();
    }
}

void TopDownAsymetricDeepNetwork::build_output_layer_and_cost()
{
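    // The supervised output is a small stack: an affine layer mapping the top
    // hidden representation to n_classes scores, followed by a softmax. The
    // training cost combines an NLL term (weight 1) with a class-error term
    // (weight 0), so classification error is monitored but only the NLL
    // contributes gradients.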
    GradNNetLayerModule* gnl = new GradNNetLayerModule();
    gnl->input_size = layers[n_layers-1]->size;
    gnl->output_size = n_classes;
    gnl->L1_penalty_factor = output_weights_l1_penalty_factor;
    gnl->L2_penalty_factor = output_weights_l2_penalty_factor;
    gnl->random_gen = random_gen;
    gnl->build();

    SoftmaxModule* sm = new SoftmaxModule();
    sm->input_size = n_classes;
    sm->random_gen = random_gen;
    sm->build();

    ModuleStackModule* msm = new ModuleStackModule();
    msm->modules.resize(2);
    msm->modules[0] = gnl;
    msm->modules[1] = sm;
    msm->random_gen = random_gen;
    msm->build();
    final_module = msm;

    final_module->forget();

    NLLCostModule* nll = new NLLCostModule();
    nll->input_size = n_classes;
    nll->random_gen = random_gen;
    nll->build();
    
    ClassErrorCostModule* class_error = new ClassErrorCostModule();
    class_error->input_size = n_classes;
    class_error->random_gen = random_gen;
    class_error->build();

    CombiningCostsModule* comb_costs = new CombiningCostsModule();
    comb_costs->cost_weights.resize(2);
    comb_costs->cost_weights[0] = 1;
    comb_costs->cost_weights[1] = 0;
    comb_costs->sub_costs.resize(2);
    comb_costs->sub_costs[0] = nll;
    comb_costs->sub_costs[1] = class_error;
    comb_costs->build();

    final_cost = comb_costs;
    final_cost->forget();
}

void TopDownAsymetricDeepNetwork::build_layers_and_connections()
{
    MODULE_LOG << "build_layers_and_connections() called" << endl;

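    // Sanity checks: the sizes of layers, top_down_layers, connections and
    // reconstruction_connections must all be mutually consistent before the
    // intermediate activation/expectation buffers are resized below.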
    if( connections.length() != n_layers-1 )
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "there should be %d connections.\n",
                n_layers-1);

    if( !fast_exact_is_equal( greedy_learning_rate, 0 ) 
        && reconstruction_connections.length() != n_layers-1 )
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "there should be %d reconstruction connections.\n",
                n_layers-1);
    
    if(  !( reconstruction_connections.length() == 0
            || reconstruction_connections.length() == n_layers-1 ) )
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "there should be either 0 or %d reconstruction connections.\n",
                n_layers-1);
    
    
    if(top_down_layers.length() != n_layers 
       && top_down_layers.length() != 0)
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "there should be either 0 or %d top_down_layers.\n",
                n_layers);
        
    if(layers[0]->size != inputsize_)
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "layers[0] should have a size of %d.\n",
                inputsize_);
    
    if(top_down_layers[0]->size != inputsize_)
        PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() - \n"
                "top_down_layers[0] should have a size of %d.\n",
                inputsize_);
    
    if( fraction_of_masked_inputs < 0 )
        PLERROR("TopDownAsymetricDeepNetwork::build_()"
                " - \n"
                "fraction_of_masked_inputs should be greater than or equal to 0.\n");

    activations.resize( n_layers );
    expectations.resize( n_layers );
    activation_gradients.resize( n_layers );
    expectation_gradients.resize( n_layers );

    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        if( layers[i]->size != connections[i]->down_size )
            PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have a down_size of %d.\n",
                    i, layers[i]->size);

        if( top_down_layers[i]->size != connections[i]->down_size )
            PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                    "- \n"
                    "top_down_layers[%i] should have a size of %d.\n",
                    i, connections[i]->down_size);

        if( connections[i]->up_size != layers[i+1]->size )
            PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have an up_size of %d.\n",
                    i, layers[i+1]->size);

        if( connections[i]->up_size != top_down_layers[i+1]->size )
            PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                    "- \n"
                    "top_down_layers[%i] should have a size of %d.\n",
                    i+1, connections[i]->up_size);

        if( reconstruction_connections.length() != 0 )
        {
            if( layers[i+1]->size != reconstruction_connections[i]->down_size )
                PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                        "- \n"
                        "reconstruction_connections[%i] should have a down_size of "
                            "%d.\n",
                        i, layers[i+1]->size);
            
            if( reconstruction_connections[i]->up_size != layers[i]->size )
                PLERROR("TopDownAsymetricDeepNetwork::build_layers_and_connections() "
                        "- \n"
                        "reconstruction_connections[%i] should have an up_size of "
                        "%d.\n",
                        i, layers[i]->size);
        }
        
        if( !(layers[i]->random_gen) )
        {
            layers[i]->random_gen = random_gen;
            layers[i]->forget();
        }

        if( !(top_down_layers[i]->random_gen) )
        {
            top_down_layers[i]->random_gen = random_gen;
            top_down_layers[i]->forget();
        }

        if( !(connections[i]->random_gen) )
        {
            connections[i]->random_gen = random_gen;
            connections[i]->forget();
        }

        if( reconstruction_connections.length() != 0
            && !(reconstruction_connections[i]->random_gen) )
        {
            reconstruction_connections[i]->random_gen = random_gen;
            reconstruction_connections[i]->forget();
        }        

        activations[i].resize( layers[i]->size );
        expectations[i].resize( layers[i]->size );
        activation_gradients[i].resize( layers[i]->size );
        expectation_gradients[i].resize( layers[i]->size );
    }

    if( !(layers[n_layers-1]->random_gen) )
    {
        layers[n_layers-1]->random_gen = random_gen;
        layers[n_layers-1]->forget();
    }
    if( !(top_down_layers[n_layers-1]->random_gen) )
    {
        top_down_layers[n_layers-1]->random_gen = random_gen;
        top_down_layers[n_layers-1]->forget();
    }
    activations[n_layers-1].resize( layers[n_layers-1]->size );
    expectations[n_layers-1].resize( layers[n_layers-1]->size );
    activation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
    expectation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
}

// ### Nothing to add here, simply calls build_
void TopDownAsymetricDeepNetwork::build()
{
    inherited::build();
    build_();
}


void TopDownAsymetricDeepNetwork::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // deepCopyField(, copies);

    // Public options
    deepCopyField(training_schedule, copies);
    deepCopyField(layers, copies);
    deepCopyField(top_down_layers, copies);
    deepCopyField(connections, copies);
    deepCopyField(reconstruction_connections, copies);

    // Protected options
    deepCopyField(activations, copies);
    deepCopyField(expectations, copies);
    deepCopyField(activation_gradients, copies);
    deepCopyField(expectation_gradients, copies);
    deepCopyField(reconstruction_activations, copies);
    deepCopyField(reconstruction_activation_gradients, copies);
    deepCopyField(reconstruction_expectation_gradients, copies);
    deepCopyField(input_representation, copies);
    deepCopyField(masked_autoassociator_input, copies);
    deepCopyField(autoassociator_input_indices, copies);
    deepCopyField(pos_down_val, copies);
    deepCopyField(pos_up_val, copies);
    deepCopyField(neg_down_val, copies);
    deepCopyField(neg_up_val, copies);
    deepCopyField(final_cost_input, copies);
    deepCopyField(final_cost_value, copies);
    deepCopyField(final_cost_gradient, copies);
    deepCopyField(greedy_stages, copies);
    deepCopyField(final_module, copies);
    deepCopyField(final_cost, copies);
}


int TopDownAsymetricDeepNetwork::outputsize() const
{
//    if(currently_trained_layer < n_layers)
//        return layers[currently_trained_layer]->size;
    return n_classes;
}

void TopDownAsymetricDeepNetwork::forget()
{
    inherited::forget();

    for( int i=0 ; i<n_layers ; i++ )
        layers[i]->forget();
    
    for( int i=0 ; i<n_layers ; i++ )
        top_down_layers[i]->forget();
    
    for( int i=0 ; i<n_layers-1 ; i++ )
        connections[i]->forget();
    
    for( int i=0; i<reconstruction_connections.length(); i++)
        reconstruction_connections[i]->forget();

    build_output_layer_and_cost();

    stage = 0;
    greedy_stages.clear();
}

void TopDownAsymetricDeepNetwork::train()
{
    MODULE_LOG << "train() called " << endl;
    MODULE_LOG << "  training_schedule = " << training_schedule << endl;

    Vec input( inputsize() );
    Vec target( targetsize() );
    real weight; // unused

    TVec<string> train_cost_names = getTrainCostNames() ;
    Vec train_costs( train_cost_names.length() );
    train_costs.fill(MISSING_VALUE) ;

    int nsamples = train_set->length();
    int sample;

    PP<ProgressBar> pb;

    // clear stats of previous epoch
    train_stats->forget();

    int init_stage;

    /***** initial greedy training *****/
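    // Each greedy phase trains one level of the stack on its own: the
    // bottom-up connection and layers[i+1] act as the encoder, while
    // top_down_layers[i] and reconstruction_connections[i] provide the
    // top-down (decoding) path. greedy_stages[i] counts how many examples
    // phase i has already consumed, so training can be resumed.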
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        MODULE_LOG << "Training connection weights between layers " << i
            << " and " << i+1 << endl;

        int end_stage = training_schedule[i];
        int* this_stage = greedy_stages.subVec(i,1).data();
        init_stage = *this_stage;

        MODULE_LOG << "  stage = " << *this_stage << endl;
        MODULE_LOG << "  end_stage = " << end_stage << endl;
        MODULE_LOG << "  greedy_learning_rate = " << greedy_learning_rate << endl;
        MODULE_LOG << "  cd_learning_rate = " << cd_learning_rate << endl;

        if( report_progress && *this_stage < end_stage )
            pb = new ProgressBar( "Training layer "+tostring(i)
                                  +" of "+classname(),
                                  end_stage - init_stage );

        train_costs.fill(MISSING_VALUE);
        reconstruction_activations.resize(layers[i]->size);
        reconstruction_activation_gradients.resize(layers[i]->size);
        reconstruction_expectation_gradients.resize(layers[i]->size);

        input_representation.resize(layers[i]->size);
        pos_down_val.resize(layers[i]->size);
        pos_up_val.resize(layers[i+1]->size);
        neg_down_val.resize(layers[i]->size);
        neg_up_val.resize(layers[i+1]->size);
        if( fraction_of_masked_inputs > 0 )
        {
            masked_autoassociator_input.resize(layers[i]->size);
            autoassociator_input_indices.resize(layers[i]->size);
            for( int j=0 ; j < autoassociator_input_indices.length() ; j++ )
                autoassociator_input_indices[j] = j;
        }

        for( ; *this_stage<end_stage ; (*this_stage)++ )
        {
            
            sample = *this_stage % nsamples;
            train_set->getExample(sample, input, target, weight);
            greedyStep( input, target, i, train_costs, *this_stage);
            train_stats->update( train_costs );

            if( pb )
                pb->update( *this_stage - init_stage + 1 );
        }
    }

    /***** fine-tuning by gradient descent *****/
    if( stage < nstages )
    {

        MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
        MODULE_LOG << "  stage = " << stage << endl;
        MODULE_LOG << "  nstages = " << nstages << endl;
        MODULE_LOG << "  fine_tuning_learning_rate = " << 
            fine_tuning_learning_rate << endl;

        init_stage = stage;
        if( report_progress && stage < nstages )
            pb = new ProgressBar( "Fine-tuning parameters of all layers of "
                                  + classname(),
                                  nstages - init_stage );

        setLearningRate( fine_tuning_learning_rate );
        train_costs.fill(MISSING_VALUE);

        final_cost_input.resize(n_classes);
        final_cost_value.resize(2); // Should be resized anyways
        final_cost_gradient.resize(n_classes);
        input_representation.resize(layers.last()->size);
        for( ; stage<nstages ; stage++ )
        {
            sample = stage % nsamples;
            if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) )
                setLearningRate( fine_tuning_learning_rate
                                 / (1. + fine_tuning_decrease_ct * stage ) );

            train_set->getExample( sample, input, target, weight );

            fineTuningStep( input, target, train_costs);
            train_stats->update( train_costs );

            if( pb )
                pb->update( stage - init_stage + 1 );
        }
    }
    
    train_stats->finalize();
    MODULE_LOG << "  train costs = " << train_stats->getMean() << endl;


    // Update currently_trained_layer
    if(stage > 0)
        currently_trained_layer = n_layers;
    else
    {            
        currently_trained_layer = n_layers-1;
        while(currently_trained_layer>1 
              && greedy_stages[currently_trained_layer-1] <= 0)
            currently_trained_layer--;
    }
}

void TopDownAsymetricDeepNetwork::greedyStep( 
    const Vec& input, const Vec& target, int index, 
    Vec train_costs, int this_stage)
{
    PLASSERT( index < n_layers );
    real lr;

    // Get example representation
    computeRepresentation(input, input_representation, 
                          index);
    // Autoassociator learning
    if( !fast_exact_is_equal( greedy_learning_rate, 0 ) )
    {
        if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) )
            lr = greedy_learning_rate/(1 + greedy_decrease_ct 
                                       * this_stage); 
        else
            lr = greedy_learning_rate;

        if( fraction_of_masked_inputs > 0 )
            random_gen->shuffleElements(autoassociator_input_indices);

        top_down_layers[index]->setLearningRate( lr );
        connections[index]->setLearningRate( lr );
        reconstruction_connections[index]->setLearningRate( lr );
        layers[index+1]->setLearningRate( lr );

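        // Denoising variant: when fraction_of_masked_inputs > 0, a random
        // subset of the input components (chosen via the shuffled index list
        // above) is zeroed out before encoding. The reconstruction target
        // below is still the full, unmasked input_representation.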
        if( fraction_of_masked_inputs > 0 )
        {
            masked_autoassociator_input << input_representation;
            for( int j=0 ; j < round(fraction_of_masked_inputs*layers[index]->size) ; j++)
                masked_autoassociator_input[ autoassociator_input_indices[j] ] = 0; 
            connections[index]->fprop( masked_autoassociator_input, activations[index+1]);
        }
        else
            connections[index]->fprop(input_representation,
                                      activations[index+1]);
        layers[index+1]->fprop(activations[index+1], expectations[index+1]);

        reconstruction_connections[ index ]->fprop( expectations[index+1],
                                                    reconstruction_activations);
        top_down_layers[ index ]->fprop( reconstruction_activations,
                                top_down_layers[ index ]->expectation);
        
        top_down_layers[ index ]->activation << reconstruction_activations;
        top_down_layers[ index ]->setExpectationByRef(
            top_down_layers[ index ]->expectation);
        real rec_err = top_down_layers[ index ]->fpropNLL(
            input_representation);
        train_costs[index] = rec_err;
        
        top_down_layers[ index ]->bpropNLL(
            input_representation, rec_err,
            reconstruction_activation_gradients);
    }

    // RBM learning
    if( !fast_exact_is_equal( cd_learning_rate, 0 ) )
    {
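        // One step of contrastive divergence (CD-1), with the asymmetry that
        // gives the class its name: the upward passes use layers[index+1],
        // while the downward pass and the visible-unit statistics use
        // top_down_layers[index].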
        connections[index]->setAsDownInput( input_representation );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();
        layers[index+1]->generateSample();
        
        // accumulate positive stats using the expectation
        // we deep-copy because the value will change during negative phase
        pos_down_val = expectations[index];
        pos_up_val << layers[index+1]->expectation;
        
        // down propagation, starting from a sample of layers[index+1]
        connections[index]->setAsUpInput( layers[index+1]->sample );
        
        top_down_layers[index]->getAllActivations( connections[index] );
        top_down_layers[index]->computeExpectation();
        top_down_layers[index]->generateSample();
        
        // negative phase
        connections[index]->setAsDownInput( top_down_layers[index]->sample );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();
        // accumulate negative stats
        // no need to deep-copy because the values won't change before update
        neg_down_val = top_down_layers[index]->sample;
        neg_up_val = layers[index+1]->expectation;
    }
    
    // Update hidden layer bias and weights

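    // Autoassociator updates: the reconstruction NLL gradient computed above
    // is pushed back through the top-down (decoding) layer, the
    // reconstruction connections, the hidden layer and finally the encoding
    // connections, reusing the two gradient buffers along the way.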
    if( !fast_exact_is_equal( greedy_learning_rate, 0 ) )
    {
        top_down_layers[ index ]->update(reconstruction_activation_gradients);
    
        reconstruction_connections[ index ]->bpropUpdate( 
            expectations[index+1],
            reconstruction_activations, 
            reconstruction_expectation_gradients, 
            reconstruction_activation_gradients);

        layers[ index+1 ]->bpropUpdate( 
            activations[index+1],
            expectations[index+1],
            // reused
            reconstruction_activation_gradients,
            reconstruction_expectation_gradients);
        
        if( fraction_of_masked_inputs > 0 )
            connections[ index ]->bpropUpdate( 
                masked_autoassociator_input,
                activations[index+1],
                reconstruction_expectation_gradients, //reused
                reconstruction_activation_gradients);
        else
            connections[ index ]->bpropUpdate( 
                input_representation,
                activations[index+1],
                reconstruction_expectation_gradients, //reused
                reconstruction_activation_gradients);
    }
     

    // RBM updates
    if( !fast_exact_is_equal( cd_learning_rate, 0 ) )
    {
        if( !fast_exact_is_equal( cd_decrease_ct , 0 ) )
            lr = cd_learning_rate/(1 + cd_decrease_ct 
                                       * this_stage); 
        else
            lr = cd_learning_rate;

        top_down_layers[index]->setLearningRate( lr );
        connections[index]->setLearningRate( lr );
        layers[index+1]->setLearningRate( lr );

        top_down_layers[index]->update( pos_down_val, neg_down_val );
        connections[index]->update( pos_down_val, pos_up_val,
                                    neg_down_val, neg_up_val );
        layers[index+1]->update( pos_up_val, neg_up_val );
    }
}

void TopDownAsymetricDeepNetwork::fineTuningStep( 
    const Vec& input, const Vec& target,
    Vec& train_costs)
{
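    // Supervised fine-tuning: propagate the input bottom-up to the top
    // representation, through the output module and cost, then backpropagate
    // and update the output module, the hidden layers and the bottom-up
    // connections. The top-down layers and reconstruction connections are
    // not touched in this phase.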
    // Get example representation
    computeRepresentation(input, input_representation, 
                          n_layers-1);

    final_module->fprop( input_representation, final_cost_input );
    final_cost->fprop( final_cost_input, target, final_cost_value );
        
    final_cost->bpropUpdate( final_cost_input, target,
                             final_cost_value[0],
                             final_cost_gradient );
    final_module->bpropUpdate( input_representation,
                               final_cost_input,
                               expectation_gradients[ n_layers-1 ],
                               final_cost_gradient );
    train_costs.last() = final_cost_value[0];
    for( int i=n_layers-1 ; i>0 ; i-- )
    {
        layers[i]->bpropUpdate( activations[i],
                                expectations[i],
                                activation_gradients[i],
                                expectation_gradients[i] );
        
        
        connections[i-1]->bpropUpdate( expectations[i-1],
                                       activations[i],
                                       expectation_gradients[i-1],
                                       activation_gradients[i] );
    }        
}

void TopDownAsymetricDeepNetwork::computeRepresentation(
    const Vec& input,
    Vec& representation,
    int layer) const
{
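    // Bottom-up inference: starting from the raw input in expectations[0],
    // propagate through the first `layer` connections/layers and return the
    // expectation of level `layer` as the representation.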
    if(layer == 0)
    {
        representation.resize(input.length());
        expectations[0] << input;
        representation << input;
        return;
    }

    expectations[0] << input;
    for( int i=0 ; i<layer; i++ )
    {
        connections[i]->fprop( expectations[i], activations[i+1] );
        layers[i+1]->fprop(activations[i+1],expectations[i+1]);
    }
    representation.resize(expectations[layer].length());
    representation << expectations[layer];
}

void TopDownAsymetricDeepNetwork::computeOutput(
    const Vec& input, Vec& output) const
{
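    // The output is the index of the predicted class, i.e. the argmax of the
    // final module's class scores computed on the deepest trained
    // representation.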
    computeRepresentation(input,input_representation, 
                          min(currently_trained_layer,n_layers-1));
    final_module->fprop( input_representation, final_cost_input );
    output[0] = argmax(final_cost_input);
}

void TopDownAsymetricDeepNetwork::computeCostsFromOutputs(
    const Vec& input, const Vec& output,
    const Vec& target, Vec& costs) const
{

    //Assumes that computeOutput has been called
    costs.resize( getTestCostNames().length() );
    costs.fill( MISSING_VALUE );

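    // During the greedy phases, report the reconstruction NLL of the layer
    // currently being trained (computed through its top-down layer); the last
    // cost is always the 0-1 classification error of the predicted class.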
    if( currently_trained_layer<n_layers 
        && reconstruction_connections.length() != 0 )
    {
        reconstruction_connections[ currently_trained_layer-1 ]->fprop( 
            expectations[currently_trained_layer],
            reconstruction_activations);
        top_down_layers[ currently_trained_layer-1 ]->fprop( 
            reconstruction_activations,
            top_down_layers[ currently_trained_layer-1 ]->expectation);
        
        top_down_layers[ currently_trained_layer-1 ]->activation << 
            reconstruction_activations;
        top_down_layers[ currently_trained_layer-1 ]->setExpectationByRef( 
            top_down_layers[ currently_trained_layer-1 ]->expectation);
        costs[ currently_trained_layer-1 ]  = 
            top_down_layers[ currently_trained_layer-1 ]->fpropNLL(
                expectations[currently_trained_layer-1]);
    }

    if( ((int)round(output[0])) == ((int)round(target[0])) )
        costs[n_layers-1] = 0;
    else
        costs[n_layers-1] = 1;
}

TVec<string> TopDownAsymetricDeepNetwork::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).

    TVec<string> cost_names(0);

    for( int i=0; i<layers.size()-1; i++)
        cost_names.push_back("reconstruction_error_" + tostring(i+1));
        
    cost_names.append( "class_error" );

    return cost_names;
}

TVec<string> TopDownAsymetricDeepNetwork::getTrainCostNames() const
{
    TVec<string> cost_names = getTestCostNames();
    cost_names.append( "NLL" );
    return cost_names;    
}

//#####  Helper functions  ##################################################

void TopDownAsymetricDeepNetwork::setLearningRate( real the_learning_rate )
{
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        layers[i]->setLearningRate( the_learning_rate );
        top_down_layers[i]->setLearningRate( the_learning_rate );
        connections[i]->setLearningRate( the_learning_rate );
    }
    layers[n_layers-1]->setLearningRate( the_learning_rate );
    top_down_layers[n_layers-1]->setLearningRate( the_learning_rate );

    final_module->setLearningRate( the_learning_rate );
    final_cost->setLearningRate( the_learning_rate );
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :