// -*- C++ -*-

// DiscriminativeDeepBeliefNet.cc
//
// Copyright (C) 2007 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#define PL_LOG_MODULE_NAME "DiscriminativeDeepBeliefNet"
#include <plearn/io/pl_log.h>

#include "DiscriminativeDeepBeliefNet.h"
#include <plearn/vmat/VMat_computeNearestNeighbors.h>
#include <plearn/vmat/GetInputVMatrix.h>
#include <plearn_learners/online/RBMMixedLayer.h>
#include <plearn_learners/online/RBMMixedConnection.h>

namespace PLearn {
using namespace std;
PLEARN_IMPLEMENT_OBJECT(
    DiscriminativeDeepBeliefNet,
    "Neural net, trained layer-wise in a greedy but focused fashion using autoassociators/RBMs and a supervised non-parametric gradient.",
    "It is highly inspired by the StackedFocusedAutoassociators class,\n"
    "and can use the same RBMLayer and RBMConnection components.\n"
    );

DiscriminativeDeepBeliefNet::DiscriminativeDeepBeliefNet() :
    cd_learning_rate( 0. ),
    cd_decrease_ct( 0. ),
    fine_tuning_learning_rate( 0. ),
    fine_tuning_decrease_ct( 0. ),
    k_neighbors( 1 ),
    n_classes( -1 ),
    discriminative_criteria_weight( 0. ),
    output_weights_l1_penalty_factor(0),
    output_weights_l2_penalty_factor(0),
    compare_joint_in_discriminative_criteria( false ),
    do_not_use_generative_criteria( false ),
//    cancel_normalization_terms( false ),
    n_layers( 0 ),
    nearest_neighbors_are_up_to_date( false ),
    currently_trained_layer( 0 )
{
    // random_gen will be initialized in PLearner::build_()
    random_gen = new PRandom();
    nstages = 0;
}

void DiscriminativeDeepBeliefNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "cd_learning_rate", 
                  &DiscriminativeDeepBeliefNet::cd_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the RBM "
                  "contrastive divergence training.\n");

    declareOption(ol, "cd_decrease_ct", 
                  &DiscriminativeDeepBeliefNet::cd_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the RBMs' contrastive\n"
                  "divergence training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");

    declareOption(ol, "fine_tuning_learning_rate", 
                  &DiscriminativeDeepBeliefNet::fine_tuning_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the fine tuning gradient descent.\n");

    declareOption(ol, "fine_tuning_decrease_ct", 
                  &DiscriminativeDeepBeliefNet::fine_tuning_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "fine tuning\n"
                  "gradient descent.\n");

    declareOption(ol, "training_schedule", 
                  &DiscriminativeDeepBeliefNet::training_schedule,
                  OptionBase::buildoption,
                  "Number of examples to use during each phase of greedy pre-training.\n"
                  "The number of fine-tuning steps is defined by nstages.\n"
        );

    declareOption(ol, "layers", &DiscriminativeDeepBeliefNet::layers,
                  OptionBase::buildoption,
                  "The layers of units in the network. The first element\n"
                  "of this vector should be the input layer and the\n"
                  "subsequent elements should be the hidden layers. The\n"
                  "output layer should not be included in layers.\n");

    declareOption(ol, "connections", &DiscriminativeDeepBeliefNet::connections,
                  OptionBase::buildoption,
                  "The weights of the connections between the layers.\n");

    declareOption(ol, "unsupervised_layers", 
                  &DiscriminativeDeepBeliefNet::unsupervised_layers,
                  OptionBase::buildoption,
                  "Additional units for greedy unsupervised learning.\n");

    declareOption(ol, "unsupervised_connections", 
                  &DiscriminativeDeepBeliefNet::unsupervised_connections,
                  OptionBase::buildoption,
                  "Additional connections for greedy unsupervised learning.\n");

    declareOption(ol, "k_neighbors", 
                  &DiscriminativeDeepBeliefNet::k_neighbors,
                  OptionBase::buildoption,
                  "Number of good nearest neighbors to attract and bad nearest "
                  "neighbors to repel.\n");

    declareOption(ol, "n_classes", 
                  &DiscriminativeDeepBeliefNet::n_classes,
                  OptionBase::buildoption,
                  "Number of classes.\n");

    declareOption(ol, "discriminative_criteria_weight", 
                  &DiscriminativeDeepBeliefNet::discriminative_criteria_weight,
                  OptionBase::buildoption,
                  "Weight of the discriminative criterion.\n");

    declareOption(ol, "output_weights_l1_penalty_factor", 
                  &DiscriminativeDeepBeliefNet::output_weights_l1_penalty_factor,
                  OptionBase::buildoption,
                  "L1 penalty factor on the output weights.\n");

    declareOption(ol, "output_weights_l2_penalty_factor", 
                  &DiscriminativeDeepBeliefNet::output_weights_l2_penalty_factor,
                  OptionBase::buildoption,
                  "L2 penalty factor on the output weights.\n");

    declareOption(ol, "compare_joint_in_discriminative_criteria", 
                  &DiscriminativeDeepBeliefNet::compare_joint_in_discriminative_criteria,
                  OptionBase::buildoption,
                  "Indication that the discriminative criterion should use the joint\n"
                  "over the input and the hidden units, instead of the conditional\n"
                  "over the hidden units given the input units.\n");

    declareOption(ol, "do_not_use_generative_criteria", 
                  &DiscriminativeDeepBeliefNet::do_not_use_generative_criteria,
                  OptionBase::buildoption,
                  "Indication that the generative criterion should not be used during learning\n"
                  "(does not work with compare_joint_in_discriminative_criteria = true).\n");

//    declareOption(ol, "cancel_normalization_terms", 
//                  &DiscriminativeDeepBeliefNet::cancel_normalization_terms,
//                  OptionBase::buildoption,
//                  "Indication that the discriminative and generative criteria should cancel\n"
//                  "their normalization terms. This is for the "
//                  "compare_joint_in_discriminative_criteria\n"
//                  "option, and this option ignores the value of discriminative_criteria_weight.\n");

    declareOption(ol, "greedy_stages", 
                  &DiscriminativeDeepBeliefNet::greedy_stages,
                  OptionBase::learntoption,
                  "Number of training samples seen in the different greedy "
                  "phases.\n"
        );

    declareOption(ol, "n_layers", &DiscriminativeDeepBeliefNet::n_layers,
                  OptionBase::learntoption,
                  "Number of layers.\n"
        );

    declareOption(ol, "final_module", 
                  &DiscriminativeDeepBeliefNet::final_module,
                  OptionBase::learntoption,
                  "Output layer of neural net.\n"
        );

    declareOption(ol, "final_cost", 
                  &DiscriminativeDeepBeliefNet::final_cost,
                  OptionBase::learntoption,
                  "Cost on output layer of neural net.\n"
        );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void DiscriminativeDeepBeliefNet::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of
    // ###    all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning"
    // ###    options have been modified.
    // ### You should assume that the parent class' build_() has already been
    // ### called.

    MODULE_LOG << "build_() called" << endl;

    if(inputsize_ > 0 && targetsize_ > 0)
    {
        // Initialize some learnt variables
        n_layers = layers.length();
        
        if( n_classes <= 0 )
            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
                    "n_classes should be > 0.\n");

        if( k_neighbors <= 0 )
            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
                    "k_neighbors should be > 0.\n");

        if( weightsize_ > 0 )
            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
                    "usage of weighted samples (weight size > 0) is not\n"
                    "implemented yet.\n");

        if( training_schedule.length() != n_layers-1 )
            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
                    "training_schedule should have %d elements.\n",
                    n_layers-1);
        
        if( compare_joint_in_discriminative_criteria && do_not_use_generative_criteria)
            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
                    "compare_joint_in_discriminative_criteria can't be used with\n"
                    "do_not_use_generative_criteria.\n");

//        if( (!compare_joint_in_discriminative_criteria || do_not_use_generative_criteria)
//            && cancel_normalization_terms )
//            PLERROR("DiscriminativeDeepBeliefNet::build_() - \n"
//                    "cancel_normalization_terms should be used with\n"
//                    "compare_joint_in_discriminative_criteria and without\n"
//                    "do_not_use_generative_criteria.\n");

        if(greedy_stages.length() == 0)
        {
            greedy_stages.resize(n_layers-1);
            greedy_stages.clear();
        }

        if(stage > 0)
            currently_trained_layer = n_layers;
        else
        {
            currently_trained_layer = n_layers-1;
            while(currently_trained_layer>1
                  && greedy_stages[currently_trained_layer-1] <= 0)
                currently_trained_layer--;
        }

        build_layers_and_connections();

        if( !final_module || !final_cost )
            build_output_layer_and_cost();
    }
}

void DiscriminativeDeepBeliefNet::build_output_layer_and_cost()
{
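    // The supervised head is a linear layer (GradNNetLayerModule) followed
    // by a softmax, stacked into a single module. Its cost combines an NLL
    // term (weight 1) with a class-error term given weight 0, so the latter
    // contributes no gradient to training.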
    GradNNetLayerModule* gnl = new GradNNetLayerModule();
    gnl->input_size = layers[n_layers-1]->size;
    gnl->output_size = n_classes;
    gnl->L1_penalty_factor = output_weights_l1_penalty_factor;
    gnl->L2_penalty_factor = output_weights_l2_penalty_factor;
    gnl->random_gen = random_gen;
    gnl->build();

    SoftmaxModule* sm = new SoftmaxModule();
    sm->input_size = n_classes;
    sm->random_gen = random_gen;
    sm->build();

    ModuleStackModule* msm = new ModuleStackModule();
    msm->modules.resize(2);
    msm->modules[0] = gnl;
    msm->modules[1] = sm;
    msm->random_gen = random_gen;
    msm->build();
    final_module = msm;

    final_module->forget();

    NLLCostModule* nll = new NLLCostModule();
    nll->input_size = n_classes;
    nll->random_gen = random_gen;
    nll->build();
    
    ClassErrorCostModule* class_error = new ClassErrorCostModule();
    class_error->input_size = n_classes;
    class_error->random_gen = random_gen;
    class_error->build();

    CombiningCostsModule* comb_costs = new CombiningCostsModule();
    comb_costs->cost_weights.resize(2);
    comb_costs->cost_weights[0] = 1;
    comb_costs->cost_weights[1] = 0;
    comb_costs->sub_costs.resize(2);
    comb_costs->sub_costs[0] = nll;
    comb_costs->sub_costs[1] = class_error;
    comb_costs->build();

    final_cost = comb_costs;
    final_cost->forget();
}

void DiscriminativeDeepBeliefNet::build_layers_and_connections()
{
    MODULE_LOG << "build_layers_and_connections() called" << endl;

    if( connections.length() != n_layers-1 )
        PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() - \n"
                "there should be %d connections.\n",
                n_layers-1);

    if(unsupervised_layers.length() != n_layers-1 
       && unsupervised_layers.length() != 0)
        PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() - \n"
                "there should be either 0 or %d unsupervised_layers.\n",
                n_layers-1);
        
    if(unsupervised_connections.length() != n_layers-1 
       && unsupervised_connections.length() != 0)
        PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() - \n"
                "there should be either 0 or %d unsupervised_connections.\n",
                n_layers-1);
        
    if(unsupervised_connections.length() != unsupervised_layers.length())
        PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() - \n"
                "there should be as many unsupervised_connections as "
                "unsupervised_layers.\n");

    if(layers[0]->size != inputsize_)
        PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() - \n"
                "layers[0] should have a size of %d.\n",
                inputsize_);

    activations.resize( n_layers );
    expectations.resize( n_layers );
    activation_gradients.resize( n_layers );
    expectation_gradients.resize( n_layers );

    greedy_layers.resize(n_layers-1);
    greedy_connections.resize(n_layers-1);
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        if( layers[i]->size != connections[i]->down_size )
            PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have a down_size of %d.\n",
                    i, layers[i]->size);

        if( connections[i]->up_size != layers[i+1]->size )
            PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have an up_size of %d.\n",
                    i, layers[i+1]->size);

        if(unsupervised_layers.length() != 0 &&
           unsupervised_connections.length() != 0 && 
           unsupervised_layers[i] && unsupervised_connections[i])
        {
            if( layers[i]->size != 
                unsupervised_connections[i]->down_size )
                PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() "
                        "- \n"
                        "unsupervised_connections[%i] should have a down_size of %d.\n",
                        i, layers[i]->size);
            
            if( unsupervised_connections[i]->up_size != 
                unsupervised_layers[i]->size )
                PLERROR("DiscriminativeDeepBeliefNet::build_layers_and_connections() "
                        "- \n"
                        "unsupervised_connections[%i] should have an up_size of %d.\n",
                        i, unsupervised_layers[i]->size);
            
            if( !(unsupervised_layers[i]->random_gen) )
            {
                unsupervised_layers[i]->random_gen = random_gen;
                unsupervised_layers[i]->forget();
            }
            
            if( !(unsupervised_connections[i]->random_gen) )
            {
                unsupervised_connections[i]->random_gen = random_gen;
                unsupervised_connections[i]->forget();
            }

            PP<RBMMixedLayer> greedy_layer = new RBMMixedLayer();
            greedy_layer->sub_layers.resize(2);
            greedy_layer->sub_layers[0] = layers[i+1];
            greedy_layer->sub_layers[1] = unsupervised_layers[i];
            greedy_layer->size = layers[i+1]->size + unsupervised_layers[i]->size;
            greedy_layer->build();

            PP<RBMMixedConnection> greedy_connection = new RBMMixedConnection();
            greedy_connection->sub_connections.resize(2,1);
            greedy_connection->sub_connections(0,0) = connections[i];
            greedy_connection->sub_connections(1,0) = unsupervised_connections[i];
            greedy_connection->build();
            
            greedy_layers[i] = greedy_layer;
            greedy_connections[i] = greedy_connection;
        }
        else
        {
            greedy_layers[i] = layers[i+1];
            greedy_connections[i] = connections[i];
        }

        if( !(layers[i]->random_gen) )
        {
            layers[i]->random_gen = random_gen;
            layers[i]->forget();
        }

        if( !(connections[i]->random_gen) )
        {
            connections[i]->random_gen = random_gen;
            connections[i]->forget();
        }

        activations[i].resize( layers[i]->size );
        expectations[i].resize( layers[i]->size );
        activation_gradients[i].resize( layers[i]->size );
        expectation_gradients[i].resize( layers[i]->size );
    }

    if( !(layers[n_layers-1]->random_gen) )
    {
        layers[n_layers-1]->random_gen = random_gen;
        layers[n_layers-1]->forget();
    }
    activations[n_layers-1].resize( layers[n_layers-1]->size );
    expectations[n_layers-1].resize( layers[n_layers-1]->size );
    activation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
    expectation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
}

// ### Nothing to add here, simply calls build_
void DiscriminativeDeepBeliefNet::build()
{
    inherited::build();
    build_();
}


void DiscriminativeDeepBeliefNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // deepCopyField(, copies);

    // Public options
    deepCopyField(training_schedule, copies);
    deepCopyField(layers, copies);
    deepCopyField(connections, copies);
    deepCopyField(unsupervised_layers, copies);
    deepCopyField(unsupervised_connections, copies);

    // Protected options
    deepCopyField(activations, copies);
    deepCopyField(expectations, copies);
    deepCopyField(activation_gradients, copies);
    deepCopyField(expectation_gradients, copies);
    deepCopyField(greedy_layers, copies);
    deepCopyField(greedy_connections, copies);
    deepCopyField(dissimilar_example_representation, copies);
    deepCopyField(input_representation, copies);
    deepCopyField(pos_down_val, copies);
    deepCopyField(pos_up_val, copies);
    deepCopyField(neg_down_val, copies);
    deepCopyField(neg_up_val, copies);
    deepCopyField(disc_pos_down_val1, copies);
    deepCopyField(disc_pos_up_val1, copies);
    deepCopyField(disc_pos_down_val2, copies);
    deepCopyField(disc_pos_up_val2, copies);
    deepCopyField(disc_neg_down_val, copies);
    deepCopyField(disc_neg_up_val, copies);
    deepCopyField(final_cost_input, copies);
    deepCopyField(final_cost_value, copies);
    deepCopyField(final_cost_gradient, copies);
    deepCopyField(other_class_datasets, copies);
    deepCopyField(nearest_neighbors_indices, copies);
    deepCopyField(greedy_stages, copies);
    deepCopyField(final_module, copies);
    deepCopyField(final_cost, copies);
}


int DiscriminativeDeepBeliefNet::outputsize() const
{
    if( currently_trained_layer>n_layers-1 )
        return 1;
    else
        return layers[currently_trained_layer]->size;
}

void DiscriminativeDeepBeliefNet::forget()
{

    inherited::forget();

    for( int i=0 ; i<n_layers ; i++ )
        layers[i]->forget();
    
    for( int i=0 ; i<n_layers-1 ; i++ )
        connections[i]->forget();
    
    if(unsupervised_layers.length() != 0)
        for( int i=0 ; i<n_layers-1 ; i++ )
            unsupervised_layers[i]->forget();
    
    if(unsupervised_connections.length() != 0)
        for( int i=0 ; i<n_layers-1 ; i++ )
            unsupervised_connections[i]->forget();
    
    build_output_layer_and_cost();

    stage = 0;
    greedy_stages.clear();
}

void DiscriminativeDeepBeliefNet::train()
{
    MODULE_LOG << "train() called " << endl;
    MODULE_LOG << "  training_schedule = " << training_schedule << endl;

    Vec input( inputsize() );
    Vec dissimilar_example( inputsize() );
    Vec target( targetsize() );
    Vec target2( targetsize() );
    real weight; // unused
    real weight2; // unused
    
    TVec<string> train_cost_names = getTrainCostNames();
    Vec train_costs( train_cost_names.length() );
    train_costs.fill(MISSING_VALUE);

    int nsamples = train_set->length();
    int sample;

    PP<ProgressBar> pb;

    // clear stats of previous epoch
    train_stats->forget();

    int init_stage;

    /***** initial greedy training *****/
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        updateNearestNeighbors();
            
        MODULE_LOG << "Training connection weights between layers " << i
            << " and " << i+1 << endl;

        int end_stage = training_schedule[i];
        int* this_stage = greedy_stages.subVec(i,1).data();
        init_stage = *this_stage;

        MODULE_LOG << "  stage = " << *this_stage << endl;
        MODULE_LOG << "  end_stage = " << end_stage << endl;

        if( report_progress && *this_stage < end_stage )
            pb = new ProgressBar( "Training layer "+tostring(i)
                                  +" of "+classname(),
                                  end_stage - init_stage );

        train_costs.fill(MISSING_VALUE);

        dissimilar_example_representation.resize(layers[i]->size);
        input_representation.resize(layers[i]->size);

        pos_down_val.resize(layers[i]->size);
        pos_up_val.resize(greedy_layers[i]->size);
        neg_down_val.resize(layers[i]->size);
        neg_up_val.resize(greedy_layers[i]->size);

        disc_pos_down_val1.resize(layers[i]->size);
        disc_pos_up_val1.resize(layers[i+1]->size);
        disc_pos_down_val2.resize(layers[i]->size);
        disc_pos_up_val2.resize(layers[i+1]->size);
        disc_neg_down_val.resize(layers[i]->size);
        disc_neg_up_val.resize(layers[i+1]->size);

        for( ; *this_stage<end_stage ; (*this_stage)++ )
        {
            sample = *this_stage % nsamples;
            train_set->getExample(sample, input, target, weight);

            // Find dissimilar example
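            // The dissimilar example is drawn uniformly among this example's
            // k_neighbors nearest neighbors taken from the classes other
            // than its own (precomputed in updateNearestNeighbors()).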
            int dissim_index = nearest_neighbors_indices(
                sample,random_gen->uniform_multinomial_sample(k_neighbors));
            
            other_class_datasets[(int)round(target[0])]->getExample(dissim_index,
                                                                    dissimilar_example, 
                                                                    target2, weight2);
            
            if(((int)round(target[0])) == ((int)round(target2[0])))
                PLERROR("DiscriminativeDeepBeliefNet::train(): dissimilar"
                        " example is from same class!");

            greedyStep( input, target, i, train_costs, *this_stage,
                        dissimilar_example);
            train_stats->update( train_costs );

            if( pb )
                pb->update( *this_stage - init_stage + 1 );
        }
    }

    /***** fine-tuning by gradient descent *****/
    if( stage < nstages )
    {

        MODULE_LOG << "Fine-tuning all parameters by gradient descent" << endl;
        MODULE_LOG << "  stage = " << stage << endl;
        MODULE_LOG << "  nstages = " << nstages << endl;
        MODULE_LOG << "  fine_tuning_learning_rate = " << 
            fine_tuning_learning_rate << endl;

        init_stage = stage;
        if( report_progress && stage < nstages )
            pb = new ProgressBar( "Fine-tuning parameters of all layers of "
                                  + classname(),
                                  nstages - init_stage );

        setLearningRate( fine_tuning_learning_rate );
        train_costs.fill(MISSING_VALUE);

        final_cost_input.resize(n_classes);
        final_cost_value.resize(2); // Should be resized anyway
        final_cost_gradient.resize(n_classes);

        for( ; stage<nstages ; stage++ )
        {
            sample = stage % nsamples;
            if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) )
                setLearningRate( fine_tuning_learning_rate
                                 / (1. + fine_tuning_decrease_ct * stage ) );

            train_set->getExample( sample, input, target, weight );

            fineTuningStep( input, target, train_costs );
            train_stats->update( train_costs );

            if( pb )
                pb->update( stage - init_stage + 1 );
        }

    }
    
    train_stats->finalize();
    MODULE_LOG << "  train costs = " << train_stats->getMean() << endl;


    // Update currently_trained_layer
    if(stage > 0)
        currently_trained_layer = n_layers;
    else
    {
        currently_trained_layer = n_layers-1;
        while(currently_trained_layer>1 
              && greedy_stages[currently_trained_layer-1] <= 0)
            currently_trained_layer--;
    }
}

void DiscriminativeDeepBeliefNet::greedyStep( 
    const Vec& input, const Vec& target, int index, 
    Vec train_costs, int this_stage, Vec dissimilar_example )
{
    PLASSERT( index < n_layers );
    real lr;
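
    // One greedy step trains the level-'index' RBM in two parts: an optional
    // CD-1 generative update (positive phase on the input's representation,
    // negative phase on a one-step reconstruction sample), then a
    // discriminative update which, in the default configuration, gathers
    // positive statistics on the input's and the dissimilar example's
    // representations and negative statistics on the midpoint between them.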

    // Get dissimilar example representation
    computeRepresentation(dissimilar_example, dissimilar_example_representation, 
                          index);

    // Get example representation
    computeRepresentation(input, input_representation, 
                          index);

    if( !do_not_use_generative_criteria )
    {
        // CD generative learning stats
        
        // Positive phase
        greedy_connections[index]->setAsDownInput( input_representation );
        greedy_layers[index]->getAllActivations( greedy_connections[index] );
        greedy_layers[index]->computeExpectation();
        greedy_layers[index]->generateSample();
        
        pos_down_val << input_representation;
        pos_up_val << greedy_layers[index]->expectation;
        
        if( !compare_joint_in_discriminative_criteria )
        {
            disc_pos_down_val1 << input_representation;
            disc_pos_up_val1 << layers[index+1]->expectation;
        }
        
//        if( !cancel_normalization_terms )
//        {
        // Negative phase
        greedy_connections[index]->setAsUpInput( greedy_layers[index]->sample );
        layers[index]->getAllActivations( greedy_connections[index] );
        layers[index]->computeExpectation();
        layers[index]->generateSample();
        
        greedy_connections[index]->setAsDownInput( layers[index]->sample );
        greedy_layers[index]->getAllActivations( greedy_connections[index] );
        greedy_layers[index]->computeExpectation();
        
        neg_down_val << layers[index]->sample;
        neg_up_val << greedy_layers[index]->expectation;
//      }
    }
    else if( !compare_joint_in_discriminative_criteria )
    {
        
        connections[index]->setAsDownInput( input_representation );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();
        
        disc_pos_down_val1 << input_representation;
        disc_pos_up_val1 << layers[index+1]->expectation;
    }

    // CD discriminative criteria stats

    if( !compare_joint_in_discriminative_criteria )
    {
        // Positive phase
        connections[index]->setAsDownInput( dissimilar_example_representation );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();
        
        disc_pos_down_val2 << dissimilar_example_representation;
        disc_pos_up_val2 << layers[index+1]->expectation;
    }

    // Negative phase
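    // The discriminative negative phase is driven by the midpoint between
    // the example's representation and the dissimilar example's
    // representation: lowering the model's unnormalized likelihood of this
    // blended point pushes the two representations apart.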
    disc_neg_down_val << input_representation;
    disc_neg_down_val += dissimilar_example_representation;
    disc_neg_down_val /= 2;
    connections[index]->setAsDownInput( disc_neg_down_val );
    layers[index+1]->getAllActivations( connections[index] );
    layers[index+1]->computeExpectation();

    disc_neg_up_val << layers[index+1]->expectation;

    if( compare_joint_in_discriminative_criteria )
        //&& !cancel_normalization_terms)
    {
        layers[index+1]->generateSample();
        connections[index]->setAsUpInput( layers[index+1]->sample );
        layers[index]->getAllActivations( connections[index] );
        layers[index]->computeExpectation();
        layers[index]->generateSample();

        connections[index]->setAsDownInput( layers[index]->sample );
        layers[index+1]->getAllActivations( connections[index] );
        layers[index+1]->computeExpectation();

        disc_pos_down_val1 << layers[index]->sample;
        disc_pos_up_val1 << layers[index+1]->expectation;
    }

    // RBM updates
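    // One step of CD-1: each parameter is nudged toward the positive-phase
    // statistics and away from the negative-phase statistics. For the
    // weight matrix this is, up to the exact parametrization of the
    // connection class,
    //     W += lr * ( pos_up_val * pos_down_val' - neg_up_val * neg_down_val' )
    // while the layers update their biases from the down/up values alone.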
    if( !do_not_use_generative_criteria )
        //&& !cancel_normalization_terms )
    {
        lr = cd_learning_rate/(1 + cd_decrease_ct 
                               * this_stage); 
        
        layers[index]->setLearningRate( lr );
        greedy_connections[index]->setLearningRate( lr );
        greedy_layers[index]->setLearningRate( lr );
        
        layers[index]->update( pos_down_val, neg_down_val );
        greedy_connections[index]->update( pos_down_val, pos_up_val,
                                           neg_down_val, neg_up_val );
        greedy_layers[index]->update( pos_up_val, neg_up_val );
    }
    
    if( //cancel_normalization_terms || 
        discriminative_criteria_weight != 0 )
    {
        lr = discriminative_criteria_weight * 
            cd_learning_rate/(1 + cd_decrease_ct 
                              * this_stage); 
        
        if( !compare_joint_in_discriminative_criteria )
        {
            layers[index]->setLearningRate( lr );
            connections[index]->setLearningRate( lr );
            layers[index+1]->setLearningRate( lr );
            
            layers[index]->accumulatePosStats( disc_pos_down_val1 );
            layers[index]->accumulatePosStats( disc_pos_down_val2 );
            layers[index]->accumulateNegStats( disc_neg_down_val );
            layers[index]->update();
            
            connections[index]->accumulatePosStats( disc_pos_down_val1,
                                                    disc_pos_up_val1 );
            connections[index]->accumulatePosStats( disc_pos_down_val2,
                                                    disc_pos_up_val2 );
            connections[index]->accumulateNegStats( disc_neg_down_val,
                                                    disc_neg_up_val );
            connections[index]->update();
            
            layers[index+1]->accumulatePosStats( disc_pos_up_val1 );
            layers[index+1]->accumulatePosStats( disc_pos_up_val2 );
            layers[index+1]->accumulateNegStats( disc_neg_up_val );
            layers[index+1]->update();
        }
        else //if( !cancel_normalization_terms )
        {
            layers[index]->setLearningRate( lr );
            connections[index]->setLearningRate( lr );
            layers[index+1]->setLearningRate( lr );
            
            layers[index]->accumulatePosStats( disc_pos_down_val1 );
            layers[index]->accumulateNegStats( disc_neg_down_val );
            layers[index]->update();
            
            connections[index]->accumulatePosStats( disc_pos_down_val1,
                                                    disc_pos_up_val1 );
            connections[index]->accumulateNegStats( disc_neg_down_val,
                                                    disc_neg_up_val );
            connections[index]->update();
            
            layers[index+1]->accumulatePosStats( disc_pos_up_val1 );
            layers[index+1]->accumulateNegStats( disc_neg_up_val );
            layers[index+1]->update();
        }
//        else
//        {
//            lr = cd_learning_rate/(1 + cd_decrease_ct 
//                                   * this_stage); 
//            layers[index]->setLearningRate( lr );
//            connections[index]->setLearningRate( lr );
//            layers[index+1]->setLearningRate( lr );
//            
//            layers[index]->accumulatePosStats( pos_down_val );
//            layers[index]->accumulateNegStats( disc_neg_down_val );
//            layers[index]->update();
//            
//            connections[index]->accumulatePosStats( pos_down_val,
//                                                    pos_up_val );
//            connections[index]->accumulateNegStats( disc_neg_down_val,
//                                                    disc_neg_up_val );
//            connections[index]->update();
//            
//            layers[index+1]->accumulatePosStats( pos_up_val );
//            layers[index+1]->accumulateNegStats( disc_neg_up_val );
//            layers[index+1]->update();
//        }
    }
}

void DiscriminativeDeepBeliefNet::fineTuningStep( 
    const Vec& input, const Vec& target,
    Vec& train_costs )
{
    // Get example representation

    computeRepresentation(input, input_representation, 
                          n_layers-1);

    // Compute supervised gradient
    final_module->fprop( input_representation, final_cost_input );
    final_cost->fprop( final_cost_input, target, final_cost_value );
    
    final_cost->bpropUpdate( final_cost_input, target,
                             final_cost_value[0],
                             final_cost_gradient );
    final_module->bpropUpdate( input_representation,
                               final_cost_input,
                               expectation_gradients[ n_layers-1 ],
                               final_cost_gradient );

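    // Backpropagate through the stack: each layer maps the gradient on its
    // expectations into a gradient on its activations, and each connection
    // carries it down to the previous layer's expectations, updating its
    // parameters along the way (bpropUpdate both propagates and updates).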
    for( int i=n_layers-1 ; i>0 ; i-- )
    {
        layers[i]->bpropUpdate( activations[i],
                                expectations[i],
                                activation_gradients[i],
                                expectation_gradients[i] );
        
        
        connections[i-1]->bpropUpdate( expectations[i-1],
                                       activations[i],
                                       expectation_gradients[i-1],
                                       activation_gradients[i] );
    }
}

void DiscriminativeDeepBeliefNet::computeRepresentation(const Vec& input,
                                                        Vec& representation,
                                                        int layer) const
{
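    // Forward propagation: expectations[0] holds the raw input, and each
    // connection/layer pair maps expectations[i] to expectations[i+1]; the
    // expectation at level 'layer' is copied into 'representation'.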
    if(layer == 0)
    {
        representation.resize(input.length());
        expectations[0] << input;
        representation << input;
        return;
    }

    expectations[0] << input;
    for( int i=0 ; i<layer; i++ )
    {
        connections[i]->fprop( expectations[i], activations[i+1] );
        layers[i+1]->fprop(activations[i+1],expectations[i+1]);
    }
    representation.resize(expectations[layer].length());
    representation << expectations[layer];
}

void DiscriminativeDeepBeliefNet::computeOutput(const Vec& input, Vec& output) const
{
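    // Once fine-tuning has started (currently_trained_layer == n_layers),
    // output the predicted class (argmax of the output module); during
    // greedy training, output the representation of the deepest layer
    // trained so far.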
    if( currently_trained_layer>n_layers-1 )
    {
        computeRepresentation(input,input_representation, 
                              n_layers-1);
        final_module->fprop( input_representation, final_cost_input );
        output[0] = argmax(final_cost_input);
    }
    else
    {
        computeRepresentation(input, output,
                              currently_trained_layer);
    }
}

void DiscriminativeDeepBeliefNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{
    //Assumes that computeOutput has been called

    costs.resize( getTestCostNames().length() );
    costs.fill( MISSING_VALUE );

    if( currently_trained_layer>n_layers-1 )
    {
        if( ((int)round(output[0])) == ((int)round(target[0])) )
            costs.last() = 0;
        else
            costs.last() = 1;
    }
}

TVec<string> DiscriminativeDeepBeliefNet::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).

    TVec<string> cost_names(0);

    cost_names.append( "class_error" );

    return cost_names;
}

TVec<string> DiscriminativeDeepBeliefNet::getTrainCostNames() const
{
    return getTestCostNames();
}

void DiscriminativeDeepBeliefNet::setTrainingSet(VMat training_set, bool call_forget)
{
    inherited::setTrainingSet(training_set,call_forget);
    nearest_neighbors_are_up_to_date = false;
}

void DiscriminativeDeepBeliefNet::updateNearestNeighbors()
{
    if( !nearest_neighbors_are_up_to_date )
    {
        MODULE_LOG << "Computing nearest neighbors" << endl;

        Vec input( inputsize() );
        Vec target( targetsize() );
        real weight; // unused

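        // For each class k, other_class_datasets[k] collects every training
        // example whose class differs from k; the "dissimilar" neighbors of
        // a class-k example are searched in this dataset.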
        other_class_datasets.resize(n_classes);
        for(int k=0; k<n_classes; k++)
        {
            other_class_datasets[k] = new ClassSubsetVMatrix();
            other_class_datasets[k]->classes.resize(0);
            for(int l=0; l<n_classes; l++)
                if( l != k )
                    other_class_datasets[k]->classes.append(l);
            other_class_datasets[k]->source = train_set;
            other_class_datasets[k]->build();
        }
        
        
        // Find training nearest neighbors
        input.resize(train_set->inputsize());
        target.resize(train_set->targetsize());
        nearest_neighbors_indices.resize(train_set->length(), k_neighbors);
        TVec<int> nearest_neighbors_indices_row;
        for(int i=0; i<train_set.length(); i++)
        {
            train_set->getExample(i,input,target,weight);
            nearest_neighbors_indices_row = nearest_neighbors_indices(i);
            computeNearestNeighbors(
                new GetInputVMatrix((VMatrix *)
                                    other_class_datasets[(int)round(target[0])]),
                input,
                nearest_neighbors_indices_row,
                -1);
        }
    }
    
    nearest_neighbors_are_up_to_date = true;
}

//#####  Helper functions  ##################################################

void DiscriminativeDeepBeliefNet::setLearningRate( real the_learning_rate )
{
    layers[0]->setLearningRate( the_learning_rate );
    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        greedy_layers[i]->setLearningRate( the_learning_rate );
        greedy_connections[i]->setLearningRate( the_learning_rate );
    }

    final_module->setLearningRate( the_learning_rate );
    final_cost->setLearningRate( the_learning_rate );
}


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :