HintonDeepBeliefNet.cc
00001 // -*- C++ -*-
00002 
00003 // HintonDeepBeliefNet.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00039 #define PL_LOG_MODULE_NAME "HintonDeepBeliefNet"
00040 #include <plearn/io/pl_log.h>
00041 #if USING_MPI
00042 #include <plearn/sys/PLMPI.h>
00043 #endif
00044 
00045 #include "HintonDeepBeliefNet.h"
00046 #include "RBMLayer.h"
00047 #include "RBMMixedLayer.h"
00048 #include "RBMMultinomialLayer.h"
00049 #include "RBMParameters.h"
00050 #include "RBMLLParameters.h"
00051 #include "RBMJointLLParameters.h"
00052 //#include <unistd.h>
00053 
00054 namespace PLearn {
00055 using namespace std;
00056 
00057 PLEARN_IMPLEMENT_OBJECT(
00058     HintonDeepBeliefNet,
00059     "Does the same thing as Hinton's deep belief nets",
00060     "or, at least, tries to do so..."
00061 );
00062 
00064 // HintonDeepBeliefNet //
00066 HintonDeepBeliefNet::HintonDeepBeliefNet() :
00067     learning_rate(0.),
00068     fine_tuning_learning_rate(-1.),
00069     fine_tuning_decrease_ct(0.),
00070     weight_decay(0.),
00071     sum_parallel_contributions(0),
00072     use_sample_or_expectation(4)
00073 {
00074     use_sample_or_expectation[0] = 0;
00075     use_sample_or_expectation[1] = 1;
00076     use_sample_or_expectation[2] = 2;
00077     use_sample_or_expectation[3] = 0;
00078     random_gen = new PRandom();
00079     ptimer = new PTimer();
00080     ptimer->newTimer("training_time");
00081     ptimer->newTimer("test_time");
00082 }
00083 
00085 // declareOptions //
00087 void HintonDeepBeliefNet::declareOptions(OptionList& ol)
00088 {
00089     declareOption(ol, "learning_rate", &HintonDeepBeliefNet::learning_rate,
00090                   OptionBase::buildoption,
00091                   "Learning rate used during greedy learning");
00092 
00093     declareOption(ol, "fine_tuning_learning_rate",
00094                   &HintonDeepBeliefNet::fine_tuning_learning_rate,
00095                   OptionBase::buildoption,
00096                   "Learning rate used during the gradient descent");
00097 
00098     declareOption(ol, "fine_tuning_decrease_ct",
00099                   &HintonDeepBeliefNet::fine_tuning_decrease_ct,
00100                   OptionBase::buildoption,
00101                   "Decrease constant used during the gradient descent\n"
00102                   "(in fact, it will only be updated only once every epoch.\n");
00103 
00104     declareOption(ol, "weight_decay", &HintonDeepBeliefNet::weight_decay,
00105                   OptionBase::buildoption,
00106                   "Weight decay");
00107 
00108     declareOption(ol, "initialization_method",
00109                   &HintonDeepBeliefNet::initialization_method,
00110                   OptionBase::buildoption,
00111                   "The method used to initialize the weights:\n"
00112                   "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
00113                   "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
00114                   " 1/sqrt(d)]\n"
00115                   "  - \"zero\"           = all weights are set to 0,\n"
00116                   "where d = max( up_layer_size, down_layer_size ).\n");
00117 
00118     declareOption(ol, "training_schedule",
00119                   &HintonDeepBeliefNet::training_schedule,
00120                   OptionBase::buildoption,
00121                   "Total number of examples that should be seen until each"
00122                   " layer\n"
00123                   "have been greedily trained.\n"
00124                   "We should always have training_schedule[i] <"
00125                   " training_schedule[i+1].\n");
00126 
00127     declareOption(ol, "layers", &HintonDeepBeliefNet::layers,
00128                   OptionBase::buildoption,
00129                   "Layers that learn representations of the input,"
00130                   " unsupervisedly.\n"
00131                   "layers[0] is input layer.\n");
00132 
00133     declareOption(ol, "target_layer", &HintonDeepBeliefNet::target_layer,
00134                   OptionBase::buildoption,
00135                   "Target (or label) layer");
00136 
00137     declareOption(ol, "params", &HintonDeepBeliefNet::params,
00138                   OptionBase::buildoption,
00139                   "RBMParameters linking the unsupervised layers.\n"
00140                   "params[i] links layers[i] and layers[i+1], except for"
00141                   "params[n_layers-1],\n"
00142                   "that links layers[n_layers-1] and last_layer.\n");
00143 
00144     declareOption(ol, "target_params", &HintonDeepBeliefNet::target_params,
00145                   OptionBase::buildoption,
00146                   "Parameters linking target_layer and last_layer");
00147 
00148     declareOption(ol, "use_sample_or_expectation",
00149                   &HintonDeepBeliefNet::use_sample_or_expectation,
00150                   OptionBase::buildoption,
00151                   "Vector providing information on which information to use"
00152                   " during the\n"
00153                   "contrastive divergence step:\n"
00154                   "  - 0 means that we use the expectation only,\n"
00155                   "  - 1 means that we sample (for the next step), but we use"
00156                   " the\n"
00157                   "    expectation in the CD update formula,\n"
00158                   "  - 2 means that we use the sample only.\n"
00159                   "The order of the arguments matches the steps of CD:\n"
00160                   "  - visible unit during positive phase (you should keep it"
00161                   " to 0),\n"
00162                   "  - hidden unit during positive phase,\n"
00163                   "  - visible unit during negative phase,\n"
00164                   "  - hidden unit during negative phase (you should keep it"
00165                   " to 0).\n");
00166 
00167     declareOption(ol, "sum_parallel_contributions",
00168                   &HintonDeepBeliefNet::sum_parallel_contributions,
00169                   OptionBase::buildoption,
00170                   "Only used when USING_MPI for parallelization\n"
00171                   "sum or average the delta-w contributions from different processes?\n");
00172 
00173     declareOption(ol, "n_layers", &HintonDeepBeliefNet::n_layers,
00174                   OptionBase::learntoption,
00175                   "Number of unsupervised layers, including input layer");
00176 
00177     declareOption(ol, "last_layer", &HintonDeepBeliefNet::last_layer,
00178                   OptionBase::learntoption,
00179                   "Last layer, learning joint representations of input and"
00180                   " target");
00181 
00182     declareOption(ol, "joint_layer", &HintonDeepBeliefNet::joint_layer,
00183                   OptionBase::nosave,
00184                   "Concatenation of target_layer and layers[n_layers-1]");
00185 
00186     declareOption(ol, "joint_params", &HintonDeepBeliefNet::joint_params,
00187                   OptionBase::nosave,
00188                   "Parameters linking joint_layer and last_layer");
00189 
00190     // Now call the parent class' declareOptions().
00191     inherited::declareOptions(ol);
00192 }
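          // Illustrative sketch only (not part of the original file): a
          // learner of this class could be specified in a PLearn script
          // roughly as follows; the layer classes, sizes and schedule are
          // hypothetical placeholders.
          //
          //   HintonDeepBeliefNet(
          //       learning_rate = 0.01;
          //       fine_tuning_learning_rate = 0.001;
          //       training_schedule = [ 10000, 20000, 30000 ];
          //       layers = [ RBMBinomialLayer( size = 784 ),
          //                  RBMBinomialLayer( size = 500 ),
          //                  RBMBinomialLayer( size = 500 ),
          //                  RBMBinomialLayer( size = 2000 ) ];
          //       target_layer = RBMMultinomialLayer( size = 10 );
          //   )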
00193 
00195 // build //
00197 void HintonDeepBeliefNet::build()
00198 {
00199     // ### Nothing to add here, simply calls build_().
00200     inherited::build();
00201     build_();
00202 }
00203 
00205 // build_ //
00207 void HintonDeepBeliefNet::build_()
00208 {
00209     MODULE_LOG << "build_() called" << endl;
00210     n_layers = layers.length();
00211     if( n_layers <= 1 )
00212         return;
00213 
00214     if( fine_tuning_learning_rate < 0. )
00215         fine_tuning_learning_rate = learning_rate;
00216 
00217     // check value of initialization_method
00218     string im = lowerstring( initialization_method );
00219     if( im == "" || im == "uniform_sqrt" )
00220         initialization_method = "uniform_sqrt";
00221     else if( im == "uniform_linear" )
00222         initialization_method = im;
00223     else if( im == "zero" )
00224         initialization_method = im;
00225     else
00226         PLERROR( "RBMParameters::build_ - initialization_method\n"
00227                  "\"%s\" unknown.\n", initialization_method.c_str() );
00228     MODULE_LOG << "  initialization_method = \"" << initialization_method
00229         << "\"" << endl;
00230 
00231     //TODO: build structure to store gradients during gradient descent
00232 
00233     if( training_schedule.length() != n_layers-1 )
00234         training_schedule = TVec<int>( n_layers-1 );
00235     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00236     MODULE_LOG << endl;
00237 
00238     build_layers();
00239     build_params();
00240 }
00241 
00242 void HintonDeepBeliefNet::build_layers()
00243 {
00244     MODULE_LOG << "build_layers() called" << endl;
00245     if( inputsize_ >= 0 )
00246     {
00247         PLASSERT( layers[0]->size + target_layer->size == inputsize() );
00248         setPredictorPredictedSizes( layers[0]->size,
00249                                     target_layer->size, false );
00250         MODULE_LOG << "  n_predictor = " << n_predictor << endl;
00251         MODULE_LOG << "  n_predicted = " << n_predicted << endl;
00252     }
00253 
00254     for( int i=0 ; i<n_layers ; i++ )
00255         layers[i]->random_gen = random_gen;
00256     target_layer->random_gen = random_gen;
00257 
00258     last_layer = layers[n_layers-1];
00259 
00260     // concatenate target_layer and layers[n_layers-2] into joint_layer,
00261     // if it is not already done
00262     if( !joint_layer
00263         || joint_layer->sub_layers.size() !=2
00264         || joint_layer->sub_layers[0] != target_layer
00265         || joint_layer->sub_layers[1] != layers[n_layers-2] )
00266     {
00267         TVec< PP<RBMLayer> > the_sub_layers( 2 );
00268         the_sub_layers[0] = target_layer;
00269         the_sub_layers[1] = layers[n_layers-2];
00270         joint_layer = new RBMMixedLayer( the_sub_layers );
00271     }
00272     joint_layer->random_gen = random_gen;
00273 }
00274 
00275 void HintonDeepBeliefNet::build_params()
00276 {
00277     MODULE_LOG << "build_params() called" << endl;
00278     if( params.length() == 0 )
00279     {
00280         params.resize( n_layers-1 );
00281         for( int i=0 ; i<n_layers-1 ; i++ )
00282             params[i] = new RBMLLParameters();
00283     }
00284     else if( params.length() != n_layers-1 )
00285         PLERROR( "HintonDeepBeliefNet::build_params - params.length() should\n"
00286                  "be equal to layers.length()-1 (%d != %d).\n",
00287                  params.length(), n_layers-1 );
00288 
00289     activation_gradients.resize( n_layers-1 );
00290     expectation_gradients.resize( n_layers-1 );
00291     output_gradient.resize( n_predicted );
00292 
00293     for( int i=0 ; i<n_layers-1 ; i++ )
00294     {
00295         //TODO: call changeOptions instead
00296         params[i]->down_units_types = layers[i]->units_types;
00297         params[i]->up_units_types = layers[i+1]->units_types;
00298         params[i]->initialization_method = initialization_method;
00299         params[i]->random_gen = random_gen;
00300         params[i]->build();
00301 
00302         activation_gradients[i].resize( params[i]->down_layer_size );
00303         expectation_gradients[i].resize( params[i]->down_layer_size );
00304     }
00305 
00306     if( target_layer && !target_params )
00307         target_params = new RBMLLParameters();
00308 
00309     //TODO: call changeOptions instead
00310     target_params->down_units_types = target_layer->units_types;
00311     target_params->up_units_types = last_layer->units_types;
00312     target_params->initialization_method = initialization_method;
00313     target_params->random_gen = random_gen;
00314     target_params->build();
00315 
00316     // build joint_params from params[n_layers-2] and target_params
00317     // if it is not already done
00318     if( !joint_params
00319         || joint_params->target_params != target_params
00320         || joint_params->cond_params != params[n_layers-2] )
00321     {
00322         joint_params = new RBMJointLLParameters( target_params,
00323                                                  params[n_layers-2] );
00324     }
00325     joint_params->random_gen = random_gen;
00326 
00327     // share the biases
00328     for( int i=0 ; i<n_layers-2 ; i++ )
00329         params[i]->up_units_bias = params[i+1]->down_units_bias;
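          // (Consecutive RBMs share their intermediate layer: the "up" units
          // of params[i] are the same units as the "down" units of
          // params[i+1], so making both bias fields refer to the same vector
          // keeps them consistent without any copying.)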
00330 }
00331 
00333 // forget //
00335 void HintonDeepBeliefNet::forget()
00336 {
00337     MODULE_LOG << "forget() called" << endl;
00344     ptimer->resetAllTimers();
00345     resetGenerator(seed_);
00346     for( int i=0 ; i<n_layers-1 ; i++ )
00347         params[i]->forget();
00348 
00349     for( int i=0 ; i<n_layers ; i++ )
00350         layers[i]->reset();
00351 
00352 #if USING_MPI
00353     global_params.resize(0);
00354 #endif
00355     target_params->forget();
00356     target_layer->reset();
00357 
00358     stage = 0;
00359 }
00360 
00362 // generate //
00364 void HintonDeepBeliefNet::generate(Vec& y) const
00365 {
00366     PLERROR("generate not implemented for HintonDeepBeliefNet");
00367 }
00368 
00370 // cdf //
00372 real HintonDeepBeliefNet::cdf(const Vec& y) const
00373 {
00374     PLERROR("cdf not implemented for HintonDeepBeliefNet"); return 0;
00375 }
00376 
00378 // expectation //
00380 void HintonDeepBeliefNet::expectation(Vec& mu) const
00381 {
00382     mu.resize( predicted_size );
00383 
00384     // Propagate input (predictor_part) until penultimate layer
00385     layers[0]->expectation << predictor_part;
00386     for( int i=0 ; i<n_layers-2 ; i++ )
00387     {
00388         params[i]->setAsDownInput( layers[i]->expectation );
00389         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00390         layers[i+1]->computeExpectation();
00391     }
00392 
00393     // Set layers[n_layers-2]->expectation (penultimate) as conditioning input
00394     // of joint_params
00395     joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00396 
00397     // Get all activations on target_layer from target_params
00398     target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00399     target_layer->computeExpectation();
00400 
00401     mu << target_layer->expectation;
00402 }
00403 
00405 // density //
00407 real HintonDeepBeliefNet::density(const Vec& y) const
00408 {
00409     PLASSERT( y.size() == n_predicted );
00410 
00411     // TODO: 'y'[0] should rather be the integer "index" itself!
00412     int index = argmax( y );
00413 
00414     // If y != onehot( index ), then density is 0
00415     if( !is_equal( y[index], 1. ) )
00416         return 0;
00417     for( int i=0 ; i<n_predicted ; i++ )
00418         if( !is_equal( y[i], 0 ) && i != index )
00419             return 0;
00420 
00421     expectation( store_expect );
00422     return store_expect[index];
00423 }
00424 
00425 
00427 // log_density //
00429 real HintonDeepBeliefNet::log_density(const Vec& y) const
00430 {
00431     return pl_log( density(y) );
00432 }
00433 
00435 // survival_fn //
00437 real HintonDeepBeliefNet::survival_fn(const Vec& y) const
00438 {
00439     PLERROR("survival_fn not implemented for HintonDeepBeliefNet"); return 0;
00440 }
00441 
00443 // variance //
00445 void HintonDeepBeliefNet::variance(Mat& cov) const
00446 {
00447     PLERROR("variance not implemented for HintonDeepBeliefNet");
00448 }
00449 
00451 // makeDeepCopyFromShallowCopy //
00453 void HintonDeepBeliefNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00454 {
00455     inherited::makeDeepCopyFromShallowCopy(copies);
00456 
00457     deepCopyField(ptimer, copies);
00458     deepCopyField(layers, copies);
00459     deepCopyField(last_layer, copies);
00460     deepCopyField(target_layer, copies);
00461     deepCopyField(joint_layer, copies);
00462     deepCopyField(params, copies);
00463     deepCopyField(joint_params, copies);
00464     deepCopyField(target_params, copies);
00465     deepCopyField(training_schedule, copies);
00466 }
00467 
00469 // setPredictor //
00471 void HintonDeepBeliefNet::setPredictor(const Vec& predictor, bool call_parent)
00472     const
00473 {
00474     if (call_parent)
00475         inherited::setPredictor(predictor, true);
00476     // ### Add here any specific code required by your subclass.
00477 }
00478 
00480 // setPredictorPredictedSizes //
00482 bool HintonDeepBeliefNet::setPredictorPredictedSizes(int the_predictor_size,
00483                                                      int the_predicted_size,
00484                                                      bool call_parent)
00485 {
00486     bool sizes_have_changed = false;
00487     if (call_parent)
00488         sizes_have_changed = inherited::setPredictorPredictedSizes(
00489             the_predictor_size, the_predicted_size, true);
00490 
00491     // ### Add here any specific code required by your subclass.
00492     if( (the_predictor_size >= 0 && the_predictor_size != layers[0]->size) ||
00493         (the_predicted_size >= 0 && the_predicted_size != target_layer->size) )
00494         PLERROR( "HintonDeepBeliefNet::setPredictorPredictedSizes - \n"
00495                  "n_predictor should be equal to layer[0]->size (%d)\n"
00496                  "n_predicted should be equal to target_layer->size (%d).\n",
00497                  layers[0]->size, target_layer->size );
00498 
00499     n_predictor = layers[0]->size;
00500     n_predicted = target_layer->size;
00501 
00502     // Returned value.
00503     return sizes_have_changed;
00504 }
00505 
00506 
00508 // train //
00510 void HintonDeepBeliefNet::train()
00511 {
00512     MODULE_LOG << "train() called " << endl;
00513     // The role of the train method is to bring the learner up to
00514     // stage==nstages, updating train_stats with training costs measured
00515     // on-line in the process.
00516 
00517     /* TYPICAL CODE:
00518 
00519     static Vec input;  // static so we don't reallocate memory each time...
00520     static Vec target; // (but be careful that static means shared!)
00521     input.resize(inputsize());    // the train_set's inputsize()
00522     target.resize(targetsize());  // the train_set's targetsize()
00523     real weight;
00524 
00525     // This generic PLearner method does a number of standard things useful for
00526     // (almost) any learner, and returns 'false' if no training should take
00527     // place. See PLearner.h for more details.
00528     if (!initTrain())
00529         return;
00530 
00531     while(stage<nstages)
00532     {
00533         // clear statistics of previous epoch
00534         train_stats->forget();
00535 
00536         //... train for 1 stage, and update train_stats,
00537         // using train_set->getExample(input, target, weight)
00538         // and train_stats->update(train_costs)
00539 
00540         ++stage;
00541         train_stats->finalize(); // finalize statistics for this epoch
00542     }
00543     */
00544 
00545     Vec input( inputsize() );
00546     Vec target( targetsize() ); // unused
00547     real weight; // unused
00548     Vec train_costs(3);
00549     int nsamples = train_set->length();
00550     ptimer->startTimer("training_time");
00551 #if USING_MPI
00552     // initialize global parameters for allowing to easily share them across
00553     // multiple CPUs
00554 
00555     // wait until we can attach a gdb process
00556     //pout << "START WAITING..." << endl;
00557     //sleep(20);
00558     //pout << "DONE WAITING!" << endl;
00559     MPI_Barrier(MPI_COMM_WORLD);
00560     //int total_bsize=minibatch_size*PLMPI::size;
00561     int total_bsize=PLMPI::size;
00562     // forget(); // DEBUGGING TO GET REPRODUCIBLE RESULTS
00563     if (global_params.size()==0)
00564     {
00565         int n_params = joint_params->nParameters(1,1);
00566         for (int i=0;i<params.length()-1;i++)
00567             n_params += params[i]->nParameters(0,1);
00568         global_params.resize(n_params);
00569         previous_global_params.resize(n_params);
00570         Vec p=global_params;
00571         for (int i=0;i<params.length()-1;i++)
00572             p=params[i]->makeParametersPointHere(p,0,1);
00573         p=joint_params->makeParametersPointHere(p,1,1);
00574         if (p.length()!=0)
00575             PLERROR("HintonDeepBeliefNet: Inconsistencies between nParameters and makeParametersPointHere!");
00576     }
00577 #endif
00578 
00579     MODULE_LOG << "  nsamples = " << nsamples << endl;
00580     MODULE_LOG << "  initial stage = " << stage << endl;
00581     MODULE_LOG << "  objective: nstages = " << nstages << endl;
00582 
00583     if( !initTrain() )
00584     {
00585         MODULE_LOG << "train() aborted" << endl;
00586         return;
00587     }
00588 
00589     ProgressBar* pb = 0;
00590 
00591     // clear stats of previous epoch
00592     train_stats->forget();
00593 
00594     /***** initial greedy training *****/
00595     for( int layer=0 ; layer < n_layers-2 ; layer++ )
00596     {
00597         MODULE_LOG << "Training parameters between layers " << layer
00598             << " and " << layer+1 << endl;
00599 
00600         int end_stage = min( training_schedule[layer], nstages );
00601 
00602         MODULE_LOG << "  stage = " << stage << endl;
00603         MODULE_LOG << "  end_stage = " << end_stage << endl;
00604 
00605         if( report_progress && stage < end_stage )
00606         {
00607             pb = new ProgressBar( "Training layer "+tostring(layer)
00608                                   +" of "+classname(),
00609                                   end_stage - stage );
00610         }
00611 
00612         params[layer]->learning_rate = learning_rate;
00613 
00614 #if USING_MPI
00615         // make a copy of the parameters as they were at the beginning of
00616         // the minibatch
00617         previous_global_params << global_params;
00618 #endif
00619 
00620         for( ; stage<end_stage ; stage++ )
00621         {
00622 #if USING_MPI
00623             // only look at some of the examples, associated with this process
00624             // number (rank)
00625             if (stage%PLMPI::size==PLMPI::rank)
00626             {
00627 #endif
00628 //                resetGenerator(1); // DEBUGGING HACK TO MAKE SURE RESULTS ARE INDEPENDENT OF PARALLELIZATION
00629                 int sample = stage % nsamples;
00630                 train_set->getExample(sample, input, target, weight);
00631                 greedyStep( input.subVec(0, n_predictor), layer );
00632 
00633                 if( pb )
00634                 {
00635                     if( layer == 0 )
00636                         pb->update( stage + 1 );
00637                     else
00638                         pb->update( stage - training_schedule[layer-1] + 1 );
00639                 }
00640 #if USING_MPI
00641             }
00642             // time to share among processors
00643             if (stage%total_bsize==0 || stage==end_stage-1)
00644                 shareParamsMPI();
00645 #endif
00646         }
00647 
00648         if( pb )
00649         {
00650             delete pb;
00651             pb = 0;
00652         }
00653     }
00654 
00655     /***** joint training *****/
00656     MODULE_LOG << "Training joint parameters, between target,"
00657         << " penultimate (" << n_layers-2 << ")," << endl
00658         << "and last (" << n_layers-1 << ") layers." << endl;
00659 
00660     int end_stage = min( training_schedule[n_layers-2], nstages );
00661 
00662     MODULE_LOG << "  stage = " << stage << endl;
00663     MODULE_LOG << "  end_stage = " << end_stage << endl;
00664 
00665     if( report_progress && stage < end_stage )
00666         pb = new ProgressBar( "Training joint layer (target and "
00667                              +tostring(n_layers-2)+") of "+classname(),
00668                              end_stage - stage );
00669 
00670     joint_params->learning_rate = learning_rate;
00671 //    target_params->learning_rate = learning_rate;
00672 
00673     int previous_stage = (n_layers < 3) ? 0 : training_schedule[n_layers-3];
00674     int last = min(training_schedule[n_layers-2],nstages);
00675     for( ; stage<last ; stage++ )
00676     {
00677 #if USING_MPI
00678         // only look at some of the examples, associated with this process number (rank)
00679         if (stage%PLMPI::size==PLMPI::rank)
00680         {
00681 #endif
00682             int sample = stage % nsamples;
00683             train_set->getExample(sample, input, target, weight);
00684             jointGreedyStep( input );
00685 
00686             if( pb )
00687                 pb->update( stage - previous_stage + 1 );
00688 #if USING_MPI
00689         }
00690         // time to share among processors
00691         if (stage%total_bsize==0 || stage==last-1)
00692             shareParamsMPI();
00693 #endif
00694     }
00695     if( pb )
00696     {
00697         delete pb;
00698         pb = 0;
00699     }
00700 
00701     /***** fine-tuning *****/
00702     MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
00703 
00704     int init_stage = stage;
00705     if( report_progress && stage < nstages )
00706         pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00707                              +classname(),
00708                              nstages - init_stage );
00709 
00710     MODULE_LOG << "  fine_tuning_learning_rate = "
00711         << fine_tuning_learning_rate << endl;
00712 
00713     for( int i=0 ; i<n_layers-1 ; i++ )
00714         params[i]->learning_rate = fine_tuning_learning_rate;
00715     joint_params->learning_rate = fine_tuning_learning_rate;
00716     target_params->learning_rate = fine_tuning_learning_rate;
00717 
00718     int begin_sample = stage % nsamples;
00719     for( ; stage<nstages ; stage++ )
00720     {
00721 #if USING_MPI
00722         // only look at some of the examples, associated with this process number (rank)
00723         if (stage%PLMPI::size==PLMPI::rank)
00724         {
00725 #endif
00726             int sample = stage % nsamples;
00727             if( sample == begin_sample )
00728                 train_stats->forget();
00729             if( !fast_exact_is_equal( fine_tuning_learning_rate, 0. ) )
00730             {
00731                 real cur_learning_rate = fine_tuning_learning_rate
00732                     / (1. + fine_tuning_decrease_ct*(stage-init_stage) );
00733                 for( int i=0 ; i<n_layers-1 ; i++ )
00734                     params[i]->learning_rate = cur_learning_rate;
00735                 joint_params->learning_rate = cur_learning_rate;
00736                 target_params->learning_rate = cur_learning_rate;
00737             }
00738 
00739             train_set->getExample(sample, input, target, weight);
00740             fineTuneByGradientDescent( input, train_costs );
00741             train_stats->update( train_costs );
00742 
00743             if( pb )
00744                 pb->update( stage - init_stage + 1 );
00745 #if USING_MPI
00746         }
00747         // time to share among processors
00748         if (stage%total_bsize==0 || stage==nstages-1)
00749             shareParamsMPI();
00750 #endif
00751     }
00752 
00753     if( pb )
00754         delete pb;
00755 
00756     ptimer->stopTimer("training_time");
00757     real training_time = ptimer->getTimer("training_time");
00758     train_costs[2] = training_time;
00759     train_stats->update(train_costs);
00760     MODULE_LOG << "Training finished in " << endl << training_time << " seconds." << endl;
00761     train_stats->finalize(); // finalize statistics 
00762 }
00763 
00764 // assumes that down_layer->expectation is set
00765 void HintonDeepBeliefNet::contrastiveDivergenceStep(
00766     const PP<RBMLayer>& down_layer,
00767     const PP<RBMParameters>& parameters,
00768     const PP<RBMLayer>& up_layer )
00769 {
00770     // positive phase
00771     if( use_sample_or_expectation[0] == 0 )
00772         parameters->setAsDownInput( down_layer->expectation );
00773     else
00774     {
00775         down_layer->generateSample();
00776         parameters->setAsDownInput( down_layer->sample );
00777     }
00778 
00779     up_layer->getAllActivations( parameters );
00780     up_layer->computeExpectation();
00781     up_layer->generateSample();
00782 
00783     // accumulate stats using the right vector (sample or expectation)
00784     // we store a copy of positive phase values
00785     pos_down_values.resize( down_layer->size );
00786     pos_up_values.resize( up_layer->size );
00787 
00788     if( use_sample_or_expectation[0] == 2 )
00789         pos_down_values << down_layer->sample;
00790     else
00791         pos_down_values << down_layer->expectation;
00792 
00793     if( use_sample_or_expectation[1] == 2 )
00794         pos_up_values << up_layer->sample;
00795     else
00796         pos_up_values << up_layer->expectation;
00797 
00798     // down propagation
00799     if( use_sample_or_expectation[1] == 0 )
00800         parameters->setAsUpInput( up_layer->expectation );
00801     else
00802         parameters->setAsUpInput( up_layer->sample );
00803 
00804     down_layer->getAllActivations( parameters );
00805     down_layer->computeExpectation();
00806     down_layer->generateSample();
00807 
00808     // negative phase
00809     if( use_sample_or_expectation[2] == 0 )
00810         parameters->setAsDownInput( down_layer->expectation );
00811     else
00812         parameters->setAsDownInput( down_layer->sample );
00813 
00814     up_layer->getAllActivations( parameters );
00815     up_layer->computeExpectation();
00816 
00817     // accumulate stats using the right vector (sample or expectation)
00818     // no need to copy because the values won't change before update
00819     Vec neg_down_values;
00820     Vec neg_up_values;
00821     if( use_sample_or_expectation[2] == 2 )
00822         neg_down_values = down_layer->sample;
00823     else
00824         neg_down_values = down_layer->expectation;
00825 
00826     if( use_sample_or_expectation[3] == 2 )
00827         neg_up_values = up_layer->sample;
00828     else
00829         neg_up_values = up_layer->expectation;
00830 
00831     // update
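          // For RBMLLParameters, this update is expected to be the usual CD-1
          // rule, i.e. something of the form
          //   w_ij += learning_rate * ( pos_up[i]*pos_down[j] - neg_up[i]*neg_down[j] ),
          // together with the corresponding bias updates (see
          // RBMParameters::update and its subclasses).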
00832     parameters->update(pos_down_values, pos_up_values,
00833                        neg_down_values, neg_up_values);
00834 }
00835 
00836 void HintonDeepBeliefNet::greedyStep( const Vec& predictor, int index )
00837 {
00838     // deterministic propagation until we reach index
00839     layers[0]->expectation << predictor;
00840     for( int i=0 ; i<index ; i++ )
00841     {
00842         params[i]->setAsDownInput( layers[i]->expectation );
00843         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00844         layers[i+1]->computeExpectation();
00845     }
00846 
00847     // perform one step of CD
00848     contrastiveDivergenceStep( layers[index],
00849                                (RBMLLParameters*) params[index],
00850                                layers[index+1] );
00851 }
00852 
00853 void HintonDeepBeliefNet::jointGreedyStep( const Vec& input )
00854 {
00855     // deterministic propagation until we reach n_layers-2, setting the input
00856     // of the "input" part of joint_layer
00857     layers[0]->expectation << input.subVec( 0, n_predictor );
00858     for( int i=0 ; i<n_layers-2 ; i++ )
00859     {
00860         params[i]->setAsDownInput( layers[i]->expectation );
00861         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00862         layers[i+1]->computeExpectation();
00863     }
00864 
00865     // now fill the "target" part of joint_layer
00866     target_layer->expectation << input.subVec( n_predictor, n_predicted );
00867 
00868     contrastiveDivergenceStep( (RBMLayer *) joint_layer,
00869                                (RBMLLParameters *) joint_params,
00870                                last_layer );
00871 }
00872 
00873 void HintonDeepBeliefNet::fineTuneByGradientDescent( const Vec& input,
00874                                                      const Vec& train_costs )
00875 {
00876     // split input in predictor_part and predicted_part
00877     splitCond(input);
00878 
00879     // compute predicted_part expectation, conditioned on predictor_part
00880     // (forward pass)
00881     expectation( output_gradient );
00882 
00883     int actual_index = argmax(predicted_part);
00884 
00885     // update train_costs
00886 #ifdef BOUNDCHECK
00887     for( int i=0 ; i<n_predicted ; i++ )
00888         PLASSERT( is_equal( predicted_part[i], 0. ) ||
00889                   ( i == actual_index && is_equal( predicted_part[i], 1. ) ) );
00890 #endif
00891     train_costs[0] = -pl_log( target_layer->expectation[actual_index] );
00892     int predicted_index = argmax( target_layer->expectation );
00893     if( predicted_index == actual_index )
00894         train_costs[1] = 0;
00895     else
00896         train_costs[1] = 1;
00897 
00898     // output gradient
00899     output_gradient[actual_index] -= 1.;
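          // output_gradient was filled with target_layer->expectation by the
          // call to expectation() above, so it now holds p - onehot(target):
          // the usual gradient of -log p[target] for a softmax output trained
          // with negative log-likelihood.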
00900 
00901     joint_params->bpropUpdate( layers[n_layers-2]->expectation,
00902                                target_layer->expectation,
00903                                expectation_gradients[n_layers-2],
00904                                output_gradient );
00905 
00906     for( int i=n_layers-2 ; i>0 ; i-- )
00907     {
00908         layers[i]->bpropUpdate( layers[i]->activations,
00909                                 layers[i]->expectation,
00910                                 activation_gradients[i],
00911                                 expectation_gradients[i] );
00912         params[i-1]->bpropUpdate( layers[i-1]->expectation,
00913                                   layers[i]->activations,
00914                                   expectation_gradients[i-1],
00915                                   activation_gradients[i] );
00916     }
00917 }
00918 
00919 void HintonDeepBeliefNet::computeCostsFromOutputs(const Vec& input,
00920                                                   const Vec& output,
00921                                                   const Vec& target,
00922                                                   Vec& costs) const
00923 {
00924     char c = outputs_def[0];
00925     if( c == 'l' || c == 'd' )
00926         inherited::computeCostsFromOutputs(input, output, target, costs);
00927     else if( c == 'e' )
00928     {
00929         costs.resize( 3 );
00930         splitCond(input);
00931 
00932         // actual_index is the actual 'target'
00933         int actual_index = argmax(predicted_part);
00934 #ifdef BOUNDCHECK
00935         for( int i=0 ; i<n_predicted ; i++ )
00936             PLASSERT( is_equal( predicted_part[i], 0. ) ||
00937                       ( i == actual_index && is_equal( predicted_part[i], 1. ) ) );
00938 #endif
00939         costs[0] = -pl_log( output[actual_index] );
00940 
00941         // predicted_index is the most probable predicted class
00942         int predicted_index = argmax(output);
00943         if( predicted_index == actual_index )
00944             costs[1] = 0;
00945         else
00946             costs[1] = 1;
00947 
00948         real expected_output  = 0.;
00949         real expected_teacher = 0.;
00950         for( int i=0 ; i<n_predicted ; ++i ) {
00951             expected_output  += output[i] * i;
00952             expected_teacher += predicted_part[i] * i;
00953         }
00954         costs[2] = square( expected_output - expected_teacher );
00955 
00956     }
00957 }
00958 
00959 TVec<string> HintonDeepBeliefNet::getTestCostNames() const
00960 {
00961     char c = outputs_def[0];
00962     TVec<string> result;
00963     if( c == 'l' || c == 'd' )
00964         result.append( "NLL" );
00965     else if( c == 'e' )
00966     {
00967         result.append( "NLL" );
00968         result.append( "class_error" );
00969         result.append( "WMSE" );
00970     }
00971     result.append("time");
00972     return result;
00973 }
00974 
00975 TVec<string> HintonDeepBeliefNet::getTrainCostNames() const
00976 {
00977     return getTestCostNames();
00978 }
00979 
00980 #if USING_MPI
00981 void HintonDeepBeliefNet::shareParamsMPI()
00982 {
00983     if (sum_parallel_contributions)
00984     {
00985         if (PLMPI::rank!=0)
00986             // after this line global_params contains the delta for all cpus except root
00987             global_params -= previous_global_params;
00988         // while the root contains the previous global params + its delta
00989         previous_global_params << global_params;
00990         // hence summing everything (result in cpu0.global_params)
00991         // yields the sum of all the changes plus the previous global params:
00992         MPI_Reduce(previous_global_params.data(),global_params.data(),
00993                    global_params.length(), PLMPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD);
00994         // send it back to every one
00995         MPI_Bcast(global_params.data(), global_params.length(),
00996                   PLMPI_REAL, 0, MPI_COMM_WORLD);
00997         // and save it for next sharing step
00998         previous_global_params << global_params;
00999     }
01000     else // average contributions
01001     {
01002         //substract(global_params, previous_global_params, delta_params);
01003         previous_global_params << global_params;
01004         //MPI_Reduce(delta_params.data(),global_params.data(),
01005         MPI_Reduce(previous_global_params.data(),global_params.data(),
01006                    global_params.length(), PLMPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD);
01007         global_params *= 1.0/PLMPI::size;
01008         //global_params += previous_global_params;
01009         MPI_Bcast(global_params.data(), global_params.length(),
01010                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01011         //previous_global_params << global_params;
01012     }
01013 }
01014 #endif
01015 
01016 #if USING_MPI
01017 void HintonDeepBeliefNet::test(VMat testset, PP<VecStatsCollector> test_stats,
01018                                VMat testoutputs, VMat testcosts) const
01019 {
01020     int l = testset.length();
01021     Vec input;
01022     Vec target;
01023     real weight;
01024 
01025     Vec output(outputsize());
01026 
01027     Vec costs(nTestCosts());
01028 
01029     // testset->defineSizes(inputsize(),targetsize(),weightsize());
01030 
01031     int prank=PLMPI::rank;
01032     int psize=PLMPI::size;
01033 
01034     if (prank==0)
01035         ptimer->startTimer("test_time");
01036     ProgressBar* pb = NULL;
01037     if(report_progress)
01038         pb = new ProgressBar("Testing learner",l);
01039 
01040     if (l == 0) {
01041         // Empty test set: we give -1 cost arbitrarily.
01042         costs.fill(-1);
01043         test_stats->update(costs);
01044     }
01045     int n=int(ceil(l/real(psize)));
01046     Mat my_res(n,costs.size()+2);
01047     Mat all_res;
01048     if (prank==0) all_res.resize(n*psize,costs.size()+2);
01049     Vec learner_costs = costs.subVec(0,costs.size()-1);
01050     int k=0;
01051     for(int i=0; i<l; i++)
01052      if (i%psize==prank)
01053      {
01054         testset.getExample(i, input, target, weight);
01055 
01056         // Always call computeOutputAndCosts, since this is better
01057         // behaved with stateful learners
01058         computeOutputAndCosts(input,target,output,learner_costs);
01059 
01060         if(testoutputs)
01061             testoutputs->putOrAppendRow(i,output);
01062 
01063         if(testcosts)
01064             testcosts->putOrAppendRow(i, costs);
01065 
01066         if(test_stats)
01067         {
01068             my_res.subMat(k,0,1,learner_costs.length()) << learner_costs;
01069             my_res(k,costs.length()-1) = 0;
01070             my_res(k,costs.length()) = weight;
01071             my_res(k++,costs.length()+1) = 1;
01072         }
01073 
01074         if(report_progress)
01075             pb->update(i);
01076      }
01077 
01078     if (prank==0)
01079        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01080                   all_res.data(),my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01081     else
01082        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01083                   0,my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01084 
01085     if (prank==0)
01086     {
01087         ptimer->stopTimer("test_time");
01088         real test_time = ptimer->getTimer("test_time");
01089         int nc=costs.length();
01090         for (int i=0;i<all_res.length();i++)
01091           if (all_res(i,nc+1)==1.0)
01092           {
01093               if (i==all_res.length()-1)
01094                   all_res(i,nc-1)=test_time;
01095               else
01096                   all_res(i,nc-1)=0;
01097               test_stats->update(all_res(i).subVec(0,nc),
01098                                  all_res(i,nc));
01099           }
01100     }
01101 
01102     if(pb)
01103         delete pb;
01104 }
01105 #endif
01106 
01107 } // end of namespace PLearn
01108 
01109 
01110 /*
01111   Local Variables:
01112   mode:c++
01113   c-basic-offset:4
01114   c-file-style:"stroustrup"
01115   c-file-offsets:((innamespace . 0)(inline-open . 0))
01116   indent-tabs-mode:nil
01117   fill-column:79
01118   End:
01119 */
01120 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :