GaussPartSupervisedDBN.cc
00001 // -*- C++ -*-
00002 
00003 // GaussPartSupervisedDBN.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00039 #define PL_LOG_MODULE_NAME "GaussPartSupervisedDBN"
00040 #include <plearn/io/pl_log.h>
00041 #include <plearn/io/openFile.h>
00042 
00043 #if USING_MPI
00044 #include <plearn/sys/PLMPI.h>
00045 #endif
00046 
00047 #include "GaussPartSupervisedDBN.h"
00048 
00049 // RBM includes
00050 #include "RBMLayer.h"
00051 #include "RBMMixedLayer.h"
00052 #include "RBMMultinomialLayer.h"
00053 #include "RBMParameters.h"
00054 #include "RBMLLParameters.h"
00055 #include "RBMQLParameters.h"
00056 #include "RBMJointLLParameters.h"
00057 
00058 // OnlineLearningModules includes
00059 #include "../OnlineLearningModule.h"
00060 #include "../StackedModulesModule.h"
00061 #include "../NLLErrModule.h"
00062 #include "../GradNNetLayerModule.h"
00063 
00064 namespace PLearn {
00065 using namespace std;
00066 
00067 PLEARN_IMPLEMENT_OBJECT(
00068     GaussPartSupervisedDBN,
00069     "Hinton's DBN plus supervised gradient from a logistic regression layer",
00070     ""
00071 );
00072 
00074 // GaussPartSupervisedDBN //
00076 GaussPartSupervisedDBN::GaussPartSupervisedDBN() :
00077     learning_rate(0.),
00078     fine_tuning_learning_rate(-1.),
00079     initial_momentum(0.),
00080     final_momentum(0.),
00081     momentum_switch_time(-1),
00082     weight_decay(0.),
00083     parallelization_minibatch_size(100),
00084     sum_parallel_contributions(0),
00085     use_sample_or_expectation(4)
00086 {
00087     use_sample_or_expectation[0] = 0;
00088     use_sample_or_expectation[1] = 1;
00089     use_sample_or_expectation[2] = 2;
00090     use_sample_or_expectation[3] = 0;
00091     random_gen = new PRandom();
00092 }
00093 
00095 // declareOptions //
00097 void GaussPartSupervisedDBN::declareOptions(OptionList& ol)
00098 {
00099     declareOption(ol, "learning_rate", &GaussPartSupervisedDBN::learning_rate,
00100                   OptionBase::buildoption,
00101                   "Learning rate used during greedy learning");
00102 
00103     declareOption(ol, "supervised_learning_rates",
00104                   &GaussPartSupervisedDBN::supervised_learning_rates,
00105                   OptionBase::buildoption,
00106                   "The learning rates used for the supervised part during"
00107                   " greedy learning\n"
00108                   "(layer by layer).\n");
00109 
00110     declareOption(ol, "fine_tuning_learning_rate",
00111                   &GaussPartSupervisedDBN::fine_tuning_learning_rate,
00112                   OptionBase::buildoption,
00113                   "Learning rate used during the gradient descent");
00114 
00115     declareOption(ol, "initial_momentum",
00116                   &GaussPartSupervisedDBN::initial_momentum,
00117                   OptionBase::buildoption,
00118                   "Initial momentum factor (should be between 0 and 1)");
00119 
00120     declareOption(ol, "final_momentum",
00121                   &GaussPartSupervisedDBN::final_momentum,
00122                   OptionBase::buildoption,
00123                   "Final momentum factor (should be between 0 and 1)");
00124 
00125     declareOption(ol, "momentum_switch_time",
00126                   &GaussPartSupervisedDBN::momentum_switch_time,
00127                   OptionBase::buildoption,
00128                   "Number of samples to be seen by layer i before its momentum"
00129                   " switches\n"
00130                   "from initial_momentum to final_momentum.\n");
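    // For example, with initial_momentum = 0.5, final_momentum = 0.9 and
    // momentum_switch_time = 10000, each layer would use a momentum of 0.5
    // for the first 10000 training samples it sees during greedy training,
    // and 0.9 afterwards (see the momentum_switch_stage computation in
    // train()).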
00131 
00132     declareOption(ol, "weight_decay", &GaussPartSupervisedDBN::weight_decay,
00133                   OptionBase::buildoption,
00134                   "Weight decay");
00135 
00136     declareOption(ol, "initialization_method",
00137                   &GaussPartSupervisedDBN::initialization_method,
00138                   OptionBase::buildoption,
00139                   "The method used to initialize the weights:\n"
00140                   "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
00141                   "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
00142                   " 1/sqrt(d)]\n"
00143                   "  - \"zero\"           = all weights are set to 0,\n"
00144                   "where d = max( up_layer_size, down_layer_size ).\n");
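    // For instance, with d = max( up_layer_size, down_layer_size ) = 100,
    // "uniform_linear" draws every weight uniformly in [-0.01, 0.01], while
    // "uniform_sqrt" draws it uniformly in [-0.1, 0.1].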
00145 
00146 
00147     declareOption(ol, "training_schedule",
00148                   &GaussPartSupervisedDBN::training_schedule,
00149                   OptionBase::buildoption,
00150                   "Total number of examples that should be seen until each"
00151                   " layer\n"
00152                   "has been greedily trained.\n"
00153                   "We should always have training_schedule[i] <"
00154                   " training_schedule[i+1].\n");
00155 
00156     declareOption(ol, "fine_tuning_method",
00157                   &GaussPartSupervisedDBN::fine_tuning_method,
00158                   OptionBase::buildoption,
00159                   "Method for fine-tuning the whole network after greedy"
00160                   " learning.\n"
00161                   "One of:\n"
00162                   "  - \"none\"\n"
00163                   "  - \"CD\" or \"contrastive_divergence\"\n"
00164                   "  - \"EGD\" or \"error_gradient_descent\"\n"
00165                   "  - \"WS\" or \"wake_sleep\".\n");
00166 
00167     declareOption(ol, "layers", &GaussPartSupervisedDBN::layers,
00168                   OptionBase::buildoption,
00169                   "Layers that learn representations of the input in an"
00170                   " unsupervised way.\n"
00171                   "layers[0] is the input layer.\n");
00172     
00173     declareOption(ol, "input_params", &GaussPartSupervisedDBN::input_params,
00174                   OptionBase::buildoption,
00175                   "Parameters linking layers[0] and layers[1]");
00176 
00177     declareOption(ol, "target_layer", &GaussPartSupervisedDBN::target_layer,
00178                   OptionBase::buildoption,
00179                   "Target (or label) layer");
00180 
00181     declareOption(ol, "params", &GaussPartSupervisedDBN::params,
00182                   OptionBase::buildoption,
00183                   "RBMParameters linking the unsupervised layers.\n"
00184                   "params[i] links layers[i] and layers[i+1], except for"
00185                   " params[n_layers-2],\n"
00186                   "which links layers[n_layers-2] and last_layer.\n");
00187 
00188     declareOption(ol, "target_params", &GaussPartSupervisedDBN::target_params,
00189                   OptionBase::buildoption,
00190                   "Parameters linking target_layer and last_layer");
00191 
00192 /*
00193     declareOption(ol, "use_sample_rather_than_expectation_in_positive_phase_statistics",
00194                   &GaussPartSupervisedDBN::use_sample_rather_than_expectation_in_positive_phase_statistics,
00195                   OptionBase::buildoption,
00196                   "In positive phase statistics use output->sample * input\n"
00197                   "rather than output->expectation * input.\n");
00198 */
00199     declareOption(ol, "use_sample_or_expectation",
00200                   &GaussPartSupervisedDBN::use_sample_or_expectation,
00201                   OptionBase::buildoption,
00202                   "Vector specifying whether to use samples or expectations"
00203                   " during the\n"
00204                   "contrastive divergence step:\n"
00205                   "  - 0 means that we use the expectation only,\n"
00206                   "  - 1 means that we sample (for the next step), but we use"
00207                   " the\n"
00208                   "    expectation in the CD update formula,\n"
00209                   "  - 2 means that we use the sample only.\n"
00210                   "The order of the arguments matches the steps of CD:\n"
00211                   "  - visible unit during positive phase (you should keep it"
00212                   " to 0),\n"
00213                   "  - hidden unit during positive phase,\n"
00214                   "  - visible unit during negative phase,\n"
00215                   "  - hidden unit during negative phase (you should keep it"
00216                   " to 0).\n");
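    // Example: the default value set in the constructor, [0, 1, 2, 0], uses
    // the expectation of the visible units in the positive phase, samples the
    // hidden units (but keeps their expectation in the positive statistics),
    // uses only the sample of the visible units in the negative phase, and
    // uses the expectation of the hidden units in the negative phase.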
00217 
00218     declareOption(ol, "parallelization_minibatch_size",
00219                   &GaussPartSupervisedDBN::parallelization_minibatch_size,
00220                   OptionBase::buildoption,
00221                   "Only used when USING_MPI for parallelization.\n"
00222                   "This is the number of examples seen by one process\n"
00223                   "during training after which the weight updates are shared\n"
00224                   "among all the processes.\n");
00225 
00226     declareOption(ol, "sum_parallel_contributions",
00227                   &GaussPartSupervisedDBN::sum_parallel_contributions,
00228                   OptionBase::buildoption,
00229                   "Only used when USING_MPI for parallelization.\n"
00230                   "sum or average the delta-w contributions from different processes?\n");
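    // If true, the parameter changes computed by the different processes are
    // summed, as if a single process had seen all the examples of the
    // minibatch; if false, the parameter vectors of the processes are simply
    // averaged (see shareParamsMPI() below).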
00231 
00232     declareOption(ol, "n_layers", &GaussPartSupervisedDBN::n_layers,
00233                   OptionBase::learntoption,
00234                   "Number of unsupervised layers, including input layer");
00235 
00236     declareOption(ol, "last_layer", &GaussPartSupervisedDBN::last_layer,
00237                   OptionBase::learntoption,
00238                   "Last layer, learning joint representations of input and"
00239                   " target");
00240 
00241     declareOption(ol, "joint_layer", &GaussPartSupervisedDBN::joint_layer,
00242                   OptionBase::nosave,
00243                   "Concatenation of target_layer and layers[n_layers-2]");
00244 
00245     declareOption(ol, "joint_params", &GaussPartSupervisedDBN::joint_params,
00246                   OptionBase::nosave,
00247                   "Parameters linking joint_layer and last_layer");
00248 
00249     declareOption(ol, "regressors", &GaussPartSupervisedDBN::regressors,
00250                   OptionBase::learntoption,
00251                   "Logistic regressors that will provide the supervised"
00252                   " gradient\n"
00253                   "for each RBMParameters\n");
00254 
00255     // Now call the parent class' declareOptions().
00256     inherited::declareOptions(ol);
00257 }
00258 
00260 // build //
00262 void GaussPartSupervisedDBN::build()
00263 {
00264     // ### Nothing to add here, simply calls build_().
00265     inherited::build();
00266     build_();
00267 }
00268 
00270 // build_ //
00272 void GaussPartSupervisedDBN::build_()
00273 {
00274     MODULE_LOG << "build_() called" << endl;
00275     n_layers = layers.length();
00276     if( n_layers <= 1 )
00277         return;
00278 
00279     if( fine_tuning_learning_rate < 0. )
00280         fine_tuning_learning_rate = learning_rate;
00281 
00282     // check value of initialization_method
00283     string im = lowerstring( initialization_method );
00284     if( im == "" || im == "uniform_sqrt" )
00285         initialization_method = "uniform_sqrt";
00286     else if( im == "uniform_linear" )
00287         initialization_method = im;
00288     else if( im == "zero" )
00289         initialization_method = im;
00290     else
00291         PLERROR( "GaussPartSupervisedDBN::build_ - initialization_method\n"
00292                  "\"%s\" unknown.\n", initialization_method.c_str() );
00293     MODULE_LOG << "  initialization_method = \"" << initialization_method
00294         << "\"" << endl;
00295 
00296     // check value of fine_tuning_method
00297     string ftm = lowerstring( fine_tuning_method );
00298     if( ftm == "" || ftm == "none" )
00299         fine_tuning_method = "";
00300     else if( ftm == "cd" || ftm == "contrastive_divergence" )
00301         fine_tuning_method = "CD";
00302     else if( ftm == "egd" || ftm == "error_gradient_descent" )
00303         fine_tuning_method = "EGD";
00304     else if( ftm == "ws" || ftm == "wake_sleep" )
00305         fine_tuning_method = "WS";
00306     else
00307         PLERROR( "GaussPartSupervisedDBN::build_ - fine_tuning_method \"%s\"\n"
00308                  "is unknown.\n", fine_tuning_method.c_str() );
00309     MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
00310         <<  endl;
00311     //TODO: build structure to store gradients during gradient descent
00312 
00313     if( training_schedule.length() != n_layers-1 )
00314         training_schedule = TVec<int>( n_layers-1, 1000000 );
00315 
00316     // fills with 0's if too short
00317     supervised_learning_rates.resize( n_layers-1 );
00318 
00319     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00320     MODULE_LOG << "learning_rate = " << learning_rate << endl;
00321     MODULE_LOG << "fine_tuning_learning_rate = "
00322         << fine_tuning_learning_rate << endl;
00323     MODULE_LOG << "supervised_learning_rates = "
00324         << supervised_learning_rates << endl;
00325     MODULE_LOG << endl;
00326 
00327     build_layers();
00328     build_params();
00329     build_regressors();
00330 }
00331 
00332 void GaussPartSupervisedDBN::build_layers()
00333 {
00334     MODULE_LOG << "build_layers() called" << endl;
00335     if( inputsize_ >= 0 )
00336     {
00337         PLASSERT( layers[0]->size + target_layer->size == inputsize() );
00338         setPredictorPredictedSizes( layers[0]->size,
00339                                     target_layer->size, false );
00340         MODULE_LOG << "  n_predictor = " << n_predictor << endl;
00341         MODULE_LOG << "  n_predicted = " << n_predicted << endl;
00342     }
00343 
00344     for( int i=0 ; i<n_layers ; i++ )
00345         layers[i]->random_gen = random_gen;
00346     target_layer->random_gen = random_gen;
00347 
00348     last_layer = layers[n_layers-1];
00349 
00350     // concatenate target_layer and layers[n_layers-2] into joint_layer,
00351     // if it is not already done
00352     if( !joint_layer
00353         || joint_layer->sub_layers.size() !=2
00354         || joint_layer->sub_layers[0] != target_layer
00355         || joint_layer->sub_layers[1] != layers[n_layers-2] )
00356     {
00357         TVec< PP<RBMLayer> > the_sub_layers( 2 );
00358         the_sub_layers[0] = target_layer;
00359         the_sub_layers[1] = layers[n_layers-2];
00360         joint_layer = new RBMMixedLayer( the_sub_layers );
00361     }
00362     joint_layer->random_gen = random_gen;
00363 }
00364 
00365 void GaussPartSupervisedDBN::build_params()
00366 {
00367     MODULE_LOG << "build_params() called" << endl;
00368     if( params.length() == 0 )
00369     {
00370         input_params = new RBMQLParameters() ; 
00371         params.resize( n_layers-1 );
00372         for( int i=1 ; i<n_layers-1 ; i++ )
00373             params[i] = new RBMLLParameters();
00374     }
00375     else if( params.length() != n_layers-1 )
00376         PLERROR( "GaussPartSupervisedDBN::build_params - params.length() should\n"
00377                  "be equal to layers.length()-1 (%d != %d).\n",
00378                  params.length(), n_layers-1 );
00379 
00380     activation_gradients.resize( n_layers-1 );
00381     expectation_gradients.resize( n_layers-1 );
00382     output_gradient.resize( n_predicted );
00383 
00384     input_params->down_units_types = layers[0]->units_types;
00385     input_params->up_units_types = layers[1]->units_types;
00386     input_params->learning_rate = learning_rate;
00387     input_params->initialization_method = initialization_method;
00388     input_params->random_gen = random_gen;
00389     input_params->build();
00390     
00391     activation_gradients[0].resize( input_params->down_layer_size );
00392     expectation_gradients[0].resize( input_params->down_layer_size );
00393     
00394 
00395     for( int i=1 ; i<n_layers-1 ; i++ )
00396     {
00397         //TODO: call changeOptions instead
00398         params[i]->down_units_types = layers[i]->units_types;
00399         params[i]->up_units_types = layers[i+1]->units_types;
00400         params[i]->initialization_method = initialization_method;
00401         params[i]->random_gen = random_gen;
00402         params[i]->build();
00403 
00404         activation_gradients[i].resize( params[i]->down_layer_size );
00405         expectation_gradients[i].resize( params[i]->down_layer_size );
00406     }
00407 
00408     if( target_layer && !target_params )
00409         target_params = new RBMLLParameters();
00410 
00411     //TODO: call changeOptions instead
00412     target_params->down_units_types = target_layer->units_types;
00413     target_params->up_units_types = last_layer->units_types;
00414     target_params->initialization_method = initialization_method;
00415     target_params->random_gen = random_gen;
00416     target_params->build();
00417 
00418     // build joint_params from params[n_layers-1] and target_params
00419     // if it is not already done
00420     if( !joint_params
00421         || joint_params->target_params != target_params
00422         || joint_params->cond_params != params[n_layers-2] )
00423     {
00424         joint_params = new RBMJointLLParameters( target_params,
00425                                                  params[n_layers-2] );
00426     }
00427     joint_params->random_gen = random_gen;
00428 
00429     // share the biases
00430     for( int i=1 ; i<n_layers-2 ; i++ )
00431         params[i]->up_units_bias = params[i+1]->down_units_bias;
00432     input_params->up_units_bias = params[1]->down_units_bias;
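    // After these assignments, the bias vector of each intermediate layer is
    // a single object shared by the RBM below it (as up_units_bias) and the
    // RBM above it (as down_units_bias), so updates coming from either RBM
    // act on the same storage.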
00433 }
00434 
00435 void GaussPartSupervisedDBN::build_regressors()
00436 {
00437     MODULE_LOG << "build_regressors() called" << endl;
00438     if( regressors.length() != n_layers-1 )
00439         regressors.resize( n_layers-1 );
00440 
00441     for( int i=0 ; i<n_layers-1 ; i++ )
00442         if( !(regressors[i]))
00443 //            || regressors[i]->input_size != i>0?
00444 //            params[i]->up_layer_size : input_params->up_layer_size )
00445         {
00446             MODULE_LOG << "creating regressor " << i << endl;
00447 
00448             // A linear layer of the appropriate size, trained by
00449             // stochastic gradient descent; its initial weights are 0.
00450             PP<GradNNetLayerModule> p_gnnlm = new GradNNetLayerModule();
00451             p_gnnlm->input_size = i > 0 ? params[i]->up_layer_size :
00452                 input_params->up_layer_size;
00453             p_gnnlm->output_size = n_predicted;
00454             p_gnnlm->start_learning_rate = supervised_learning_rates[i];
00455             MODULE_LOG << "start_learning_rate = "
00456                 << p_gnnlm->start_learning_rate << endl;
00457             p_gnnlm->init_weights_random_scale = 0.;
00458             p_gnnlm->build();
00459 
00460             // The softmax+NLL part
00461             PP<NLLErrModule> p_nll = new NLLErrModule();
00462             p_nll->input_size = n_predicted;
00463             p_nll->output_size = 1;
00464             p_nll->build();
00465 
00466             // Stack them, and...
00467             TVec< PP<OnlineLearningModule> > stack(2);
00468             stack[0] = (GradNNetLayerModule*) p_gnnlm;
00469             stack[1] = (NLLErrModule*) p_nll;
00470 
00471             // ... encapsulate them in another Module, that will compute
00472             // and backprop the NLL
00473             PP<StackedModulesModule> p_smm = new StackedModulesModule();
00474             p_smm->modules = stack;
00475             p_smm->last_layer_is_cost = true;
00476             p_smm->target_size = n_predicted;
00477             p_smm->build();
00478 
00479             regressors[i] = (StackedModulesModule*) p_smm;
00480         }
00481 }
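// Each regressors[i] thus maps the expectation of layer i+1 (with the one-hot
// target appended to it) to the negative log-likelihood of a logistic
// regression built on top of that layer; its bpropUpdate() both trains the
// linear layer and returns the gradient with respect to the layer's
// expectation, which supervisedContrastiveDivergenceStep() then propagates
// back into the RBM parameters.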
00482 
00483 
00485 // forget //
00487 void GaussPartSupervisedDBN::forget()
00488 {
00489     MODULE_LOG << "forget() called" << endl;
00496     resetGenerator(seed_);
00497     input_params->forget() ; 
00498     for( int i=1 ; i<n_layers-1 ; i++ )
00499         params[i]->forget();
00500 
00501     for( int i=0 ; i<n_layers ; i++ )
00502         layers[i]->reset();
00503 
00504 #if USING_MPI
00505     global_params.resize(0);
00506 #endif
00507     target_params->forget();
00508     target_layer->reset();
00509 
00510     stage = 0;
00511 }
00512 
00514 // generate //
00516 void GaussPartSupervisedDBN::generate(Vec& y) const
00517 {
00518     PLERROR("generate not implemented for GaussPartSupervisedDBN");
00519 }
00520 
00522 // cdf //
00524 real GaussPartSupervisedDBN::cdf(const Vec& y) const
00525 {
00526     PLERROR("cdf not implemented for GaussPartSupervisedDBN"); return 0;
00527 }
00528 
00530 // expectation //
00532 void GaussPartSupervisedDBN::expectation(Vec& mu) const
00533 {
00534     mu.resize( predicted_size );
00535 
00536     // Propagate input (predictor_part) until penultimate layer
00537     layers[0]->expectation << predictor_part;
00538     
00539     input_params->setAsDownInput(layers[0]->expectation) ; 
00540     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
00541     layers[1]->computeExpectation();
00542     
00543     for( int i=1 ; i<n_layers-2 ; i++ )
00544     {
00545         params[i]->setAsDownInput( layers[i]->expectation );
00546         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00547         layers[i+1]->computeExpectation();
00548     }
00549 
00550     // Set layers[n_layers-2]->expectation (penultimate) as conditioning input
00551     // of joint_params
00552     joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00553 
00554     // Get all activations on target_layer from target_params
00555     target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00556     target_layer->computeExpectation();
00557 
00558     mu << target_layer->expectation;
00559 }
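// In other words, expectation() computes mu = E[ predicted | predictor ]:
// the predictor part is propagated deterministically (expectations only) up
// to the penultimate layer, and joint_params then yields the multinomial
// distribution over the target units of the top RBM, which is copied into mu.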
00560 
00562 // density //
00564 real GaussPartSupervisedDBN::density(const Vec& y) const
00565 {
00566     PLASSERT( y.size() == n_predicted );
00567 
00568     // TODO: y[0] should rather be the integer "index" itself!
00569     int index = argmax( y );
00570 
00571     // If y != onehot( index ), then density is 0
00572     if( !is_equal( y[index], 1. ) )
00573         return 0;
00574     for( int i=0 ; i<n_predicted ; i++ )
00575         if( !is_equal( y[i], 0 ) && i != index )
00576             return 0;
00577 
00578     expectation( store_expect );
00579     return store_expect[index];
00580 }
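// Example: with n_predicted = 3 and y = (0, 1, 0), density(y) returns the
// conditional probability of class 1 given the current predictor_part; any y
// that is not exactly a one-hot vector has density 0.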
00581 
00582 
00584 // log_density //
00586 real GaussPartSupervisedDBN::log_density(const Vec& y) const
00587 {
00588     return pl_log( density(y) );
00589 }
00590 
00592 // survival_fn //
00594 real GaussPartSupervisedDBN::survival_fn(const Vec& y) const
00595 {
00596     PLERROR("survival_fn not implemented for GaussPartSupervisedDBN"); return 0;
00597 }
00598 
00600 // variance //
00602 void GaussPartSupervisedDBN::variance(Mat& cov) const
00603 {
00604     PLERROR("variance not implemented for GaussPartSupervisedDBN");
00605 }
00606 
00608 // makeDeepCopyFromShallowCopy //
00610 void GaussPartSupervisedDBN::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00611 {
00612     inherited::makeDeepCopyFromShallowCopy(copies);
00613 
00614     deepCopyField(layers, copies);
00615     deepCopyField(last_layer, copies);
00616     deepCopyField(target_layer, copies);
00617     deepCopyField(joint_layer, copies);
00618     deepCopyField(params, copies);
00619     deepCopyField(joint_params, copies);
00620     deepCopyField(target_params, copies);
00621     deepCopyField(input_params, copies);
00622     deepCopyField(training_schedule, copies);
00623 }
00624 
00626 // setPredictor //
00628 void GaussPartSupervisedDBN::setPredictor(const Vec& predictor, bool call_parent)
00629     const
00630 {
00631     if (call_parent)
00632         inherited::setPredictor(predictor, true);
00633     // ### Add here any specific code required by your subclass.
00634 }
00635 
00637 // setPredictorPredictedSizes //
00639 bool GaussPartSupervisedDBN::setPredictorPredictedSizes(int the_predictor_size,
00640                                                    int the_predicted_size,
00641                                                    bool call_parent)
00642 {
00643     bool sizes_have_changed = false;
00644     if (call_parent)
00645         sizes_have_changed = inherited::setPredictorPredictedSizes(
00646             the_predictor_size, the_predicted_size, true);
00647 
00648     // ### Add here any specific code required by your subclass.
00649     if( (the_predictor_size >= 0 && the_predictor_size != layers[0]->size) ||
00650         (the_predicted_size >= 0 && the_predicted_size != target_layer->size) )
00651         PLERROR( "GaussPartSupervisedDBN::setPredictorPredictedSizes - \n"
00652                  "n_predictor should be equal to layer[0]->size (%d)\n"
00653                  "n_predicted should be equal to target_layer->size (%d).\n",
00654                  layers[0]->size, target_layer->size );
00655 
00656     n_predictor = layers[0]->size;
00657     n_predicted = target_layer->size;
00658 
00659     // Returned value.
00660     return sizes_have_changed;
00661 }
00662 
00663 
00665 // train //
00667 void GaussPartSupervisedDBN::train()
00668 {
00669     MODULE_LOG << "train() called" << endl;
00670     // The role of the train method is to bring the learner up to
00671     // stage==nstages, updating train_stats with training costs measured
00672     // on-line in the process.
00673 
00674     /* TYPICAL CODE:
00675 
00676     static Vec input;  // static so we don't reallocate memory each time...
00677     static Vec target; // (but be careful that static means shared!)
00678     input.resize(inputsize());    // the train_set's inputsize()
00679     target.resize(targetsize());  // the train_set's targetsize()
00680     real weight;
00681 
00682     // This generic PLearner method does a number of standard stuff useful for
00683     // (almost) any learner, and return 'false' if no training should take
00684     // place. See PLearner.h for more details.
00685     if (!initTrain())
00686         return;
00687 
00688     while(stage<nstages)
00689     {
00690         // clear statistics of previous epoch
00691         train_stats->forget();
00692 
00693         //... train for 1 stage, and update train_stats,
00694         // using train_set->getExample(input, target, weight)
00695         // and train_stats->update(train_costs)
00696 
00697         ++stage;
00698         train_stats->finalize(); // finalize statistics for this epoch
00699     }
00700     */
00701 
00702     Vec input( inputsize() );
00703     Vec target( targetsize() ); // unused
00704     real weight; // unused
00705     Vec train_costs(2);
00706 
00707     // hack for supervised cost
00708     real sum_sup_cost = 0;
00709     PStream sup_cost_file = openFile( expdir/"sup_cost.amat",
00710                                       PStream::raw_ascii, "a" );
00711 
00712     int nsamples = train_set->length();
00713 
00714 #if USING_MPI
00715     // initialize global parameters for allowing to easily share them across
00716     // multiple CPUs
00717 
00718     // wait until we can attach a gdb process
00719     //pout << "START WAITING..." << endl;
00720     //sleep(20);
00721     //pout << "DONE WAITING!" << endl;
00722     MPI_Barrier(MPI_COMM_WORLD);
00723     int total_bsize=parallelization_minibatch_size*PLMPI::size;
00724 //#endif
00725     forget(); // DEBUGGING TO GET REPRODUCIBLE RESULTS
00726     if (global_params.size()==0)
00727     {
00728         int n_params = joint_params->nParameters(1,1);
00729         for (int i=0;i<params.length()-1;i++)
00730             n_params += params[i]->nParameters(0,1);
00731         global_params.resize(n_params);
00732         previous_global_params.resize(n_params);
00733         Vec p=global_params;
00734         for (int i=0;i<params.length()-1;i++)
00735             p=params[i]->makeParametersPointHere(p,0,1);
00736         p=joint_params->makeParametersPointHere(p,1,1);
00737         if (p.length()!=0)
00738             PLERROR("GaussPartSupervisedDBN: Inconsistencies between nParameters and makeParametersPointHere!");
00739     }
00740 #endif
00741 
00742     MODULE_LOG << "  nsamples = " << nsamples << endl;
00743     MODULE_LOG << "  initial stage = " << stage << endl;
00744     MODULE_LOG << "  objective: nstages = " << nstages << endl;
00745 
00746     if( !initTrain() )
00747     {
00748         MODULE_LOG << "train() aborted" << endl;
00749         return;
00750     }
00751 
00752     ProgressBar* pb = 0;
00753 
00754     // clear stats of previous epoch
00755     train_stats->forget();
00756 
00757     /***** initial greedy training *****/
00758     for( int layer=0 ; layer < n_layers-2 ; layer++ )
00759     {
00760         MODULE_LOG << "Training parameters between layers " << layer
00761             << " and " << layer+1 << endl;
00762 
00763         int end_stage = min( training_schedule[layer], nstages );
00764 
00765         MODULE_LOG << "  stage = " << stage << endl;
00766         MODULE_LOG << "  end_stage = " << end_stage << endl;
00767 
00768         if( report_progress && stage < end_stage )
00769         {
00770             pb = new ProgressBar( "Training layer "+tostring(layer)
00771                                   +" of "+classname(),
00772                                   end_stage - stage );
00773         }
00774         if (layer > 0) {
00775             params[layer]->learning_rate = learning_rate;
00776 
00777             int momentum_switch_stage = momentum_switch_time;
00778             if( layer > 0 )
00779                 momentum_switch_stage += training_schedule[layer-1];
00780 
00781             if( stage <= momentum_switch_stage )
00782                 params[layer]->momentum = initial_momentum;
00783             else
00784                 params[layer]->momentum = final_momentum;
00785         }
00786         else {
00787             input_params->learning_rate = learning_rate;
00788 
00789             int momentum_switch_stage = momentum_switch_time;
00790             if( layer > 0 )
00791                 momentum_switch_stage += training_schedule[layer-1];
00792 
00793 
00794         }
00795 
00796 #if USING_MPI
00797         // make a copy of the parameters as they were at the beginning of
00798         // the minibatch
00799         if (sum_parallel_contributions)
00800             previous_global_params << global_params;
00801 #endif
00802         int begin_sample = stage % nsamples;
00803         for( ; stage<end_stage ; stage++ )
00804         {
00805 #if USING_MPI
00806             // only look at some of the examples, associated with this process
00807             // number (rank)
00808             if (stage%PLMPI::size==PLMPI::rank)
00809             {
00810 #endif
00811 //                resetGenerator(1); // DEBUGGING HACK TO MAKE SURE RESULTS ARE INDEPENDENT OF PARALLELIZATION
00812                 int sample = stage % nsamples;
00813                 if( sample == begin_sample )
00814                 {
00815                     sup_cost_file << sum_sup_cost / nsamples << endl;
00816                     sum_sup_cost = 0;
00817                 }
00818 
00819                 train_set->getExample(sample, input, target, weight);
00820                 sum_sup_cost += greedyStep( input, layer );
00821 
00822                 if( pb )
00823                 {
00824                     if( layer == 0 )
00825                         pb->update( stage + 1 );
00826                     else
00827                         pb->update( stage - training_schedule[layer-1] + 1 );
00828                 }
00829 #if USING_MPI
00830             }
00831             // time to share among processors
00832             if (stage%total_bsize==0 || stage==end_stage-1)
00833                 shareParamsMPI();
00834 #endif
00835         }
00836     }
00837 
00838     /***** joint training *****/
00839     MODULE_LOG << "Training joint parameters, between target,"
00840         << " penultimate (" << n_layers-2 << ")," << endl
00841         << "and last (" << n_layers-1 << ") layers." << endl;
00842 
00843     int end_stage = min( training_schedule[n_layers-2], nstages );
00844 
00845     MODULE_LOG << "  stage = " << stage << endl;
00846     MODULE_LOG << "  end_stage = " << end_stage << endl;
00847 
00848     if( report_progress && stage < end_stage )
00849         pb = new ProgressBar( "Training joint layer (target and "
00850                              +tostring(n_layers-2)+") of "+classname(),
00851                              end_stage - stage );
00852 
00853     joint_params->learning_rate = learning_rate;
00854 //    target_params->learning_rate = learning_rate;
00855 
00856     int previous_stage = (n_layers < 3) ? 0 : training_schedule[n_layers-3];
00857     int momentum_switch_stage = momentum_switch_time + previous_stage;
00858     if( stage <= momentum_switch_stage )
00859         joint_params->momentum = initial_momentum;
00860     else
00861         joint_params->momentum = final_momentum;
00862 
00863     int begin_sample = stage % nsamples;
00864     int last = min(training_schedule[n_layers-2],nstages);
00865     for( ; stage<last ; stage++ )
00866     {
00867 #if USING_MPI
00868         // only look at some of the examples, associated with this process
00869         // number (rank)
00870         if (stage%PLMPI::size==PLMPI::rank)
00871         {
00872 #endif
00873             int sample = stage % nsamples;
00874             if( sample == begin_sample )
00875             {
00876                 sup_cost_file << sum_sup_cost / nsamples << endl;
00877                 sum_sup_cost = 0;
00878             }
00879 
00880             train_set->getExample(sample, input, target, weight);
00881             sum_sup_cost += jointGreedyStep( input );
00882 
00883             if( stage == momentum_switch_stage )
00884                 joint_params->momentum = final_momentum;
00885 
00886             if( pb )
00887                 pb->update( stage - previous_stage + 1 );
00888 #if USING_MPI
00889         }
00890         // time to share among processors
00891         if (stage%total_bsize==0 || stage==last-1)
00892             shareParamsMPI();
00893 #endif
00894     }
00895 
00896     /***** fine-tuning *****/
00897     MODULE_LOG << "Fine-tuning all parameters, using method "
00898         << fine_tuning_method << endl;
00899 
00900     int init_stage = stage;
00901     if( report_progress && stage < nstages )
00902         pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00903                              +classname(),
00904                              nstages - init_stage );
00905 
00906     MODULE_LOG << "  fine_tuning_learning_rate = "
00907         << fine_tuning_learning_rate << endl;
00908 
00909     input_params->learning_rate = fine_tuning_learning_rate ; 
00910     for( int i=1 ; i<n_layers-1 ; i++ )
00911         params[i]->learning_rate = fine_tuning_learning_rate;
00912     joint_params->learning_rate = fine_tuning_learning_rate;
00913     target_params->learning_rate = fine_tuning_learning_rate;
00914 
00915     if( fine_tuning_method == "" ) // do nothing
00916     {
00917         stage = nstages;
00918         if( pb )
00919             pb->update( nstages - init_stage + 1 );
00920     }
00921     else if( fine_tuning_method == "EGD" )
00922     {
00923         begin_sample = stage % nsamples;
00924         for( ; stage<nstages ; stage++ )
00925         {
00926 #if USING_MPI
00927             // only look at some of the examples, associated with
00928             // this process number (rank)
00929             if (stage%PLMPI::size==PLMPI::rank)
00930             {
00931 #endif
00932                 int sample = stage % nsamples;
00933                 if( sample == begin_sample )
00934                     train_stats->forget();
00935 
00936                 train_set->getExample(sample, input, target, weight);
00937                 fineTuneByGradientDescent( input, train_costs );
00938                 train_stats->update( train_costs );
00939 
00940                 if( pb )
00941                     pb->update( stage - init_stage + 1 );
00942 #if USING_MPI
00943             }
00944             // time to share among processors
00945             if (stage%total_bsize==0 || stage==nstages-1)
00946                 shareParamsMPI();
00947 #endif
00948         }
00949         train_stats->finalize(); // finalize statistics for this epoch
00950     }
00951     else
00952         PLERROR( "Fine-tuning methods other than \"EGD\" are not"
00953                  " implemented yet." );
00954 
00955     if( pb )
00956         delete pb;
00957 
00958     MODULE_LOG << "Training finished" << endl << endl;
00959 }
00960 
00961 // assumes that down_layer->expectation is set
00962 real GaussPartSupervisedDBN::supervisedContrastiveDivergenceStep(
00963     const PP<RBMLayer>& down_layer,
00964     const PP<RBMParameters>& parameters,
00965     const PP<RBMLayer>& up_layer,
00966     const Vec& target,
00967     int index )
00968 {
00969 
00970     real supervised_cost = MISSING_VALUE;
00971     if( supervised_learning_rates[index] > 0 )
00972     {
00973         // (Deterministic) forward pass
00974         parameters->setAsDownInput( down_layer->expectation );
00975         up_layer->getAllActivations( parameters );
00976         up_layer->computeExpectation();
00977 
00978         Vec supervised_input = up_layer->expectation.copy();
00979         supervised_input.append( target );
00980 
00981         // Compute supervised cost and gradient
00982         Vec sup_cost(1);
00983         regressors[index]->fprop( supervised_input, sup_cost );
00984         regressors[index]->bpropUpdate( supervised_input, sup_cost,
00985                                         expectation_gradients[index+1],
00986                                         Vec() );
00987 
00988         // propagate gradient to params
00989         up_layer->bpropUpdate( up_layer->activations,
00990                                up_layer->expectation,
00991                                activation_gradients[index+1],
00992                                expectation_gradients[index+1] );
00993 
00994         // put the right learning rate
00995         parameters->learning_rate = supervised_learning_rates[index];
00996         // updates the parameters
00997         parameters->bpropUpdate( down_layer->expectation,
00998                                  up_layer->activations,
00999                                  expectation_gradients[index],
01000                                  activation_gradients[index+1] );
01001         // put the learning rate back
01002         parameters->learning_rate = learning_rate;
01003 
01004         // return the cost
01005         supervised_cost = sup_cost[0];
01006     }
01007 
01008     // We have to do another forward pass because the weights have changed
01009     contrastiveDivergenceStep( down_layer, parameters, up_layer );
01010 
01011     // return supervised cost
01012     return supervised_cost;
01013 }
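// To summarize: when supervised_learning_rates[index] > 0, this method first
// performs one stochastic gradient step on the logistic regressor's NLL
// (updating the regressor and, with the supervised learning rate, the RBM
// parameters below it), and then performs a regular CD step with the updated
// weights. It returns the supervised NLL, or MISSING_VALUE when the
// supervised part is disabled for this layer.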
01014 
01015 void GaussPartSupervisedDBN::contrastiveDivergenceStep(
01016     const PP<RBMLayer>& down_layer,
01017     const PP<RBMParameters>& parameters,
01018     const PP<RBMLayer>& up_layer )
01019 {
01020     // Re-initialize values in down_layer
01021     if( use_sample_or_expectation[0] == 0 )
01022         parameters->setAsDownInput( down_layer->expectation );
01023     else
01024     {
01025         down_layer->generateSample();
01026         parameters->setAsDownInput( down_layer->sample );
01027     }
01028 
01029     // positive phase
01030     up_layer->getAllActivations( parameters );
01031     up_layer->computeExpectation();
01032     up_layer->generateSample();
01033 
01034     // accumulate stats using the right vector (sample or expectation)
01035     if( use_sample_or_expectation[0] == 2 )
01036     {
01037         if( use_sample_or_expectation[1] == 2 )
01038             parameters->accumulatePosStats(down_layer->sample,
01039                                            up_layer->sample );
01040         else
01041             parameters->accumulatePosStats(down_layer->sample,
01042                                            up_layer->expectation );
01043     }
01044     else
01045     {
01046         if( use_sample_or_expectation[1] == 2 )
01047             parameters->accumulatePosStats(down_layer->expectation,
01048                                            up_layer->sample);
01049         else
01050             parameters->accumulatePosStats(down_layer->expectation,
01051                                            up_layer->expectation );
01052     }
01053 
01054     // down propagation
01055     if( use_sample_or_expectation[1] == 0 )
01056         parameters->setAsUpInput( up_layer->expectation );
01057     else
01058         parameters->setAsUpInput( up_layer->sample );
01059 
01060     down_layer->getAllActivations( parameters );
01061     down_layer->computeExpectation();
01062     down_layer->generateSample();
01063 
01064     if( use_sample_or_expectation[2] == 0 )
01065         parameters->setAsDownInput( down_layer->expectation );
01066     else
01067         parameters->setAsDownInput( down_layer->sample );
01068 
01069     up_layer->getAllActivations( parameters );
01070     up_layer->computeExpectation();
01071 
01072     // accumulate stats using the right vector (sample or expectation)
01073     if( use_sample_or_expectation[3] == 2 )
01074     {
01075         up_layer->generateSample();
01076         if( use_sample_or_expectation[2] == 2 )
01077             parameters->accumulateNegStats( down_layer->sample,
01078                                             up_layer->sample );
01079         else
01080             parameters->accumulateNegStats( down_layer->expectation,
01081                                             up_layer->sample );
01082     }
01083     else
01084     {
01085         if( use_sample_or_expectation[2] == 2 )
01086             parameters->accumulateNegStats( down_layer->sample,
01087                                             up_layer->expectation );
01088         else
01089             parameters->accumulateNegStats( down_layer->expectation,
01090                                             up_layer->expectation );
01091     }
01092 
01093     // update
01094     parameters->update();
01095 }
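// For reference, with the default use_sample_or_expectation = [0, 1, 2, 0]
// this is one step of CD-1: the positive statistics use the visible and
// hidden expectations, a hidden sample is used to reconstruct the visible
// layer, the visible sample drives the second upward pass, and the negative
// statistics use the visible sample and the new hidden expectation.
// parameters->update() then applies, roughly (ignoring momentum and weight
// decay), an update of the form
//     delta_w_ij ~ learning_rate * ( pos_stats_ij - neg_stats_ij ),
// the exact rule being defined by the RBMParameters subclass in use.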
01096 
01097 real GaussPartSupervisedDBN::greedyStep( const Vec& input, int index )
01098 {
01099     // deterministic propagation until we reach index
01100     layers[0]->expectation << input.subVec(0, n_predictor);
01101 
01102     
01103     input_params->setAsDownInput( layers[0]->expectation );
01104     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
01105     layers[1]->computeExpectation();
01106     
01107     for( int i=1 ; i<index ; i++ )
01108     {
01109         params[i]->setAsDownInput( layers[i]->expectation );
01110         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01111         layers[i+1]->computeExpectation();
01112     }
01113 
01114     // perform one step of CD + partially supervised gradient
01115     real sup_cost;
01116     if (index == 0)
01117         sup_cost = supervisedContrastiveDivergenceStep(
01118                         layers[index],
01119                         (RBMQLParameters*) input_params,
01120                         layers[index+1],
01121                         input.subVec(n_predictor,n_predicted),
01122                         index );
01123 
01124     else
01125         sup_cost = supervisedContrastiveDivergenceStep(
01126                         layers[index],
01127                         (RBMLLParameters*) params[index],
01128                         layers[index+1],
01129                         input.subVec(n_predictor,n_predicted),
01130                         index );
01131     return sup_cost;
01132 }
01133 
01134 real GaussPartSupervisedDBN::jointGreedyStep( const Vec& input )
01135 {
01136     // deterministic propagation until we reach n_layers-2, setting the input
01137     // of the "input" part of joint_layer
01138     layers[0]->expectation << input.subVec( 0, n_predictor );
01139         
01140     input_params->setAsDownInput( layers[0]->expectation );
01141     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
01142     layers[1]->computeExpectation();
01143     
01144     for( int i=1 ; i<n_layers-2 ; i++ )
01145     {
01146         params[i]->setAsDownInput( layers[i]->expectation );
01147         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01148         layers[i+1]->computeExpectation();
01149     }
01150 
01151     real supervised_cost = MISSING_VALUE;
01152     if( supervised_learning_rates[n_layers-2] > 0 )
01153     {
01154         // deterministic forward pass
01155         joint_params->setAsCondInput( layers[n_layers-2]->expectation );
01156         target_layer->getAllActivations( (RBMLLParameters*) joint_params );
01157         target_layer->computeExpectation();
01158 
01159         // now get the actual index of the target
01160         int actual_index = argmax( input.subVec( n_predictor, n_predicted ) );
01161 #ifdef BOUNDCHECK
01162         for( int i=0 ; i<n_predicted ; i++ )
01163             PLASSERT( is_equal( input[n_predictor+i], 0. ) ||
01164                     i == actual_index && is_equal( input[n_predictor+i], 1 ) );
01165 #endif
01166 
01167         // get supervised cost (= train cost) and output gradient
01168         supervised_cost = -pl_log( target_layer->expectation[actual_index] );
01169         output_gradient << target_layer->expectation;
01170         output_gradient[actual_index] -= 1.;
01171 
01172         // put the right learning rate
01173         joint_params->learning_rate = supervised_learning_rates[n_layers-2];
01174         // backprop and update
01175         joint_params->bpropUpdate( layers[n_layers-2]->expectation,
01176                                    target_layer->expectation,
01177                                    expectation_gradients[n_layers-2],
01178                                    output_gradient );
01179         // put the learning rate back
01180         joint_params->learning_rate = learning_rate;
01181 
01182     }
01183 
01184     // now fill the "target" part of joint_layer
01185     target_layer->expectation << input.subVec( n_predictor, n_predicted );
01186     // do contrastive divergence step with the new weights and actual target
01187     contrastiveDivergenceStep( (RBMLayer*) joint_layer,
01188                                (RBMLLParameters*) joint_params,
01189                                last_layer );
01190 
01191     // return supervised cost
01192     return supervised_cost;
01193 }
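// In the CD step above, the visible layer of the top RBM is the
// concatenation of the one-hot target and the penultimate representation, so
// the joint RBM models their joint distribution, while the preceding
// supervised update (when enabled) directly decreases
// -log p(target | representation).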
01194 
01195 void GaussPartSupervisedDBN::fineTuneByGradientDescent( const Vec& input,
01196                                                    const Vec& train_costs )
01197 {
01198     // split input in predictor_part and predicted_part
01199     splitCond(input);
01200 
01201     // compute predicted_part expectation, conditioned on predictor_part
01202     // (forward pass)
01203     expectation( output_gradient );
01204 
01205     int actual_index = argmax(predicted_part);
01206 
01207     // update train_costs
01208 #ifdef BOUNDCHECK
01209     for( int i=0 ; i<n_predicted ; i++ )
01210         PLASSERT( is_equal( predicted_part[i], 0. ) ||
01211                 i == actual_index && is_equal( predicted_part[i], 1. ) );
01212 #endif
01213     train_costs[0] = -pl_log( target_layer->expectation[actual_index] );
01214     int predicted_index = argmax( target_layer->expectation );
01215     if( predicted_index == actual_index )
01216         train_costs[1] = 0;
01217     else
01218         train_costs[1] = 1;
01219 
01220     // output gradient
01221     output_gradient[actual_index] -= 1.;
01222 
01223     joint_params->bpropUpdate( layers[n_layers-2]->expectation,
01224                                target_layer->expectation,
01225                                expectation_gradients[n_layers-2],
01226                                output_gradient );
01227 
01228     for( int i=n_layers-2 ; i>1 ; i-- )
01229     {
01230         layers[i]->bpropUpdate( layers[i]->activations,
01231                                 layers[i]->expectation,
01232                                 activation_gradients[i],
01233                                 expectation_gradients[i] );
01234         params[i-1]->bpropUpdate( layers[i-1]->expectation,
01235                                   layers[i]->activations,
01236                                   expectation_gradients[i-1],
01237                                   activation_gradients[i] );
01238         
01239     }
01240 
01241     layers[1]->bpropUpdate( layers[1]->activations,
01242                             layers[1]->expectation,
01243                             activation_gradients[1],
01244                             expectation_gradients[1] );
01245 
01246     input_params->bpropUpdate( layers[0]->expectation,
01247                                layers[1]->activations,
01248                                expectation_gradients[0],
01249                                activation_gradients[1] );
01250 
01251 }
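// Note on the output gradient used above: output_gradient is first filled
// with the predicted distribution by expectation(), and 1 is subtracted at
// the true class, giving the familiar softmax/NLL gradient p - onehot(target);
// it is then backpropagated through the joint RBM and every lower layer,
// whose learning rates were all set to fine_tuning_learning_rate in train().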
01252 
01253 
01254 void GaussPartSupervisedDBN::computeCostsFromOutputs(const Vec& input,
01255                                                 const Vec& output,
01256                                                 const Vec& target,
01257                                                 Vec& costs) const
01258 {
01259     char c = outputs_def[0];
01260     if( c == 'l' || c == 'd' )
01261         inherited::computeCostsFromOutputs(input, output, target, costs);
01262     else if( c == 'e' )
01263     {
01264         costs.resize( 3 );
01265         splitCond(input);
01266 
01267         // actual_index is the actual 'target'
01268         int actual_index = argmax(predicted_part);
01269 #ifdef BOUNDCHECK
01270         for( int i=0 ; i<n_predicted ; i++ )
01271             PLASSERT( is_equal( predicted_part[i], 0. ) ||
01272                     i == actual_index && is_equal( predicted_part[i], 1. ) );
01273 #endif
01274         costs[0] = -pl_log( output[actual_index] );
01275 
01276         // predicted_index is the most probable predicted class
01277         int predicted_index = argmax(output);
01278         if( predicted_index == actual_index )
01279             costs[1] = 0;
01280         else
01281             costs[1] = 1;
01282         
01283         real expected_output =  .0 ; 
01284         real expected_teacher = .0 ; 
01285         for(int i=0 ; i<n_predicted ; ++i) { 
01286             expected_output  += output[i] * i;
01287             expected_teacher += predicted_part[i] * i ; 
01288         }
01289         costs[2] = square(expected_output - expected_teacher) ; 
01290         
01291     }
01292 }
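// The third cost, "WMSE", treats class indices as ordinal values: it is the
// squared difference between the expected class index under the predicted
// distribution, sum_i i * output[i], and the index of the true class.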
01293 
01294 TVec<string> GaussPartSupervisedDBN::getTestCostNames() const
01295 {
01296     char c = outputs_def[0];
01297     TVec<string> result;
01298     if( c == 'l' || c == 'd' )
01299         result.append( "NLL" );
01300     else if( c == 'e' )
01301     {
01302         result.append( "NLL" );
01303         result.append( "class_error" );
01304         result.append( "WMSE" );
01305     }
01306     return result;
01307 }
01308 
01309 TVec<string> GaussPartSupervisedDBN::getTrainCostNames() const
01310 {
01311     return getTestCostNames();
01312 }
01313 
01314 #if USING_MPI
01315 void GaussPartSupervisedDBN::shareParamsMPI()
01316 {
01317     if (sum_parallel_contributions)
01318     {
01319         if (PLMPI::rank!=0)
01320             // after this line global_params contains the delta for all cpus
01321             // except root
01322             global_params -= previous_global_params;
01323         // while the root contains the previous global params + its delta
01324         previous_global_params << global_params;
01325         // hence summing everything (result in cpu0.global_params)
01326         // yields the sum of all the changes plus the previous global params:
01327         MPI_Reduce(previous_global_params.data(),global_params.data(),
01328                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01329                    MPI_COMM_WORLD);
01330         // send it back to every one
01331         MPI_Bcast(global_params.data(), global_params.length(),
01332                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01333         // and save it for next sharing step
01334         previous_global_params << global_params;
01335     }
01336     else // average contributions
01337     {
01338         previous_global_params << global_params;
01339         MPI_Reduce(previous_global_params.data(),global_params.data(),
01340                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01341                    MPI_COMM_WORLD);
01342         global_params *= 1.0/PLMPI::size;
01343         MPI_Bcast(global_params.data(), global_params.length(),
01344                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01345     }
01346 }
01347 #endif
01348 
01349 #if USING_MPI
01350 void GaussPartSupervisedDBN::test(VMat testset, PP<VecStatsCollector> test_stats,
01351                              VMat testoutputs, VMat testcosts) const
01352 {
01353     int l = testset.length();
01354     Vec input;
01355     Vec target;
01356     real weight;
01357 
01358     Vec output(outputsize());
01359 
01360     Vec costs(nTestCosts());
01361 
01362     // testset->defineSizes(inputsize(),targetsize(),weightsize());
01363 
01364     ProgressBar* pb = NULL;
01365     if(report_progress)
01366         pb = new ProgressBar("Testing learner",l);
01367 
01368     if (l == 0) {
01369         // Empty test set: we give -1 cost arbitrarily.
01370         costs.fill(-1);
01371         test_stats->update(costs);
01372     }
01373     int n=int(ceil(l/real(PLMPI::size)));
01374     Mat my_res(n,costs.size()+2);
01375     Mat all_res;
01376     if (PLMPI::rank==0) all_res.resize(n*PLMPI::size,costs.size()+2);
01377     int k=0;
01378     for(int i=0; i<l; i++)
01379      if (i%PLMPI::size==PLMPI::rank)
01380      {
01381         testset.getExample(i, input, target, weight);
01382 
01383         // Always call computeOutputAndCosts, since this is better
01384         // behaved with stateful learners
01385         computeOutputAndCosts(input,target,output,costs);
01386 
01387         if(testoutputs)
01388             testoutputs->putOrAppendRow(i,output);
01389 
01390         if(testcosts)
01391             testcosts->putOrAppendRow(i, costs);
01392 
01393         if(test_stats)
01394         {
01395             my_res.subMat(k,0,1,costs.length()) << costs;
01396             my_res(k,costs.length()) = weight;
01397             my_res(k++,costs.length()+1) = 1;
01398         }
01399 
01400         if(report_progress)
01401             pb->update(i);
01402      }
01403 
01404     if (PLMPI::rank==0)
01405        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01406                   all_res.data(),my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01407     else
01408        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01409                   0,my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01410 
01411     if (PLMPI::rank==0)
01412        for (int i=0;i<all_res.length();i++)
01413           if (all_res(i,costs.length()+1)==1.0)
01414              test_stats->update(all_res(i).subVec(0,costs.length()),
01415                                 all_res(i,costs.length()));
01416 
01417     if(pb)
01418         delete pb;
01419 
01420 }
01421 #endif
01422 
01423 
01424 } // end of namespace PLearn
01425 
01426 
01427 /*
01428   Local Variables:
01429   mode:c++
01430   c-basic-offset:4
01431   c-file-style:"stroustrup"
01432   c-file-offsets:((innamespace . 0)(inline-open . 0))
01433   indent-tabs-mode:nil
01434   fill-column:79
01435   End:
01436 */
01437 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :