PartSupervisedDBN.cc
00001 // -*- C++ -*-
00002 
00003 // PartSupervisedDBN.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00039 #define PL_LOG_MODULE_NAME "PartSupervisedDBN"
00040 #include <plearn/io/pl_log.h>
00041 #include <plearn/io/openFile.h>
00042 
00043 #if USING_MPI
00044 #include <plearn/sys/PLMPI.h>
00045 #endif
00046 
00047 #include "PartSupervisedDBN.h"
00048 
00049 // RBM includes
00050 #include "RBMLayer.h"
00051 #include "RBMMixedLayer.h"
00052 #include "RBMMultinomialLayer.h"
00053 #include "RBMParameters.h"
00054 #include "RBMLLParameters.h"
00055 #include "RBMJointLLParameters.h"
00056 
00057 // OnlineLearningModules includes
00058 #include "../OnlineLearningModule.h"
00059 #include "../StackedModulesModule.h"
00060 #include "../NLLErrModule.h"
00061 #include "../GradNNetLayerModule.h"
00062 
00063 namespace PLearn {
00064 using namespace std;
00065 
00066 PLEARN_IMPLEMENT_OBJECT(
00067     PartSupervisedDBN,
00068     "Hinton's DBN plus supervised gradient from a logistic regression layer",
00069     ""
00070 );
00071 
00073 // PartSupervisedDBN //
00075 PartSupervisedDBN::PartSupervisedDBN() :
00076     learning_rate(0.),
00077     fine_tuning_learning_rate(-1.),
00078     initial_momentum(0.),
00079     final_momentum(0.),
00080     momentum_switch_time(-1),
00081     weight_decay(0.),
00082     parallelization_minibatch_size(100),
00083     sum_parallel_contributions(0),
00084     use_sample_or_expectation(4)
00085 {
00086     use_sample_or_expectation[0] = 0;
00087     use_sample_or_expectation[1] = 1;
00088     use_sample_or_expectation[2] = 2;
00089     use_sample_or_expectation[3] = 0;
00090     random_gen = new PRandom();
00091 }
00092 
00094 // declareOptions //
00096 void PartSupervisedDBN::declareOptions(OptionList& ol)
00097 {
00098     declareOption(ol, "learning_rate", &PartSupervisedDBN::learning_rate,
00099                   OptionBase::buildoption,
00100                   "Learning rate used during greedy learning");
00101 
00102     declareOption(ol, "supervised_learning_rates",
00103                   &PartSupervisedDBN::supervised_learning_rates,
00104                   OptionBase::buildoption,
00105                   "The learning rates used for the supervised part during"
00106                   " greedy learning\n"
00107                   "(layer by layer).\n");
00108 
00109     declareOption(ol, "fine_tuning_learning_rate",
00110                   &PartSupervisedDBN::fine_tuning_learning_rate,
00111                   OptionBase::buildoption,
00112                   "Learning rate used during the gradient descent");
00113 
00114     declareOption(ol, "initial_momentum",
00115                   &PartSupervisedDBN::initial_momentum,
00116                   OptionBase::buildoption,
00117                   "Initial momentum factor (should be between 0 and 1)");
00118 
00119     declareOption(ol, "final_momentum",
00120                   &PartSupervisedDBN::final_momentum,
00121                   OptionBase::buildoption,
00122                   "Final momentum factor (should be between 0 and 1)");
00123 
00124     declareOption(ol, "momentum_switch_time",
00125                   &PartSupervisedDBN::momentum_switch_time,
00126                   OptionBase::buildoption,
00127                   "Number of samples to be seen by layer i before its momentum"
00128                   " switches\n"
00129                   "from initial_momentum to final_momentum.\n");
00130 
00131     declareOption(ol, "weight_decay", &PartSupervisedDBN::weight_decay,
00132                   OptionBase::buildoption,
00133                   "Weight decay");
00134 
00135     declareOption(ol, "initialization_method",
00136                   &PartSupervisedDBN::initialization_method,
00137                   OptionBase::buildoption,
00138                   "The method used to initialize the weights:\n"
00139                   "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
00140                   "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
00141                   " 1/sqrt(d)]\n"
00142                   "  - \"zero\"           = all weights are set to 0,\n"
00143                   "where d = max( up_layer_size, down_layer_size ).\n");
00144 
00145 
00146     declareOption(ol, "training_schedule",
00147                   &PartSupervisedDBN::training_schedule,
00148                   OptionBase::buildoption,
00149                   "Total number of examples that should be seen until each"
00150                   " layer\n"
00151                   "has been greedily trained.\n"
00152                   "We should always have training_schedule[i] <"
00153                   " training_schedule[i+1].\n");
00154 
00155     declareOption(ol, "fine_tuning_method",
00156                   &PartSupervisedDBN::fine_tuning_method,
00157                   OptionBase::buildoption,
00158                   "Method for fine-tuning the whole network after greedy"
00159                   " learning.\n"
00160                   "One of:\n"
00161                   "  - \"none\"\n"
00162                   "  - \"CD\" or \"contrastive_divergence\"\n"
00163                   "  - \"EGD\" or \"error_gradient_descent\"\n"
00164                   "  - \"WS\" or \"wake_sleep\".\n");
00165 
00166     declareOption(ol, "layers", &PartSupervisedDBN::layers,
00167                   OptionBase::buildoption,
00168                   "Layers that learn representations of the input in an"
00169                   " unsupervised way.\n"
00170                   "layers[0] is the input layer.\n");
00171 
00172     declareOption(ol, "target_layer", &PartSupervisedDBN::target_layer,
00173                   OptionBase::buildoption,
00174                   "Target (or label) layer");
00175 
00176     declareOption(ol, "params", &PartSupervisedDBN::params,
00177                   OptionBase::buildoption,
00178                   "RBMParameters linking the unsupervised layers.\n"
00179                   "params[i] links layers[i] and layers[i+1]; in"
00180                   " particular, params[n_layers-2]\n"
00181                   "links layers[n_layers-2] and last_layer.\n");
00182 
00183     declareOption(ol, "target_params", &PartSupervisedDBN::target_params,
00184                   OptionBase::buildoption,
00185                   "Parameters linking target_layer and last_layer");
00186 
00187 /*
00188     declareOption(ol, "use_sample_rather_than_expectation_in_positive_phase_statistics",
00189                   &PartSupervisedDBN::use_sample_rather_than_expectation_in_positive_phase_statistics,
00190                   OptionBase::buildoption,
00191                   "In positive phase statistics use output->sample * input\n"
00192                   "rather than output->expectation * input.\n");
00193 */
00194     declareOption(ol, "use_sample_or_expectation",
00195                   &PartSupervisedDBN::use_sample_or_expectation,
00196                   OptionBase::buildoption,
00197                   "Vector specifying which value (sample or expectation)"
00198                   " to use during the\n"
00199                   "contrastive divergence step:\n"
00200                   "  - 0 means that we use the expectation only,\n"
00201                   "  - 1 means that we sample (for the next step), but we use"
00202                   " the\n"
00203                   "    expectation in the CD update formula,\n"
00204                   "  - 2 means that we use the sample only.\n"
00205                   "The order of the arguments matches the steps of CD:\n"
00206                   "  - visible unit during positive phase (you should keep it"
00207                   " to 0),\n"
00208                   "  - hidden unit during positive phase,\n"
00209                   "  - visible unit during negative phase,\n"
00210                   "  - hidden unit during negative phase (you should keep it"
00211                   " to 0).\n");
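
    // For reference, the constructor's default is use_sample_or_expectation
    // = [ 0, 1, 2, 0 ]: expectations for the visible units in the positive
    // phase and the hidden units in the negative phase, a sample of the
    // positive-phase hidden units to drive the Gibbs step (but their
    // expectation in the CD update), and samples for the negative-phase
    // visible units.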
00212 
00213     declareOption(ol, "parallelization_minibatch_size",
00214                   &PartSupervisedDBN::parallelization_minibatch_size,
00215                   OptionBase::buildoption,
00216                   "Only used when USING_MPI for parallelization.\n"
00217                   "This is the number of examples seen by one process\n"
00218                   "during training after which the weight updates are shared\n"
00219                   "among all the processes.\n");
00220 
00221     declareOption(ol, "sum_parallel_contributions",
00222                   &PartSupervisedDBN::sum_parallel_contributions,
00223                   OptionBase::buildoption,
00224                   "Only used when USING_MPI for parallelization.\n"
00225                   "sum or average the delta-w contributions from different processes?\n");
00226 
00227     declareOption(ol, "n_layers", &PartSupervisedDBN::n_layers,
00228                   OptionBase::learntoption,
00229                   "Number of unsupervised layers, including input layer");
00230 
00231     declareOption(ol, "last_layer", &PartSupervisedDBN::last_layer,
00232                   OptionBase::learntoption,
00233                   "Last layer, learning joint representations of input and"
00234                   " target");
00235 
00236     declareOption(ol, "joint_layer", &PartSupervisedDBN::joint_layer,
00237                   OptionBase::nosave,
00238                   "Concatenation of target_layer and layers[n_layers-1]");
00239 
00240     declareOption(ol, "joint_params", &PartSupervisedDBN::joint_params,
00241                   OptionBase::nosave,
00242                   "Parameters linking joint_layer and last_layer");
00243 
00244     declareOption(ol, "regressors", &PartSupervisedDBN::regressors,
00245                   OptionBase::learntoption,
00246                   "Logistic regressors that will provide the supervised"
00247                   " gradient\n"
00248                   "for each RBMParameters.\n");
00249 
00250     // Now call the parent class' declareOptions().
00251     inherited::declareOptions(ol);
00252 }
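
// Purely illustrative sketch (not from the original source) of how the build
// options above might be specified in a PLearn script; the exact serialization
// syntax, the layer classes and the sizes used here are assumptions made for
// the example only:
//
//   PartSupervisedDBN(
//       learning_rate = 0.01 ;
//       supervised_learning_rates = [ 0.1, 0.1, 0.1 ] ;
//       fine_tuning_learning_rate = 0.001 ;
//       training_schedule = [ 10000, 20000, 30000 ] ;
//       fine_tuning_method = "EGD" ;
//       layers = [ RBMBinomialLayer( size = 784 ),
//                  RBMBinomialLayer( size = 500 ),
//                  RBMBinomialLayer( size = 500 ),
//                  RBMBinomialLayer( size = 1000 ) ] ;
//       target_layer = RBMMultinomialLayer( size = 10 ) ;
//   )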
00253 
00255 // build //
00257 void PartSupervisedDBN::build()
00258 {
00259     // ### Nothing to add here, simply calls build_().
00260     inherited::build();
00261     build_();
00262 }
00263 
00265 // build_ //
00267 void PartSupervisedDBN::build_()
00268 {
00269     MODULE_LOG << "build_() called" << endl;
00270     n_layers = layers.length();
00271     if( n_layers <= 1 )
00272         return;
00273 
00274     if( fine_tuning_learning_rate < 0. )
00275         fine_tuning_learning_rate = learning_rate;
00276 
00277     // check value of initialization_method
00278     string im = lowerstring( initialization_method );
00279     if( im == "" || im == "uniform_sqrt" )
00280         initialization_method = "uniform_sqrt";
00281     else if( im == "uniform_linear" )
00282         initialization_method = im;
00283     else if( im == "zero" )
00284         initialization_method = im;
00285     else
00286         PLERROR( "PartSupervisedDBN::build_ - initialization_method\n"
00287                  "\"%s\" unknown.\n", initialization_method.c_str() );
00288     MODULE_LOG << "  initialization_method = \"" << initialization_method
00289         << "\"" << endl;
00290 
00291     // check value of fine_tuning_method
00292     string ftm = lowerstring( fine_tuning_method );
00293     if( ftm == "" || ftm == "none" )
00294         fine_tuning_method = "";
00295     else if( ftm == "cd" || ftm == "contrastive_divergence" )
00296         fine_tuning_method = "CD";
00297     else if( ftm == "egd" || ftm == "error_gradient_descent" )
00298         fine_tuning_method = "EGD";
00299     else if( ftm == "ws" || ftm == "wake_sleep" )
00300         fine_tuning_method = "WS";
00301     else
00302         PLERROR( "PartSupervisedDBN::build_ - fine_tuning_method \"%s\"\n"
00303                  "is unknown.\n", fine_tuning_method.c_str() );
00304     MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
00305         <<  endl;
00306     //TODO: build structure to store gradients during gradient descent
00307 
00308     if( training_schedule.length() != n_layers-1 )
00309         training_schedule = TVec<int>( n_layers-1, 1000000 );
00310 
00311     // fills with 0's if too short
00312     supervised_learning_rates.resize( n_layers-1 );
00313 
00314     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00315     MODULE_LOG << "learning_rate = " << learning_rate << endl;
00316     MODULE_LOG << "fine_tuning_learning_rate = "
00317         << fine_tuning_learning_rate << endl;
00318     MODULE_LOG << "supervised_learning_rates = "
00319         << supervised_learning_rates << endl;
00320     MODULE_LOG << endl;
00321 
00322     build_layers();
00323     build_params();
00324     build_regressors();
00325 }
00326 
00327 void PartSupervisedDBN::build_layers()
00328 {
00329     MODULE_LOG << "build_layers() called" << endl;
00330     if( inputsize_ >= 0 )
00331     {
00332         PLASSERT( layers[0]->size + target_layer->size == inputsize() );
00333         setPredictorPredictedSizes( layers[0]->size,
00334                                     target_layer->size, false );
00335         MODULE_LOG << "  n_predictor = " << n_predictor << endl;
00336         MODULE_LOG << "  n_predicted = " << n_predicted << endl;
00337     }
00338 
00339     for( int i=0 ; i<n_layers ; i++ )
00340         layers[i]->random_gen = random_gen;
00341     target_layer->random_gen = random_gen;
00342 
00343     last_layer = layers[n_layers-1];
00344 
00345     // concatenate target_layer and layers[n_layers-2] into joint_layer,
00346     // if it is not already done
00347     if( !joint_layer
00348         || joint_layer->sub_layers.size() !=2
00349         || joint_layer->sub_layers[0] != target_layer
00350         || joint_layer->sub_layers[1] != layers[n_layers-2] )
00351     {
00352         TVec< PP<RBMLayer> > the_sub_layers( 2 );
00353         the_sub_layers[0] = target_layer;
00354         the_sub_layers[1] = layers[n_layers-2];
00355         joint_layer = new RBMMixedLayer( the_sub_layers );
00356     }
00357     joint_layer->random_gen = random_gen;
00358 }
00359 
00360 void PartSupervisedDBN::build_params()
00361 {
00362     MODULE_LOG << "build_params() called" << endl;
00363     if( params.length() == 0 )
00364     {
00365         params.resize( n_layers-1 );
00366         for( int i=0 ; i<n_layers-1 ; i++ )
00367             params[i] = new RBMLLParameters();
00368     }
00369     else if( params.length() != n_layers-1 )
00370         PLERROR( "PartSupervisedDBN::build_params - params.length() should\n"
00371                  "be equal to layers.length()-1 (%d != %d).\n",
00372                  params.length(), n_layers-1 );
00373 
00374     activation_gradients.resize( n_layers-1 );
00375     expectation_gradients.resize( n_layers-1 );
00376     output_gradient.resize( n_predicted );
00377 
00378     for( int i=0 ; i<n_layers-1 ; i++ )
00379     {
00380         //TODO: call changeOptions instead
00381         params[i]->down_units_types = layers[i]->units_types;
00382         params[i]->up_units_types = layers[i+1]->units_types;
00383         params[i]->initialization_method = initialization_method;
00384         params[i]->random_gen = random_gen;
00385         params[i]->build();
00386 
00387         activation_gradients[i].resize( params[i]->down_layer_size );
00388         expectation_gradients[i].resize( params[i]->down_layer_size );
00389     }
00390 
00391     if( target_layer && !target_params )
00392         target_params = new RBMLLParameters();
00393 
00394     //TODO: call changeOptions instead
00395     target_params->down_units_types = target_layer->units_types;
00396     target_params->up_units_types = last_layer->units_types;
00397     target_params->initialization_method = initialization_method;
00398     target_params->random_gen = random_gen;
00399     target_params->build();
00400 
00401     // build joint_params from params[n_layers-2] and target_params
00402     // if it is not already done
00403     if( !joint_params
00404         || joint_params->target_params != target_params
00405         || joint_params->cond_params != params[n_layers-2] )
00406     {
00407         joint_params = new RBMJointLLParameters( target_params,
00408                                                  params[n_layers-2] );
00409     }
00410     joint_params->random_gen = random_gen;
00411 
00412     // share the biases
00413     for( int i=0 ; i<n_layers-2 ; i++ )
00414         params[i]->up_units_bias = params[i+1]->down_units_bias;
00415 }
00416 
00417 void PartSupervisedDBN::build_regressors()
00418 {
00419     MODULE_LOG << "build_regressors() called" << endl;
00420     if( regressors.length() != n_layers-1 )
00421         regressors.resize( n_layers-1 );
00422 
00423     for( int i=0 ; i<n_layers-1 ; i++ )
00424         if( !(regressors[i])
00425             || regressors[i]->input_size != params[i]->up_layer_size )
00426         {
00427             MODULE_LOG << "creating regressor " << i << endl;
00428 
00429             // A linear layer of the appropriate size, trained by stochastic
00430             // gradient descent; initial weights are 0.
00431             PP<GradNNetLayerModule> p_gnnlm = new GradNNetLayerModule();
00432             p_gnnlm->input_size = params[i]->up_layer_size;
00433             p_gnnlm->output_size = n_predicted;
00434             p_gnnlm->start_learning_rate = supervised_learning_rates[i];
00435             MODULE_LOG << "start_learning_rate = "
00436                 << p_gnnlm->start_learning_rate << endl;
00437             p_gnnlm->init_weights_random_scale = 0.;
00438             p_gnnlm->build();
00439 
00440             // The softmax+NLL part
00441             PP<NLLErrModule> p_nll = new NLLErrModule();
00442             p_nll->input_size = n_predicted;
00443             p_nll->output_size = 1;
00444             p_nll->build();
00445 
00446             // Stack them, and...
00447             TVec< PP<OnlineLearningModule> > stack(2);
00448             stack[0] = (GradNNetLayerModule*) p_gnnlm;
00449             stack[1] = (NLLErrModule*) p_nll;
00450 
00451             // ... encapsulate them in another Module, that will compute
00452             // and backprop the NLL
00453             PP<StackedModulesModule> p_smm = new StackedModulesModule();
00454             p_smm->modules = stack;
00455             p_smm->last_layer_is_cost = true;
00456             p_smm->target_size = n_predicted;
00457             p_smm->build();
00458 
00459             regressors[i] = (StackedModulesModule*) p_smm;
00460         }
00461 }
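
// Each regressor built above therefore computes, from the expectation h of
// layers[i+1], the cost NLL( softmax( W h + b ), target ): GradNNetLayerModule
// provides the affine part W h + b (weights initialized to zero) and
// NLLErrModule the softmax and negative log-likelihood. Its bpropUpdate()
// both updates W and b and returns the gradient with respect to h, which
// supervisedContrastiveDivergenceStep() then propagates into the RBM
// parameters.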
00462 
00463 
00465 // forget //
00467 void PartSupervisedDBN::forget()
00468 {
00469     MODULE_LOG << "forget() called" << endl;
00476     resetGenerator(seed_);
00477     for( int i=0 ; i<n_layers-1 ; i++ )
00478         params[i]->forget();
00479 
00480     for( int i=0 ; i<n_layers ; i++ )
00481         layers[i]->reset();
00482 
00483 #if USING_MPI
00484     global_params.resize(0);
00485 #endif
00486     target_params->forget();
00487     target_layer->reset();
00488 
00489     stage = 0;
00490 }
00491 
00493 // generate //
00495 void PartSupervisedDBN::generate(Vec& y) const
00496 {
00497     PLERROR("generate not implemented for PartSupervisedDBN");
00498 }
00499 
00501 // cdf //
00503 real PartSupervisedDBN::cdf(const Vec& y) const
00504 {
00505     PLERROR("cdf not implemented for PartSupervisedDBN"); return 0;
00506 }
00507 
00509 // expectation //
00511 void PartSupervisedDBN::expectation(Vec& mu) const
00512 {
00513     mu.resize( predicted_size );
00514 
00515     // Propagate input (predictor_part) until penultimate layer
00516     layers[0]->expectation << predictor_part;
00517     for( int i=0 ; i<n_layers-2 ; i++ )
00518     {
00519         params[i]->setAsDownInput( layers[i]->expectation );
00520         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00521         layers[i+1]->computeExpectation();
00522     }
00523 
00524     // Set layers[n_layers-2]->expectation (penultimate) as conditioning input
00525     // of joint_params
00526     joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00527 
00528     // Get all activations on target_layer from target_params
00529     target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00530     target_layer->computeExpectation();
00531 
00532     mu << target_layer->expectation;
00533 }
00534 
00536 // density //
00538 real PartSupervisedDBN::density(const Vec& y) const
00539 {
00540     PLASSERT( y.size() == n_predicted );
00541 
00542     // TODO: 'y'[0] should rather be the integer "index" itself!
00543     int index = argmax( y );
00544 
00545     // If y != onehot( index ), then density is 0
00546     if( !is_equal( y[index], 1. ) )
00547         return 0;
00548     for( int i=0 ; i<n_predicted ; i++ )
00549         if( !is_equal( y[i], 0 ) && i != index )
00550             return 0;
00551 
00552     expectation( store_expect );
00553     return store_expect[index];
00554 }
00555 
00556 
00558 // log_density //
00560 real PartSupervisedDBN::log_density(const Vec& y) const
00561 {
00562     return pl_log( density(y) );
00563 }
00564 
00566 // survival_fn //
00568 real PartSupervisedDBN::survival_fn(const Vec& y) const
00569 {
00570     PLERROR("survival_fn not implemented for PartSupervisedDBN"); return 0;
00571 }
00572 
00574 // variance //
00576 void PartSupervisedDBN::variance(Mat& cov) const
00577 {
00578     PLERROR("variance not implemented for PartSupervisedDBN");
00579 }
00580 
00582 // makeDeepCopyFromShallowCopy //
00584 void PartSupervisedDBN::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00585 {
00586     inherited::makeDeepCopyFromShallowCopy(copies);
00587 
00588     deepCopyField(layers, copies);
00589     deepCopyField(last_layer, copies);
00590     deepCopyField(target_layer, copies);
00591     deepCopyField(joint_layer, copies);
00592     deepCopyField(params, copies);
00593     deepCopyField(joint_params, copies);
00594     deepCopyField(target_params, copies);
00595     deepCopyField(training_schedule, copies);
00596 }
00597 
00599 // setPredictor //
00601 void PartSupervisedDBN::setPredictor(const Vec& predictor, bool call_parent)
00602     const
00603 {
00604     if (call_parent)
00605         inherited::setPredictor(predictor, true);
00606     // ### Add here any specific code required by your subclass.
00607 }
00608 
00610 // setPredictorPredictedSizes //
00612 bool PartSupervisedDBN::setPredictorPredictedSizes(int the_predictor_size,
00613                                                    int the_predicted_size,
00614                                                    bool call_parent)
00615 {
00616     bool sizes_have_changed = false;
00617     if (call_parent)
00618         sizes_have_changed = inherited::setPredictorPredictedSizes(
00619             the_predictor_size, the_predicted_size, true);
00620 
00621     // ### Add here any specific code required by your subclass.
00622     if( (the_predictor_size >= 0 && the_predictor_size != layers[0]->size) ||
00623         (the_predicted_size >= 0 && the_predicted_size != target_layer->size) )
00624         PLERROR( "PartSupervisedDBN::setPredictorPredictedSizes - \n"
00625                  "n_predictor should be equal to layer[0]->size (%d)\n"
00626                  "n_predicted should be equal to target_layer->size (%d).\n",
00627                  layers[0]->size, target_layer->size );
00628 
00629     n_predictor = layers[0]->size;
00630     n_predicted = target_layer->size;
00631 
00632     // Returned value.
00633     return sizes_have_changed;
00634 }
00635 
00636 
00638 // train //
00640 void PartSupervisedDBN::train()
00641 {
00642     MODULE_LOG << "train() called" << endl;
00643     // The role of the train method is to bring the learner up to
00644     // stage==nstages, updating train_stats with training costs measured
00645     // on-line in the process.
00646 
00647     /* TYPICAL CODE:
00648 
00649     static Vec input;  // static so we don't reallocate memory each time...
00650     static Vec target; // (but be careful that static means shared!)
00651     input.resize(inputsize());    // the train_set's inputsize()
00652     target.resize(targetsize());  // the train_set's targetsize()
00653     real weight;
00654 
00655     // This generic PLearner method does a number of standard stuff useful for
00656     // (almost) any learner, and return 'false' if no training should take
00657     // place. See PLearner.h for more details.
00658     if (!initTrain())
00659         return;
00660 
00661     while(stage<nstages)
00662     {
00663         // clear statistics of previous epoch
00664         train_stats->forget();
00665 
00666         //... train for 1 stage, and update train_stats,
00667         // using train_set->getExample(input, target, weight)
00668         // and train_stats->update(train_costs)
00669 
00670         ++stage;
00671         train_stats->finalize(); // finalize statistics for this epoch
00672     }
00673     */
00674 
00675     Vec input( inputsize() );
00676     Vec target( targetsize() ); // unused
00677     real weight; // unused
00678     Vec train_costs(2);
00679 
00680     // hack for supervised cost
00681     real sum_sup_cost = 0;
00682     PStream sup_cost_file = openFile( expdir/"sup_cost.amat",
00683                                       PStream::raw_ascii, "a" );
00684 
00685     int nsamples = train_set->length();
00686 
00687 #if USING_MPI
00688     // initialize global parameters so that they can easily be shared across
00689     // multiple CPUs
00690 
00691     // wait until we can attach a gdb process
00692     //pout << "START WAITING..." << endl;
00693     //sleep(20);
00694     //pout << "DONE WAITING!" << endl;
00695     MPI_Barrier(MPI_COMM_WORLD);
00696     int total_bsize=parallelization_minibatch_size*PLMPI::size;
00697 //#endif
00698     forget(); // DEBUGGING TO GET REPRODUCIBLE RESULTS
00699     if (global_params.size()==0)
00700     {
00701         int n_params = joint_params->nParameters(1,1);
00702         for (int i=0;i<params.length()-1;i++)
00703             n_params += params[i]->nParameters(0,1);
00704         global_params.resize(n_params);
00705         previous_global_params.resize(n_params);
00706         Vec p=global_params;
00707         for (int i=0;i<params.length()-1;i++)
00708             p=params[i]->makeParametersPointHere(p,0,1);
00709         p=joint_params->makeParametersPointHere(p,1,1);
00710         if (p.length()!=0)
00711             PLERROR("PartSupervisedDBN: Inconsistencies between nParameters and makeParametersPointHere!");
00712     }
00713 #endif
00714 
00715     MODULE_LOG << "  nsamples = " << nsamples << endl;
00716     MODULE_LOG << "  initial stage = " << stage << endl;
00717     MODULE_LOG << "  objective: nstages = " << nstages << endl;
00718 
00719     if( !initTrain() )
00720     {
00721         MODULE_LOG << "train() aborted" << endl;
00722         return;
00723     }
00724 
00725     ProgressBar* pb = 0;
00726 
00727     // clear stats of previous epoch
00728     train_stats->forget();
00729 
00730     /***** initial greedy training *****/
00731     for( int layer=0 ; layer < n_layers-2 ; layer++ )
00732     {
00733         MODULE_LOG << "Training parameters between layers " << layer
00734             << " and " << layer+1 << endl;
00735 
00736         int end_stage = min( training_schedule[layer], nstages );
00737 
00738         MODULE_LOG << "  stage = " << stage << endl;
00739         MODULE_LOG << "  end_stage = " << end_stage << endl;
00740 
00741         if( report_progress && stage < end_stage )
00742         {
00743             pb = new ProgressBar( "Training layer "+tostring(layer)
00744                                   +" of "+classname(),
00745                                   end_stage - stage );
00746         }
00747 
00748         params[layer]->learning_rate = learning_rate;
00749 
00750         int momentum_switch_stage = momentum_switch_time;
00751         if( layer > 0 )
00752             momentum_switch_stage += training_schedule[layer-1];
00753 
00754         if( stage <= momentum_switch_stage )
00755             params[layer]->momentum = initial_momentum;
00756         else
00757             params[layer]->momentum = final_momentum;
00758 
00759 #if USING_MPI
00760         // make a copy of the parameters as they were at the beginning of
00761         // the minibatch
00762         if (sum_parallel_contributions)
00763             previous_global_params << global_params;
00764 #endif
00765         int begin_sample = stage % nsamples;
00766         for( ; stage<end_stage ; stage++ )
00767         {
00768 #if USING_MPI
00769             // only look at some of the examples, associated with this process
00770             // number (rank)
00771             if (stage%PLMPI::size==PLMPI::rank)
00772             {
00773 #endif
00774 //                resetGenerator(1); // DEBUGGING HACK TO MAKE SURE RESULTS ARE INDEPENDENT OF PARALLELIZATION
00775                 int sample = stage % nsamples;
00776                 if( sample == begin_sample )
00777                 {
00778                     sup_cost_file << sum_sup_cost / nsamples << endl;
00779                     sum_sup_cost = 0;
00780                 }
00781 
00782                 train_set->getExample(sample, input, target, weight);
00783                 sum_sup_cost += greedyStep( input, layer );
00784 
00785                 if( stage == momentum_switch_stage )
00786                     params[layer]->momentum = final_momentum;
00787 
00788                 if( pb )
00789                 {
00790                     if( layer == 0 )
00791                         pb->update( stage + 1 );
00792                     else
00793                         pb->update( stage - training_schedule[layer-1] + 1 );
00794                 }
00795 #if USING_MPI
00796             }
00797             // time to share among processors
00798             if (stage%total_bsize==0 || stage==end_stage-1)
00799                 shareParamsMPI();
00800 #endif
00801         }
00802     }
00803 
00804     /***** joint training *****/
00805     MODULE_LOG << "Training joint parameters, between target,"
00806         << " penultimate (" << n_layers-2 << ")," << endl
00807         << "and last (" << n_layers-1 << ") layers." << endl;
00808 
00809     int end_stage = min( training_schedule[n_layers-2], nstages );
00810 
00811     MODULE_LOG << "  stage = " << stage << endl;
00812     MODULE_LOG << "  end_stage = " << end_stage << endl;
00813 
00814     if( report_progress && stage < end_stage )
00815         pb = new ProgressBar( "Training joint layer (target and "
00816                              +tostring(n_layers-2)+") of "+classname(),
00817                              end_stage - stage );
00818 
00819     joint_params->learning_rate = learning_rate;
00820 //    target_params->learning_rate = learning_rate;
00821 
00822     int previous_stage = (n_layers < 3) ? 0 : training_schedule[n_layers-3];
00823     int momentum_switch_stage = momentum_switch_time + previous_stage;
00824     if( stage <= momentum_switch_stage )
00825         joint_params->momentum = initial_momentum;
00826     else
00827         joint_params->momentum = final_momentum;
00828 
00829     int begin_sample = stage % nsamples;
00830     int last = min(training_schedule[n_layers-2],nstages);
00831     for( ; stage<last ; stage++ )
00832     {
00833 #if USING_MPI
00834         // only look at some of the examples, associated with this process
00835         // number (rank)
00836         if (stage%PLMPI::size==PLMPI::rank)
00837         {
00838 #endif
00839             int sample = stage % nsamples;
00840             if( sample == begin_sample )
00841             {
00842                 sup_cost_file << sum_sup_cost / nsamples << endl;
00843                 sum_sup_cost = 0;
00844             }
00845 
00846             train_set->getExample(sample, input, target, weight);
00847             sum_sup_cost += jointGreedyStep( input );
00848 
00849             if( stage == momentum_switch_stage )
00850                 joint_params->momentum = final_momentum;
00851 
00852             if( pb )
00853                 pb->update( stage - previous_stage + 1 );
00854 #if USING_MPI
00855         }
00856         // time to share among processors
00857         if (stage%total_bsize==0 || stage==last-1)
00858             shareParamsMPI();
00859 #endif
00860     }
00861 
00862     /***** fine-tuning *****/
00863     MODULE_LOG << "Fine-tuning all parameters, using method "
00864         << fine_tuning_method << endl;
00865     MODULE_LOG << "  fine_tuning_learning_rate = "
00866         << fine_tuning_learning_rate << endl;
00867 
00868     int init_stage = stage;
00869     if( report_progress && stage < nstages )
00870         pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00871                              +classname(),
00872                              nstages - init_stage );
00873 
00874     for( int i=0 ; i<n_layers-1 ; i++ )
00875         params[i]->learning_rate = fine_tuning_learning_rate;
00876     joint_params->learning_rate = fine_tuning_learning_rate;
00877     target_params->learning_rate = fine_tuning_learning_rate;
00878 
00879     if( fine_tuning_method == "" ) // do nothing
00880     {
00881         stage = nstages;
00882         if( pb )
00883             pb->update( nstages - init_stage + 1 );
00884     }
00885     else if( fine_tuning_method == "EGD" )
00886     {
00887         begin_sample = stage % nsamples;
00888         for( ; stage<nstages ; stage++ )
00889         {
00890 #if USING_MPI
00891             // only look at some of the examples, associated with
00892             // this process number (rank)
00893             if (stage%PLMPI::size==PLMPI::rank)
00894             {
00895 #endif
00896                 int sample = stage % nsamples;
00897                 if( sample == begin_sample )
00898                     train_stats->forget();
00899 
00900                 train_set->getExample(sample, input, target, weight);
00901                 fineTuneByGradientDescent( input, train_costs );
00902                 train_stats->update( train_costs );
00903 
00904                 if( pb )
00905                     pb->update( stage - init_stage + 1 );
00906 #if USING_MPI
00907             }
00908             // time to share among processors
00909             if (stage%total_bsize==0 || stage==nstages-1)
00910                 shareParamsMPI();
00911 #endif
00912         }
00913         train_stats->finalize(); // finalize statistics for this epoch
00914     }
00915     else
00916         PLERROR( "Fine-tuning methods other than \"EGD\" are not"
00917                  " implemented yet." );
00918 
00919     if( pb )
00920         delete pb;
00921 
00922     MODULE_LOG << "Training finished" << endl << endl;
00923 }
00924 
00925 // assumes that down_layer->expectation is set
00926 real PartSupervisedDBN::supervisedContrastiveDivergenceStep(
00927     const PP<RBMLayer>& down_layer,
00928     const PP<RBMParameters>& parameters,
00929     const PP<RBMLayer>& up_layer,
00930     const Vec& target,
00931     int index )
00932 {
00933 
00934     real supervised_cost = MISSING_VALUE;
00935     if( supervised_learning_rates[index] > 0 )
00936     {
00937         // (Deterministic) forward pass
00938         parameters->setAsDownInput( down_layer->expectation );
00939         up_layer->getAllActivations( parameters );
00940         up_layer->computeExpectation();
00941 
00942         Vec supervised_input = up_layer->expectation.copy();
00943         supervised_input.append( target );
00944 
00945         // Compute supervised cost and gradient
00946         Vec sup_cost(1);
00947         regressors[index]->fprop( supervised_input, sup_cost );
00948         regressors[index]->bpropUpdate( supervised_input, sup_cost,
00949                                         expectation_gradients[index+1],
00950                                         Vec() );
00951 
00952         // propagate gradient to params
00953         up_layer->bpropUpdate( up_layer->activations,
00954                                up_layer->expectation,
00955                                activation_gradients[index+1],
00956                                expectation_gradients[index+1] );
00957 
00958         // put the right learning rate
00959         parameters->learning_rate = supervised_learning_rates[index];
00960         // updates the parameters
00961         parameters->bpropUpdate( down_layer->expectation,
00962                                  up_layer->activations,
00963                                  expectation_gradients[index],
00964                                  activation_gradients[index+1] );
00965         // put the learning rate back
00966         parameters->learning_rate = learning_rate;
00967 
00968         // return the cost
00969         supervised_cost = sup_cost[0];
00970     }
00971 
00972     // We have to do another forward pass because the weights have changed
00973     contrastiveDivergenceStep( down_layer, parameters, up_layer );
00974 
00975     // return supervised cost
00976     return supervised_cost;
00977 }
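
// In summary: when supervised_learning_rates[index] > 0, each greedy step
// applies two updates to the same RBMParameters -- a supervised gradient step
// (at supervised_learning_rates[index], through regressors[index]) followed by
// an unsupervised CD update (at learning_rate). With a zero supervised
// learning rate, the step reduces to plain contrastive divergence.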
00978 
00979 void PartSupervisedDBN::contrastiveDivergenceStep(
00980     const PP<RBMLayer>& down_layer,
00981     const PP<RBMParameters>& parameters,
00982     const PP<RBMLayer>& up_layer )
00983 {
00984     // Re-initialize values in down_layer
00985     if( use_sample_or_expectation[0] == 0 )
00986         parameters->setAsDownInput( down_layer->expectation );
00987     else
00988     {
00989         down_layer->generateSample();
00990         parameters->setAsDownInput( down_layer->sample );
00991     }
00992 
00993     // positive phase
00994     up_layer->getAllActivations( parameters );
00995     up_layer->computeExpectation();
00996     up_layer->generateSample();
00997 
00998     // accumulate stats using the right vector (sample or expectation)
00999     if( use_sample_or_expectation[0] == 2 )
01000     {
01001         if( use_sample_or_expectation[1] == 2 )
01002             parameters->accumulatePosStats(down_layer->sample,
01003                                            up_layer->sample );
01004         else
01005             parameters->accumulatePosStats(down_layer->sample,
01006                                            up_layer->expectation );
01007     }
01008     else
01009     {
01010         if( use_sample_or_expectation[1] == 2 )
01011             parameters->accumulatePosStats(down_layer->expectation,
01012                                            up_layer->sample);
01013         else
01014             parameters->accumulatePosStats(down_layer->expectation,
01015                                            up_layer->expectation );
01016     }
01017 
01018     // down propagation
01019     if( use_sample_or_expectation[1] == 0 )
01020         parameters->setAsUpInput( up_layer->expectation );
01021     else
01022         parameters->setAsUpInput( up_layer->sample );
01023 
01024     down_layer->getAllActivations( parameters );
01025     down_layer->computeExpectation();
01026     down_layer->generateSample();
01027 
01028     if( use_sample_or_expectation[2] == 0 )
01029         parameters->setAsDownInput( down_layer->expectation );
01030     else
01031         parameters->setAsDownInput( down_layer->sample );
01032 
01033     up_layer->getAllActivations( parameters );
01034     up_layer->computeExpectation();
01035 
01036     // accumulate stats using the right vector (sample or expectation)
01037     if( use_sample_or_expectation[3] == 2 )
01038     {
01039         up_layer->generateSample();
01040         if( use_sample_or_expectation[2] == 2 )
01041             parameters->accumulateNegStats( down_layer->sample,
01042                                             up_layer->sample );
01043         else
01044             parameters->accumulateNegStats( down_layer->expectation,
01045                                             up_layer->sample );
01046     }
01047     else
01048     {
01049         if( use_sample_or_expectation[2] == 2 )
01050             parameters->accumulateNegStats( down_layer->sample,
01051                                             up_layer->expectation );
01052         else
01053             parameters->accumulateNegStats( down_layer->expectation,
01054                                             up_layer->expectation );
01055     }
01056 
01057     // update
01058     parameters->update();
01059 }
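
// The update() call above is expected to implement, roughly, the usual CD-1
// rule for the weight between visible unit i and hidden unit j:
//     delta w_ij ~= learning_rate * ( <v_i h_j>_positive - <v_i h_j>_negative )
// where each factor is a sample or an expectation as selected by
// use_sample_or_expectation; the exact handling of the learning rate and
// momentum is left to RBMParameters::update().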
01060 
01061 real PartSupervisedDBN::greedyStep( const Vec& input, int index )
01062 {
01063     // deterministic propagation until we reach index
01064     layers[0]->expectation << input.subVec(0, n_predictor);
01065     for( int i=0 ; i<index ; i++ )
01066     {
01067         params[i]->setAsDownInput( layers[i]->expectation );
01068         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01069         layers[i+1]->computeExpectation();
01070     }
01071 
01072     // perform one step of CD + partially supervised gradient
01073     real sup_cost = supervisedContrastiveDivergenceStep(
01074                         layers[index],
01075                         (RBMLLParameters*) params[index],
01076                         layers[index+1],
01077                         input.subVec(n_predictor,n_predicted),
01078                         index );
01079     return sup_cost;
01080 }
01081 
01082 real PartSupervisedDBN::jointGreedyStep( const Vec& input )
01083 {
01084     // deterministic propagation until we reach n_layers-2, setting the input
01085     // of the "input" part of joint_layer
01086     layers[0]->expectation << input.subVec( 0, n_predictor );
01087     for( int i=0 ; i<n_layers-2 ; i++ )
01088     {
01089         params[i]->setAsDownInput( layers[i]->expectation );
01090         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01091         layers[i+1]->computeExpectation();
01092     }
01093 
01094     real supervised_cost = MISSING_VALUE;
01095     if( supervised_learning_rates[n_layers-2] > 0 )
01096     {
01097         // deterministic forward pass
01098         joint_params->setAsCondInput( layers[n_layers-2]->expectation );
01099         target_layer->getAllActivations( (RBMLLParameters*) joint_params );
01100         target_layer->computeExpectation();
01101 
01102         // now get the actual index of the target
01103         int actual_index = argmax( input.subVec( n_predictor, n_predicted ) );
01104 #ifdef BOUNDCHECK
01105         for( int i=0 ; i<n_predicted ; i++ )
01106             PLASSERT( is_equal( input[n_predictor+i], 0. ) ||
01107                     i == actual_index && is_equal( input[n_predictor+i], 1 ) );
01108 #endif
01109 
01110         // get supervised cost (= train cost) and output gradient
01111         supervised_cost = -pl_log( target_layer->expectation[actual_index] );
01112         output_gradient << target_layer->expectation;
01113         output_gradient[actual_index] -= 1.;
01114 
01115         // put the right learning rate
01116         joint_params->learning_rate = supervised_learning_rates[n_layers-2];
01117         // backprop and update
01118         joint_params->bpropUpdate( layers[n_layers-2]->expectation,
01119                                    target_layer->expectation,
01120                                    expectation_gradients[n_layers-2],
01121                                    output_gradient );
01122         // put the learning rate back
01123         joint_params->learning_rate = learning_rate;
01124 
01125     }
01126 
01127     // now fill the "target" part of joint_layer
01128     target_layer->expectation << input.subVec( n_predictor, n_predicted );
01129     // do contrastive divergence step with the new weights and actual target
01130     contrastiveDivergenceStep( (RBMLayer*) joint_layer,
01131                                (RBMLLParameters*) joint_params,
01132                                last_layer );
01133 
01134     // return supervised cost
01135     return supervised_cost;
01136 }
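
// jointGreedyStep() thus mirrors greedyStep() for the top RBM: an optional
// supervised update of joint_params, using the conditional distribution of the
// target computed from the penultimate layer, followed by a CD step on the RBM
// formed by joint_layer (target + penultimate layer) and last_layer.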
01137 
01138 void PartSupervisedDBN::fineTuneByGradientDescent( const Vec& input,
01139                                                    const Vec& train_costs )
01140 {
01141     // split input in predictor_part and predicted_part
01142     splitCond(input);
01143 
01144     // compute predicted_part expectation, conditioned on predictor_part
01145     // (forward pass)
01146     expectation( output_gradient );
01147 
01148     int actual_index = argmax(predicted_part);
01149 
01150     // update train_costs
01151 #ifdef BOUNDCHECK
01152     for( int i=0 ; i<n_predicted ; i++ )
01153         PLASSERT( is_equal( predicted_part[i], 0. ) ||
01154                 i == actual_index && is_equal( predicted_part[i], 1. ) );
01155 #endif
01156     train_costs[0] = -pl_log( target_layer->expectation[actual_index] );
01157     int predicted_index = argmax( target_layer->expectation );
01158     if( predicted_index == actual_index )
01159         train_costs[1] = 0;
01160     else
01161         train_costs[1] = 1;
01162 
01163     // output gradient
01164     output_gradient[actual_index] -= 1.;
01165 
01166     joint_params->bpropUpdate( layers[n_layers-2]->expectation,
01167                                target_layer->expectation,
01168                                expectation_gradients[n_layers-2],
01169                                output_gradient );
01170 
01171     for( int i=n_layers-2 ; i>0 ; i-- )
01172     {
01173         layers[i]->bpropUpdate( layers[i]->activations,
01174                                 layers[i]->expectation,
01175                                 activation_gradients[i],
01176                                 expectation_gradients[i] );
01177         params[i-1]->bpropUpdate( layers[i-1]->expectation,
01178                                   layers[i]->activations,
01179                                   expectation_gradients[i-1],
01180                                   activation_gradients[i] );
01181     }
01182 }
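
// Fine-tuning thus backpropagates the supervised NLL from the target softmax
// through joint_params and then down the stack, alternating
// layers[i]->bpropUpdate() and params[i-1]->bpropUpdate(), with all parameters
// using the fine_tuning_learning_rate set in train().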
01183 
01184 
01185 void PartSupervisedDBN::computeCostsFromOutputs(const Vec& input,
01186                                                 const Vec& output,
01187                                                 const Vec& target,
01188                                                 Vec& costs) const
01189 {
01190     char c = outputs_def[0];
01191     if( c == 'l' || c == 'd' )
01192         inherited::computeCostsFromOutputs(input, output, target, costs);
01193     else if( c == 'e' )
01194     {
01195         costs.resize( 2 );
01196         splitCond(input);
01197 
01198         // actual_index is the actual 'target'
01199         int actual_index = argmax(predicted_part);
01200 #ifdef BOUNDCHECK
01201         for( int i=0 ; i<n_predicted ; i++ )
01202             PLASSERT( is_equal( predicted_part[i], 0. ) ||
01203                     i == actual_index && is_equal( predicted_part[i], 1. ) );
01204 #endif
01205         costs[0] = -pl_log( output[actual_index] );
01206 
01207         // predicted_index is the most probable predicted class
01208         int predicted_index = argmax(output);
01209         if( predicted_index == actual_index )
01210             costs[1] = 0;
01211         else
01212             costs[1] = 1;
01213     }
01214 }
01215 
01216 TVec<string> PartSupervisedDBN::getTestCostNames() const
01217 {
01218     char c = outputs_def[0];
01219     TVec<string> result;
01220     if( c == 'l' || c == 'd' )
01221         result.append( "NLL" );
01222     else if( c == 'e' )
01223     {
01224         result.append( "NLL" );
01225         result.append( "class_error" );
01226     }
01227     return result;
01228 }
01229 
01230 TVec<string> PartSupervisedDBN::getTrainCostNames() const
01231 {
01232     return getTestCostNames();
01233 }
01234 
01235 #if USING_MPI
01236 void PartSupervisedDBN::shareParamsMPI()
01237 {
01238     if (sum_parallel_contributions)
01239     {
01240         if (PLMPI::rank!=0)
01241             // after this line global_params contains the delta for all cpus
01242             // except root
01243             global_params -= previous_global_params;
01244         // while the root contains the previous global params + its delta
01245         previous_global_params << global_params;
01246         // hence summing everything (result in cpu0.global_params)
01247         // yields the sum of all the changes plus the previous global params:
01248         MPI_Reduce(previous_global_params.data(),global_params.data(),
01249                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01250                    MPI_COMM_WORLD);
01251         // send it back to every one
01252         MPI_Bcast(global_params.data(), global_params.length(),
01253                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01254         // and save it for next sharing step
01255         previous_global_params << global_params;
01256     }
01257     else // average contributions
01258     {
01259         previous_global_params << global_params;
01260         MPI_Reduce(previous_global_params.data(),global_params.data(),
01261                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01262                    MPI_COMM_WORLD);
01263         global_params *= 1.0/PLMPI::size;
01264         MPI_Bcast(global_params.data(), global_params.length(),
01265                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01266     }
01267 }
01268 #endif
01269 
01270 #if USING_MPI
01271 void PartSupervisedDBN::test(VMat testset, PP<VecStatsCollector> test_stats,
01272                              VMat testoutputs, VMat testcosts) const
01273 {
01274     int l = testset.length();
01275     Vec input;
01276     Vec target;
01277     real weight;
01278 
01279     Vec output(outputsize());
01280 
01281     Vec costs(nTestCosts());
01282 
01283     // testset->defineSizes(inputsize(),targetsize(),weightsize());
01284 
01285     ProgressBar* pb = NULL;
01286     if(report_progress)
01287         pb = new ProgressBar("Testing learner",l);
01288 
01289     if (l == 0) {
01290         // Empty test set: we give -1 cost arbitrarily.
01291         costs.fill(-1);
01292         test_stats->update(costs);
01293     }
01294     int n=int(ceil(l/real(PLMPI::size)));
01295     Mat my_res(n,costs.size()+2);
01296     Mat all_res;
01297     if (PLMPI::rank==0) all_res.resize(n*PLMPI::size,costs.size()+2);
01298     int k=0;
01299     for(int i=0; i<l; i++)
01300      if (i%PLMPI::size==PLMPI::rank)
01301      {
01302         testset.getExample(i, input, target, weight);
01303 
01304         // Always call computeOutputAndCosts, since this is better
01305         // behaved with stateful learners
01306         computeOutputAndCosts(input,target,output,costs);
01307 
01308         if(testoutputs)
01309             testoutputs->putOrAppendRow(i,output);
01310 
01311         if(testcosts)
01312             testcosts->putOrAppendRow(i, costs);
01313 
01314         if(test_stats)
01315         {
01316             my_res.subMat(k,0,1,costs.length()) << costs;
01317             my_res(k,costs.length()) = weight;
01318             my_res(k++,costs.length()+1) = 1;
01319         }
01320 
01321         if(report_progress)
01322             pb->update(i);
01323      }
01324 
01325     if (PLMPI::rank==0)
01326        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01327                   all_res.data(),my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01328     else
01329        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01330                   0,my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01331 
01332     if (PLMPI::rank==0)
01333        for (int i=0;i<all_res.length();i++)
01334           if (all_res(i,costs.length()+1)==1.0)
01335              test_stats->update(all_res(i).subVec(0,costs.length()),
01336                                 all_res(i,costs.length()));
01337 
01338     if(pb)
01339         delete pb;
01340 
01341 }
01342 #endif
01343 
01344 
01345 } // end of namespace PLearn
01346 
01347 
01348 /*
01349   Local Variables:
01350   mode:c++
01351   c-basic-offset:4
01352   c-file-style:"stroustrup"
01353   c-file-offsets:((innamespace . 0)(inline-open . 0))
01354   indent-tabs-mode:nil
01355   fill-column:79
01356   End:
01357 */
01358 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :