PLearn 0.1
SupervisedDBN.cc
00001 // -*- C++ -*-
00002 
00003 // SupervisedDBN.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00039 #define PL_LOG_MODULE_NAME "SupervisedDBN"
00040 #include <plearn/io/pl_log.h>
00041 #include <plearn/io/openFile.h>
00042 
00043 #if USING_MPI
00044 #include <plearn/sys/PLMPI.h>
00045 #endif
00046 
00047 #include "SupervisedDBN.h"
00048 
00049 // RBM includes
00050 #include "RBMLayer.h"
00051 //#include "RBMMixedLayer.h"
00052 //#include "RBMMultinomialLayer.h"
00053 #include "RBMParameters.h"
00054 #include "RBMLLParameters.h"
00055 //#include "RBMJointLLParameters.h"
00056 
00057 // OnlineLearningModules includes
00058 #include "../OnlineLearningModule.h"
00059 #include "../StackedModulesModule.h"
00060 #include "../NLLErrModule.h"
00061 #include "../SquaredErrModule.h"
00062 #include "../GradNNetLayerModule.h"
00063 
00064 namespace PLearn {
00065 using namespace std;
00066 
00067 PLEARN_IMPLEMENT_OBJECT(
00068     SupervisedDBN,
00069     "Hinton's DBN plus supervised gradient from a logistic regression layer",
00070     "without top joint layer"
00071 );
00072 
00074 // SupervisedDBN //
00076 SupervisedDBN::SupervisedDBN() :
00077     regression(false),
00078     learning_rate(0.),
00079     fine_tuning_learning_rate(-1.),
00080     initial_momentum(0.),
00081     final_momentum(0.),
00082     momentum_switch_time(-1),
00083     weight_decay(0.),
00084     parallelization_minibatch_size(100),
00085     sum_parallel_contributions(0),
00086     use_sample_or_expectation(4)
00087 {
00088     use_sample_or_expectation[0] = 0;
00089     use_sample_or_expectation[1] = 1;
00090     use_sample_or_expectation[2] = 2;
00091     use_sample_or_expectation[3] = 0;
00092     random_gen = new PRandom();
00093 }
00094 
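// Note on the defaults set above: a negative fine_tuning_learning_rate is
// replaced by learning_rate in build_(), and the four entries written into
// use_sample_or_expectation select the default contrastive-divergence
// behaviour that is interpreted in contrastiveDivergenceStep() below.
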
00096 // declareOptions //
00098 void SupervisedDBN::declareOptions(OptionList& ol)
00099 {
00100     declareOption(ol, "regression", &SupervisedDBN::regression,
00101                   OptionBase::buildoption,
00102                   "If true, the task is regression, else it is classification");
00103 
00104     declareOption(ol, "learning_rate", &SupervisedDBN::learning_rate,
00105                   OptionBase::buildoption,
00106                   "Learning rate used during greedy learning");
00107 
00108     declareOption(ol, "supervised_learning_rates",
00109                   &SupervisedDBN::supervised_learning_rates,
00110                   OptionBase::buildoption,
00111                   "The learning rates used for the supervised part during"
00112                   " greedy learning\n"
00113                   "(layer by layer).\n");
00114 
00115     declareOption(ol, "fine_tuning_learning_rate",
00116                   &SupervisedDBN::fine_tuning_learning_rate,
00117                   OptionBase::buildoption,
00118                   "Learning rate used during the gradient descent");
00119 
00120     declareOption(ol, "initial_momentum",
00121                   &SupervisedDBN::initial_momentum,
00122                   OptionBase::buildoption,
00123                   "Initial momentum factor (should be between 0 and 1)");
00124 
00125     declareOption(ol, "final_momentum",
00126                   &SupervisedDBN::final_momentum,
00127                   OptionBase::buildoption,
00128                   "Final momentum factor (should be between 0 and 1)");
00129 
00130     declareOption(ol, "momentum_switch_time",
00131                   &SupervisedDBN::momentum_switch_time,
00132                   OptionBase::buildoption,
00133                   "Number of samples to be seen by layer i before its momentum"
00134                   " switches\n"
00135                   "from initial_momentum to final_momentum.\n");
00136 
00137     declareOption(ol, "weight_decay", &SupervisedDBN::weight_decay,
00138                   OptionBase::buildoption,
00139                   "Weight decay");
00140 
00141     declareOption(ol, "initialization_method",
00142                   &SupervisedDBN::initialization_method,
00143                   OptionBase::buildoption,
00144                   "The method used to initialize the weights:\n"
00145                   "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
00146                   "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
00147                   " 1/sqrt(d)]\n"
00148                   "  - \"zero\"           = all weights are set to 0,\n"
00149                   "where d = max( up_layer_size, down_layer_size ).\n");
00150 
00151 
00152     declareOption(ol, "training_schedule",
00153                   &SupervisedDBN::training_schedule,
00154                   OptionBase::buildoption,
00155                   "Total number of examples that should be seen until each"
00156                   " layer\n"
00157                   "has been greedily trained.\n"
00158                   "We should always have training_schedule[i] <"
00159                   " training_schedule[i+1].\n");
00160 
00161     declareOption(ol, "fine_tuning_method",
00162                   &SupervisedDBN::fine_tuning_method,
00163                   OptionBase::buildoption,
00164                   "Method for fine-tuning the whole network after greedy"
00165                   " learning.\n"
00166                   "One of:\n"
00167                   "  - \"none\"\n"
00168                   "  - \"CD\" or \"contrastive_divergence\"\n"
00169                   "  - \"EGD\" or \"error_gradient_descent\"\n"
00170                   "  - \"WS\" or \"wake_sleep\".\n");
00171 
00172     declareOption(ol, "layers", &SupervisedDBN::layers,
00173                   OptionBase::buildoption,
00174                   "Layers that learn representations of the input"
00175                   " in an unsupervised fashion.\n"
00176                   "layers[0] is the input layer.\n");
00177 
00178 /*
00179     declareOption(ol, "target_layer", &SupervisedDBN::target_layer,
00180                   OptionBase::buildoption,
00181                   "Target (or label) layer");
00182 */
00183     declareOption(ol, "params", &SupervisedDBN::params,
00184                   OptionBase::buildoption,
00185                   "RBMParameters linking the unsupervised layers.\n"
00186                   "params[i] links layers[i] and layers[i+1], except for"
00187                   " params[n_layers-1],\n"
00188                   "which links layers[n_layers-1] and last_layer.\n");
00189 /*
00190     declareOption(ol, "target_params", &SupervisedDBN::target_params,
00191                   OptionBase::buildoption,
00192                   "Parameters linking target_layer and last_layer");
00193 */
00194 /*
00195     declareOption(ol, "use_sample_rather_than_expectation_in_positive_phase_statistics",
00196                   &SupervisedDBN::use_sample_rather_than_expectation_in_positive_phase_statistics,
00197                   OptionBase::buildoption,
00198                   "In positive phase statistics use output->sample * input\n"
00199                   "rather than output->expectation * input.\n");
00200 */
00201     declareOption(ol, "use_sample_or_expectation",
00202                   &SupervisedDBN::use_sample_or_expectation,
00203                   OptionBase::buildoption,
00204                   "Vector specifying which value (sample or expectation)"
00205                   " to use during the\n"
00206                   "contrastive divergence step:\n"
00207                   "  - 0 means that we use the expectation only,\n"
00208                   "  - 1 means that we sample (for the next step), but we use"
00209                   " the\n"
00210                   "    expectation in the CD update formula,\n"
00211                   "  - 2 means that we use the sample only.\n"
00212                   "The order of the arguments matches the steps of CD:\n"
00213                   "  - visible unit during positive phase (you should keep it"
00214                   " to 0),\n"
00215                   "  - hidden unit during positive phase,\n"
00216                   "  - visible unit during negative phase,\n"
00217                   "  - hidden unit during negative phase (you should keep it"
00218                   " to 0).\n");
00219 
00220     declareOption(ol, "parallelization_minibatch_size",
00221                   &SupervisedDBN::parallelization_minibatch_size,
00222                   OptionBase::buildoption,
00223                   "Only used when USING_MPI for parallelization.\n"
00224                   "This is the number of examples seen by one process\n"
00225                   "during training after which the weight updates are shared\n"
00226                   "among all the processes.\n");
00227 
00228     declareOption(ol, "sum_parallel_contributions",
00229                   &SupervisedDBN::sum_parallel_contributions,
00230                   OptionBase::buildoption,
00231                   "Only used when USING_MPI for parallelization.\n"
00232                   "Whether to sum or average the delta-w contributions from different processes.\n");
00233 
00234     declareOption(ol, "n_layers", &SupervisedDBN::n_layers,
00235                   OptionBase::learntoption,
00236                   "Number of unsupervised layers, including input layer");
00237 /*
00238     declareOption(ol, "last_layer", &SupervisedDBN::last_layer,
00239                   OptionBase::learntoption,
00240                   "Last layer, learning joint representations of input and"
00241                   " target");
00242 
00243     declareOption(ol, "joint_layer", &SupervisedDBN::joint_layer,
00244                   OptionBase::nosave,
00245                   "Concatenation of target_layer and layers[n_layers-1]");
00246 
00247     declareOption(ol, "joint_params", &SupervisedDBN::joint_params,
00248                   OptionBase::nosave,
00249                   "Parameters linking joint_layer and last_layer");
00250 */
00251     declareOption(ol, "regressors", &SupervisedDBN::regressors,
00252                   OptionBase::learntoption,
00253                   "Linear (if regression) or logistic (if !regression)"
00254                   " regressors\n"
00255                   "that will provide the supervised gradient for each"
00256                   " RBMParameters.\n");
00257 
00258     // Now call the parent class' declareOptions().
00259     inherited::declareOptions(ol);
00260 }
00261 
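// Illustrative only: a minimal sketch of how the options declared above might
// be set from C++ code, following the same "fill public fields, then build()"
// pattern used for the sub-modules later in this file.  The concrete layer
// class (RBMBinomialLayer) and the setTrainingSet()/train() calls are
// assumptions about the surrounding PLearn API rather than anything this file
// defines; an actual experiment would more likely be described in a .plearn
// script.
#if 0
static void exampleSupervisedDBNSetup()
{
    PP<SupervisedDBN> dbn = new SupervisedDBN();
    dbn->learning_rate = 0.01;
    dbn->fine_tuning_learning_rate = 0.001; // would default to learning_rate if < 0
    dbn->fine_tuning_method = "EGD";        // the only fine-tuning implemented in train()

    // Three layers, hence two RBMs (n_layers-1 == 2); every per-layer option
    // below therefore has length 2.  Each layer would also need its size set.
    dbn->layers.resize( 3 );
    for( int i=0 ; i<3 ; i++ )
        dbn->layers[i] = new RBMBinomialLayer(); // hypothetical concrete subclass

    dbn->training_schedule = TVec<int>( 2 );
    dbn->training_schedule[0] = 50000;   // params[0] trained during stages [0, 50000)
    dbn->training_schedule[1] = 100000;  // params[1] trained during stages [50000, 100000)

    dbn->supervised_learning_rates = Vec( 2, 0.001 );

    dbn->build();
    // dbn->setTrainingSet( some_train_set );
    // dbn->nstages = 150000;             // stages beyond the schedule do fine-tuning
    // dbn->train();
}
#endif
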
00263 // build //
00265 void SupervisedDBN::build()
00266 {
00267     // ### Nothing to add here, simply calls build_().
00268     inherited::build();
00269     build_();
00270 }
00271 
00273 // build_ //
00275 void SupervisedDBN::build_()
00276 {
00277     MODULE_LOG << "build_() called" << endl;
00278     n_layers = layers.length();
00279     if( n_layers <= 1 )
00280         return;
00281 
00282     if( fine_tuning_learning_rate < 0. )
00283         fine_tuning_learning_rate = learning_rate;
00284 
00285     if( regression )
00286         predicted_size = 1;
00287 
00288     // check value of initialization_method
00289     string im = lowerstring( initialization_method );
00290     if( im == "" || im == "uniform_sqrt" )
00291         initialization_method = "uniform_sqrt";
00292     else if( im == "uniform_linear" )
00293         initialization_method = im;
00294     else if( im == "zero" )
00295         initialization_method = im;
00296     else
00297         PLERROR( "SupervisedDBN::build_ - initialization_method\n"
00298                  "\"%s\" unknown.\n", initialization_method.c_str() );
00299     MODULE_LOG << "  initialization_method = \"" << initialization_method
00300         << "\"" << endl;
00301 
00302     // check value of fine_tuning_method
00303     string ftm = lowerstring( fine_tuning_method );
00304     if( ftm == "" || ftm == "none" )
00305         fine_tuning_method = "";
00306     else if( ftm == "cd" || ftm == "contrastive_divergence" )
00307         fine_tuning_method = "CD";
00308     else if( ftm == "egd" || ftm == "error_gradient_descent" )
00309         fine_tuning_method = "EGD";
00310     else if( ftm == "ws" || ftm == "wake_sleep" )
00311         fine_tuning_method = "WS";
00312     else
00313         PLERROR( "SupervisedDBN::build_ - fine_tuning_method \"%s\"\n"
00314                  "is unknown.\n", fine_tuning_method.c_str() );
00315     MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
00316         <<  endl;
00317     //TODO: build structure to store gradients during gradient descent
00318 
00319     if( training_schedule.length() != n_layers-1 )
00320         training_schedule = TVec<int>( n_layers-1, 1000000 );
00321 
00322     // fills with 0's if too short
00323     supervised_learning_rates.resize( n_layers-1 );
00324 
00325     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00326     MODULE_LOG << "  learning_rate = " << learning_rate << endl;
00327     MODULE_LOG << "  fine_tuning_learning_rate = "
00328         << fine_tuning_learning_rate << endl;
00329     MODULE_LOG << "  supervised_learning_rates = "
00330         << supervised_learning_rates << endl;
00331     MODULE_LOG << endl;
00332 
00333     build_layers();
00334     build_params();
00335     build_regressors();
00336 }
00337 
00338 void SupervisedDBN::build_layers()
00339 {
00340     MODULE_LOG << "build_layers() called" << endl;
00341     if( inputsize_ >= 0 )
00342     {
00343         PLASSERT( layers[0]->size + predicted_size == inputsize() );
00344         setPredictorPredictedSizes( layers[0]->size,
00345                                     predicted_size, false );
00346         MODULE_LOG << "  n_predictor = " << n_predictor << endl;
00347         MODULE_LOG << "  n_predicted = " << n_predicted << endl;
00348     }
00349 
00350     for( int i=0 ; i<n_layers ; i++ )
00351         layers[i]->random_gen = random_gen;
00352 /*
00353     target_layer->random_gen = random_gen;
00354 
00355     last_layer = layers[n_layers-1];
00356 
00357     // concatenate target_layer and layers[n_layers-2] into joint_layer,
00358     // if it is not already done
00359     if( !joint_layer
00360         || joint_layer->sub_layers.size() !=2
00361         || joint_layer->sub_layers[0] != target_layer
00362         || joint_layer->sub_layers[1] != layers[n_layers-2] )
00363     {
00364         TVec< PP<RBMLayer> > the_sub_layers( 2 );
00365         the_sub_layers[0] = target_layer;
00366         the_sub_layers[1] = layers[n_layers-2];
00367         joint_layer = new RBMMixedLayer( the_sub_layers );
00368     }
00369     joint_layer->random_gen = random_gen;
00370 */
00371 }
00372 
00373 void SupervisedDBN::build_params()
00374 {
00375     MODULE_LOG << "build_params() called" << endl;
00376     if( params.length() == 0 )
00377     {
00378         params.resize( n_layers-1 );
00379         for( int i=0 ; i<n_layers-1 ; i++ )
00380             params[i] = new RBMLLParameters();
00381     }
00382     else if( params.length() != n_layers-1 )
00383         PLERROR( "SupervisedDBN::build_params - params.length() should\n"
00384                  "be equal to layers.length()-1 (%d != %d).\n",
00385                  params.length(), n_layers-1 );
00386 
00387     activation_gradients.resize( n_layers );
00388     expectation_gradients.resize( n_layers );
00389 //    output_gradient.resize( n_predicted );
00390 
00391     for( int i=0 ; i<n_layers-1 ; i++ )
00392     {
00393         //TODO: call changeOptions instead
00394         params[i]->down_units_types = layers[i]->units_types;
00395         params[i]->up_units_types = layers[i+1]->units_types;
00396         params[i]->initialization_method = initialization_method;
00397         params[i]->random_gen = random_gen;
00398         params[i]->build();
00399 
00400         activation_gradients[i].resize( params[i]->down_layer_size );
00401         expectation_gradients[i].resize( params[i]->down_layer_size );
00402     }
00403 
00404     activation_gradients[n_layers-1].resize(params[n_layers-2]->up_layer_size);
00405     expectation_gradients[n_layers-1].resize(params[n_layers-2]->up_layer_size);
00406 
00407 /*
00408     if( target_layer && !target_params )
00409         target_params = new RBMLLParameters();
00410 
00411     //TODO: call changeOptions instead
00412     target_params->down_units_types = target_layer->units_types;
00413     target_params->up_units_types = last_layer->units_types;
00414     target_params->initialization_method = initialization_method;
00415     target_params->random_gen = random_gen;
00416     target_params->build();
00417 
00418     // build joint_params from params[n_layers-1] and target_params
00419     // if it is not already done
00420     if( !joint_params
00421         || joint_params->target_params != target_params
00422         || joint_params->cond_params != params[n_layers-2] )
00423     {
00424         joint_params = new RBMJointLLParameters( target_params,
00425                                                  params[n_layers-2] );
00426     }
00427     joint_params->random_gen = random_gen;
00428 */
00429 
00430     // share the biases
00431     for( int i=0 ; i<n_layers-2 ; i++ )
00432         params[i]->up_units_bias = params[i+1]->down_units_bias;
00433 }
00434 
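// Wiring recap for build_params(), e.g. with n_layers = 3:
//   params[0] links layers[0] (the visible layer) and layers[1],
//   params[1] links layers[1] and layers[2] (the top representation),
// and the bias-sharing loop above makes params[0]->up_units_bias and
// params[1]->down_units_bias refer to the same vector, so the hidden biases
// of one RBM and the visible biases of the next stay identical.
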
00435 void SupervisedDBN::build_regressors()
00436 {
00437     MODULE_LOG << "build_regressors() called" << endl;
00438     if( regressors.length() != n_layers-1 )
00439         regressors.resize( n_layers-1 );
00440 
00441     for( int i=0 ; i<n_layers-1 ; i++ )
00442         if( !(regressors[i])
00443             || regressors[i]->input_size != params[i]->up_layer_size )
00444         {
00445             MODULE_LOG << "creating regressor " << i << "..." << endl;
00446 
00447             // A linear layer of the appropriate size, which will be trained by
00448             // stochastic gradient descent; initial weights are 0.
00449             PP<GradNNetLayerModule> p_gnnlm = new GradNNetLayerModule();
00450             p_gnnlm->input_size = params[i]->up_layer_size;
00451             p_gnnlm->output_size = n_predicted;
00452             p_gnnlm->start_learning_rate = supervised_learning_rates[i];
00453             MODULE_LOG << "start_learning_rate = "
00454                 << p_gnnlm->start_learning_rate << endl;
00455             p_gnnlm->init_weights_random_scale = 0.;
00456             p_gnnlm->build();
00457 
00458             // The cost part
00459             PP<OnlineLearningModule> p_cost = NULL;
00460 
00461             if( regression ) // cost is MSE
00462             {
00463                 MODULE_LOG << "... as a SquaredErrModule" << endl;
00464                 p_cost = new SquaredErrModule();
00465             }
00466             else // cost is softmax+NLL
00467             {
00468                 MODULE_LOG << "... as an NLLErrModule" << endl;
00469                 p_cost = new NLLErrModule();
00470             }
00471 
00472             p_cost->input_size = n_predicted;
00473             if( regression )
00474                 p_cost->output_size = 1;
00475             else
00476                 p_cost->output_size = 2;
00477             p_cost->build();
00478 
00479             // Stack them, and...
00480             TVec< PP<OnlineLearningModule> > stack(2);
00481             stack[0] = (GradNNetLayerModule*) p_gnnlm;
00482             stack[1] = p_cost;
00483 
00484             // ... encapsulate them in another Module, which will compute
00485             // and backprop the cost (NLL or MSE)
00486             PP<StackedModulesModule> p_smm = new StackedModulesModule();
00487             p_smm->modules = stack;
00488             p_smm->last_layer_is_cost = true;
00489             p_smm->target_size = predicted_size;
00490             p_smm->build();
00491 
00492             regressors[i] = (StackedModulesModule*) p_smm;
00493         }
00494 }
00495 
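// Data flow through each regressor built above: fprop() takes the
// concatenation [ up-layer expectation, target ] as input and outputs the
// cost (NLL or MSE); the corresponding prediction is read back from the
// wrapped StackedModulesModule's values[1], as done in expectation(), and
// bpropUpdate() both updates the linear layer and returns the gradient with
// respect to the up-layer expectation, as used in
// supervisedContrastiveDivergenceStep() and fineTuneByGradientDescent().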
00496 
00498 // forget //
00500 void SupervisedDBN::forget()
00501 {
00502     MODULE_LOG << "forget() called" << endl;
00509     resetGenerator(seed_);
00510     for( int i=0 ; i<n_layers-1 ; i++ )
00511         params[i]->forget();
00512 
00513     for( int i=0 ; i<n_layers ; i++ )
00514         layers[i]->reset();
00515 
00516     for( int i=0 ; i<n_layers-1 ; i++ )
00517         regressors[i]->forget();
00518 
00519 #if USING_MPI
00520     global_params.resize(0);
00521 #endif
00522 /*
00523     target_params->forget();
00524     target_layer->reset();
00525 */
00526     stage = 0;
00527 }
00528 
00530 // generate //
00532 void SupervisedDBN::generate(Vec& y) const
00533 {
00534     PLERROR("generate not implemented for SupervisedDBN");
00535 }
00536 
00538 // cdf //
00540 real SupervisedDBN::cdf(const Vec& y) const
00541 {
00542     PLERROR("cdf not implemented for SupervisedDBN"); return 0;
00543 }
00544 
00546 // expectation //
00548 void SupervisedDBN::expectation(Vec& mu) const
00549 {
00550     mu.resize( predicted_size );
00551 
00552     // Propagate input (predictor_part) until penultimate layer
00553     layers[0]->expectation << predictor_part;
00554     for( int i=0 ; i<n_layers-1 ; i++ )
00555     {
00556         params[i]->setAsDownInput( layers[i]->expectation );
00557         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00558         layers[i+1]->computeExpectation();
00559     }
00560 /*
00561     // Set layers[n_layers-2]->expectation (penultimate) as conditioning input
00562     // of joint_params
00563     joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00564 
00565     // Get all activations on target_layer from target_params
00566     target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00567     target_layer->computeExpectation();
00568 */
00569 
00570     supervised_input.resize( layers[n_layers-1]->expectation.size() );
00571     supervised_input << layers[n_layers-1]->expectation;
00572     supervised_input.append( predicted_part ); // yes, it is ugly
00573 
00574     // Compute supervised cost and gradient
00575     regressors[n_layers-2]->fprop( supervised_input, store_costs );
00576     mu << ((StackedModulesModule*) (OnlineLearningModule*)
00577                 regressors[n_layers-2])->values[1];
00578 }
00579 
00581 // density //
00583 real SupervisedDBN::density(const Vec& y) const
00584 {
00585     PLASSERT( y.size() == n_predicted );
00586 
00587     if( regression ) // the probabilistic model does not work
00588         return 0;
00589 
00590     // TODO: 'y'[0] should rather be the integer "index" itself!
00591     int index = argmax( y );
00592 
00593     // If y != onehot( index ), then density is 0
00594     if( !is_equal( y[index], 1. ) )
00595         return 0;
00596     for( int i=0 ; i<n_predicted ; i++ )
00597         if( !is_equal( y[i], 0 ) && i != index )
00598             return 0;
00599 
00600     expectation( store_expect );
00601     return store_expect[index];
00602 }
00603 
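// Example of the convention enforced above: with n_predicted = 3, the only
// targets with non-zero density are the one-hot vectors (1,0,0), (0,1,0) and
// (0,0,1); density( (0,1,0) ) returns the second component computed by
// expectation().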
00604 
00606 // log_density //
00608 real SupervisedDBN::log_density(const Vec& y) const
00609 {
00610     return pl_log( density(y) );
00611 }
00612 
00614 // survival_fn //
00616 real SupervisedDBN::survival_fn(const Vec& y) const
00617 {
00618     PLERROR("survival_fn not implemented for SupervisedDBN"); return 0;
00619 }
00620 
00622 // variance //
00624 void SupervisedDBN::variance(Mat& cov) const
00625 {
00626     PLERROR("variance not implemented for SupervisedDBN");
00627 }
00628 
00630 // makeDeepCopyFromShallowCopy //
00632 void SupervisedDBN::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00633 {
00634     inherited::makeDeepCopyFromShallowCopy(copies);
00635 
00636     deepCopyField(layers, copies);
00637 //    deepCopyField(last_layer, copies);
00638 //    deepCopyField(target_layer, copies);
00639 //    deepCopyField(joint_layer, copies);
00640     deepCopyField(params, copies);
00641 //    deepCopyField(joint_params, copies);
00642 //    deepCopyField(target_params, copies);
00643     deepCopyField(training_schedule, copies);
00644 }
00645 
00647 // setPredictor //
00649 void SupervisedDBN::setPredictor(const Vec& predictor, bool call_parent)
00650     const
00651 {
00652     if (call_parent)
00653         inherited::setPredictor(predictor, true);
00654     // ### Add here any specific code required by your subclass.
00655 }
00656 
00658 // setPredictorPredictedSizes //
00660 bool SupervisedDBN::setPredictorPredictedSizes(int the_predictor_size,
00661                                                int the_predicted_size,
00662                                                bool call_parent)
00663 {
00664     bool sizes_have_changed = false;
00665     if (call_parent)
00666         sizes_have_changed = inherited::setPredictorPredictedSizes(
00667             the_predictor_size, the_predicted_size, true);
00668 
00669     // ### Add here any specific code required by your subclass.
00670     if( ( the_predictor_size >= 0 && the_predictor_size != layers[0]->size ) ||
00671         ( the_predicted_size >= 0 && the_predicted_size != predicted_size ) )
00672         PLERROR( "SupervisedDBN::setPredictorPredictedSizes - \n"
00673                  "n_predictor should be equal to layer[0]->size (%d)\n"
00674                  "n_predicted should be equal to predicted_size (%d).\n",
00675                  layers[0]->size, predicted_size );
00676 
00677     n_predictor = layers[0]->size;
00678     n_predicted = predicted_size;
00679 
00680     // Returned value.
00681     return sizes_have_changed;
00682 }
00683 
00684 
00686 // train //
00688 void SupervisedDBN::train()
00689 {
00690     MODULE_LOG << "train() called" << endl;
00691     // The role of the train method is to bring the learner up to
00692     // stage==nstages, updating train_stats with training costs measured
00693     // on-line in the process.
00694 
00695     /* TYPICAL CODE:
00696 
00697     static Vec input;  // static so we don't reallocate memory each time...
00698     static Vec target; // (but be careful that static means shared!)
00699     input.resize(inputsize());    // the train_set's inputsize()
00700     target.resize(targetsize());  // the train_set's targetsize()
00701     real weight;
00702 
00703     // This generic PLearner method does a number of standard stuff useful for
00704     // (almost) any learner, and return 'false' if no training should take
00705     // place. See PLearner.h for more details.
00706     if (!initTrain())
00707         return;
00708 
00709     while(stage<nstages)
00710     {
00711         // clear statistics of previous epoch
00712         train_stats->forget();
00713 
00714         //... train for 1 stage, and update train_stats,
00715         // using train_set->getExample(input, target, weight)
00716         // and train_stats->update(train_costs)
00717 
00718         ++stage;
00719         train_stats->finalize(); // finalize statistics for this epoch
00720     }
00721     */
00722 
00723     Vec input( inputsize() );
00724     Vec target( targetsize() ); // unused
00725     real weight; // unused
00726     Vec train_costs(2);
00727 
00728     // hack for supervised cost
00729     real sum_sup_cost = 0;
00730     PStream sup_cost_file = openFile( expdir/"sup_cost.amat",
00731                                       PStream::raw_ascii, "a" );
00732 
00733     int nsamples = train_set->length();
00734 
00735 #if USING_MPI
00736     // initialize global parameters so that they can easily be shared across
00737     // multiple CPUs
00738 
00739     // wait until we can attach a gdb process
00740     //pout << "START WAITING..." << endl;
00741     //sleep(20);
00742     //pout << "DONE WAITING!" << endl;
00743     MPI_Barrier(MPI_COMM_WORLD);
00744     int total_bsize=parallelization_minibatch_size*PLMPI::size;
00745 //#endif
00746     forget(); // DEBUGGING TO GET REPRODUCIBLE RESULTS
00747     if (global_params.size()==0)
00748     {
00749         int n_params = joint_params->nParameters(1,1);
00750         for (int i=0;i<params.length()-1;i++)
00751             n_params += params[i]->nParameters(0,1);
00752         global_params.resize(n_params);
00753         previous_global_params.resize(n_params);
00754         Vec p=global_params;
00755         for (int i=0;i<params.length()-1;i++)
00756             p=params[i]->makeParametersPointHere(p,0,1);
00757         p=joint_params->makeParametersPointHere(p,1,1);
00758         if (p.length()!=0)
00759             PLERROR("HintonDeepBeliefNet: Inconsistencies between nParameters and makeParametersPointHere!");
00760     }
00761 #endif
00762 
00763     MODULE_LOG << "  nsamples = " << nsamples << endl;
00764     MODULE_LOG << "  initial stage = " << stage << endl;
00765     MODULE_LOG << "  objective: nstages = " << nstages << endl;
00766 
00767     if( !initTrain() )
00768     {
00769         MODULE_LOG << "train() aborted" << endl;
00770         return;
00771     }
00772 
00773     ProgressBar* pb = 0;
00774 
00775     // clear stats of previous epoch
00776     train_stats->forget();
00777 
00778     /***** initial greedy training *****/
00779     for( int layer=0 ; layer < n_layers-1 ; layer++ )
00780     {
00781         MODULE_LOG << "Training parameters between layers " << layer
00782             << " and " << layer+1 << endl;
00783 
00784         int end_stage = min( training_schedule[layer], nstages );
00785 
00786         MODULE_LOG << "  stage = " << stage << endl;
00787         MODULE_LOG << "  end_stage = " << end_stage << endl;
00788 
00789         if( report_progress && stage < end_stage )
00790         {
00791             pb = new ProgressBar( "Training layer "+tostring(layer)
00792                                   +" of "+classname(),
00793                                   end_stage - stage );
00794         }
00795 
00796         params[layer]->learning_rate = learning_rate;
00797 
00798         int momentum_switch_stage = momentum_switch_time;
00799         if( layer > 0 )
00800             momentum_switch_stage += training_schedule[layer-1];
00801 
00802         if( stage <= momentum_switch_stage )
00803             params[layer]->momentum = initial_momentum;
00804         else
00805             params[layer]->momentum = final_momentum;
00806 
00807 #if USING_MPI
00808         // make a copy of the parameters as they were at the beginning of
00809         // the minibatch
00810         if (sum_parallel_contributions)
00811             previous_global_params << global_params;
00812 #endif
00813         int begin_sample = stage % nsamples;
00814         for( ; stage<end_stage ; stage++ )
00815         {
00816 #if USING_MPI
00817             // only look at some of the examples, associated with this process
00818             // number (rank)
00819             if (stage%PLMPI::size==PLMPI::rank)
00820             {
00821 #endif
00822 //                resetGenerator(1); // DEBUGGING HACK TO MAKE SURE RESULTS ARE INDEPENDENT OF PARALLELIZATION
00823                 int sample = stage % nsamples;
00824                 if( sample == begin_sample )
00825                 {
00826                     sup_cost_file << sum_sup_cost / nsamples << endl;
00827                     sum_sup_cost = 0;
00828                 }
00829 
00830                 train_set->getExample(sample, input, target, weight);
00831                 sum_sup_cost += greedyStep( input, layer );
00832 
00833                 if( stage == momentum_switch_stage )
00834                     params[layer]->momentum = final_momentum;
00835 
00836                 if( pb )
00837                 {
00838                     if( layer == 0 )
00839                         pb->update( stage + 1 );
00840                     else
00841                         pb->update( stage - training_schedule[layer-1] + 1 );
00842                 }
00843 #if USING_MPI
00844             }
00845             // time to share among processors
00846             if (stage%total_bsize==0 || stage==end_stage-1)
00847                 shareParamsMPI();
00848 #endif
00849         }
00850     }
00851 
00852 #if 0
00853     /***** joint training *****/
00854     MODULE_LOG << "Training joint parameters, between target,"
00855         << " penultimate (" << n_layers-2 << ")," << endl
00856         << "and last (" << n_layers-1 << ") layers." << endl;
00857 
00858     int end_stage = min( training_schedule[n_layers-2], nstages );
00859 
00860     MODULE_LOG << "  stage = " << stage << endl;
00861     MODULE_LOG << "  end_stage = " << end_stage << endl;
00862 
00863     if( report_progress && stage < end_stage )
00864         pb = new ProgressBar( "Training joint layer (target and "
00865                              +tostring(n_layers-2)+") of "+classname(),
00866                              end_stage - stage );
00867 
00868     joint_params->learning_rate = learning_rate;
00869 //    target_params->learning_rate = learning_rate;
00870 
00871     int previous_stage = (n_layers < 3) ? 0 : training_schedule[n_layers-3];
00872     int momentum_switch_stage = momentum_switch_time + previous_stage;
00873     if( stage <= momentum_switch_stage )
00874         joint_params->momentum = initial_momentum;
00875     else
00876         joint_params->momentum = final_momentum;
00877 
00878     int begin_sample = stage % nsamples;
00879     int last = min(training_schedule[n_layers-2],nstages);
00880     for( ; stage<last ; stage++ )
00881     {
00882 #if USING_MPI
00883         // only look at some of the examples, associated with this process
00884         // number (rank)
00885         if (stage%PLMPI::size==PLMPI::rank)
00886         {
00887 #endif
00888             int sample = stage % nsamples;
00889             if( sample == begin_sample )
00890             {
00891                 sup_cost_file << sum_sup_cost / nsamples << endl;
00892                 sum_sup_cost = 0;
00893             }
00894 
00895             train_set->getExample(sample, input, target, weight);
00896             sum_sup_cost += jointGreedyStep( input );
00897 
00898             if( stage == momentum_switch_stage )
00899                 joint_params->momentum = final_momentum;
00900 
00901             if( pb )
00902                 pb->update( stage - previous_stage + 1 );
00903 #if USING_MPI
00904         }
00905         // time to share among processors
00906         if (stage%total_bsize==0 || stage==last-1)
00907             shareParamsMPI();
00908 #endif
00909     }
00910 #endif //0
00911 
00912     /***** fine-tuning *****/
00913     MODULE_LOG << "Fine-tuning all parameters, using method "
00914         << fine_tuning_method << endl;
00915     MODULE_LOG << "  fine_tuning_learning_rate = "
00916         << fine_tuning_learning_rate << endl;
00917 
00918     int init_stage = stage;
00919     if( report_progress && stage < nstages )
00920         pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00921                              +classname(),
00922                              nstages - init_stage );
00923 
00924     for( int i=0 ; i<n_layers-1 ; i++ )
00925         params[i]->learning_rate = fine_tuning_learning_rate;
00926 
00927     ((GradNNetLayerModule*) (OnlineLearningModule*)
00928         ((StackedModulesModule*) (OnlineLearningModule*)
00929             regressors[n_layers-2])->modules[0])->start_learning_rate =
00930         fine_tuning_learning_rate;
00931 
00932 //    joint_params->learning_rate = fine_tuning_learning_rate;
00933 //    target_params->learning_rate = fine_tuning_learning_rate;
00934 
00935     if( fine_tuning_method == "" ) // do nothing
00936     {
00937         stage = nstages;
00938         if( pb )
00939             pb->update( nstages - init_stage + 1 );
00940     }
00941     else if( fine_tuning_method == "EGD" )
00942     {
00943         int begin_sample = stage % nsamples;
00944         for( ; stage<nstages ; stage++ )
00945         {
00946 #if USING_MPI
00947             // only look at some of the examples, associated with
00948             // this process number (rank)
00949             if (stage%PLMPI::size==PLMPI::rank)
00950             {
00951 #endif
00952                 int sample = stage % nsamples;
00953                 if( sample == begin_sample )
00954                     train_stats->forget();
00955 
00956                 train_set->getExample(sample, input, target, weight);
00957                 fineTuneByGradientDescent( input, train_costs );
00958                 train_stats->update( train_costs );
00959 
00960                 if( pb )
00961                     pb->update( stage - init_stage + 1 );
00962 #if USING_MPI
00963             }
00964             // time to share among processors
00965             if (stage%total_bsize==0 || stage==nstages-1)
00966                 shareParamsMPI();
00967 #endif
00968         }
00969         train_stats->finalize(); // finalize statistics for this epoch
00970     }
00971     else
00972         PLERROR( "Fine-tuning methods other than \"EGD\" are not"
00973                  " implemented yet." );
00974 
00975     if( pb )
00976         delete pb;
00977 
00978     MODULE_LOG << "Training finished" << endl << endl;
00979 }
00980 
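// Stage bookkeeping of train() above, worked out on a hypothetical example
// with nsamples = 10000, training_schedule = [ 50000, 100000 ] and
// nstages = 150000:
//   - stages [0, 50000)       greedily train params[0] (layers 0 <-> 1),
//   - stages [50000, 100000)  greedily train params[1] (layers 1 <-> 2),
//   - stages [100000, 150000) fine-tune every parameter with method "EGD".
// Each stage consumes one example, cycling through the training set
// (sample = stage % nsamples), and each layer's momentum switches from
// initial_momentum to final_momentum once it has seen momentum_switch_time
// examples.
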
00981 // assumes that down_layer->expectation is set
00982 real SupervisedDBN::supervisedContrastiveDivergenceStep(
00983     const PP<RBMLayer>& down_layer,
00984     const PP<RBMParameters>& parameters,
00985     const PP<RBMLayer>& up_layer,
00986     const Vec& target,
00987     int index )
00988 {
00989 
00990     real supervised_cost = MISSING_VALUE;
00991     if( supervised_learning_rates[index] > 0 )
00992     {
00993         // (Deterministic) forward pass
00994         parameters->setAsDownInput( down_layer->expectation );
00995         up_layer->getAllActivations( parameters );
00996         up_layer->computeExpectation();
00997 
00998         supervised_input.resize( up_layer->expectation.size() );
00999         supervised_input << up_layer->expectation;
01000         supervised_input.append( target );
01001 
01002         // Compute supervised cost and gradient
01003         Vec sup_cost(1);
01004         regressors[index]->fprop( supervised_input, sup_cost );
01005         regressors[index]->bpropUpdate( supervised_input, sup_cost,
01006                                         expectation_gradients[index+1],
01007                                         Vec() );
01008 
01009         // propagate gradient to params
01010         up_layer->bpropUpdate( up_layer->activations,
01011                                up_layer->expectation,
01012                                activation_gradients[index+1],
01013                                expectation_gradients[index+1] );
01014 
01015         // put the right learning rate
01016         parameters->learning_rate = supervised_learning_rates[index];
01017         // updates the parameters
01018         parameters->bpropUpdate( down_layer->expectation,
01019                                  up_layer->activations,
01020                                  expectation_gradients[index],
01021                                  activation_gradients[index+1] );
01022         // put the learning rate back
01023         parameters->learning_rate = learning_rate;
01024 
01025         // return the cost
01026         supervised_cost = sup_cost[0];
01027     }
01028 
01029     // We have to do another forward pass because the weights have changed
01030     contrastiveDivergenceStep( down_layer, parameters, up_layer );
01031 
01032     // return supervised cost
01033     return supervised_cost;
01034 }
01035 
01036 void SupervisedDBN::contrastiveDivergenceStep(
01037     const PP<RBMLayer>& down_layer,
01038     const PP<RBMParameters>& parameters,
01039     const PP<RBMLayer>& up_layer )
01040 {
01041     // Re-initialize values in down_layer
01042     if( use_sample_or_expectation[0] == 0 )
01043         parameters->setAsDownInput( down_layer->expectation );
01044     else
01045     {
01046         down_layer->generateSample();
01047         parameters->setAsDownInput( down_layer->sample );
01048     }
01049 
01050     // positive phase
01051     up_layer->getAllActivations( parameters );
01052     up_layer->computeExpectation();
01053     up_layer->generateSample();
01054 
01055     // accumulate stats using the right vector (sample or expectation)
01056     if( use_sample_or_expectation[0] == 2 )
01057     {
01058         if( use_sample_or_expectation[1] == 2 )
01059             parameters->accumulatePosStats(down_layer->sample,
01060                                            up_layer->sample );
01061         else
01062             parameters->accumulatePosStats(down_layer->sample,
01063                                            up_layer->expectation );
01064     }
01065     else
01066     {
01067         if( use_sample_or_expectation[1] == 2 )
01068             parameters->accumulatePosStats(down_layer->expectation,
01069                                            up_layer->sample);
01070         else
01071             parameters->accumulatePosStats(down_layer->expectation,
01072                                            up_layer->expectation );
01073     }
01074 
01075     // down propagation
01076     if( use_sample_or_expectation[1] == 0 )
01077         parameters->setAsUpInput( up_layer->expectation );
01078     else
01079         parameters->setAsUpInput( up_layer->sample );
01080 
01081     down_layer->getAllActivations( parameters );
01082     down_layer->computeExpectation();
01083     down_layer->generateSample();
01084 
01085     if( use_sample_or_expectation[2] == 0 )
01086         parameters->setAsDownInput( down_layer->expectation );
01087     else
01088         parameters->setAsDownInput( down_layer->sample );
01089 
01090     up_layer->getAllActivations( parameters );
01091     up_layer->computeExpectation();
01092 
01093     // accumulate stats using the right vector (sample or expectation)
01094     if( use_sample_or_expectation[3] == 2 )
01095     {
01096         up_layer->generateSample();
01097         if( use_sample_or_expectation[2] == 2 )
01098             parameters->accumulateNegStats( down_layer->sample,
01099                                             up_layer->sample );
01100         else
01101             parameters->accumulateNegStats( down_layer->expectation,
01102                                             up_layer->sample );
01103     }
01104     else
01105     {
01106         if( use_sample_or_expectation[2] == 2 )
01107             parameters->accumulateNegStats( down_layer->sample,
01108                                             up_layer->expectation );
01109         else
01110             parameters->accumulateNegStats( down_layer->expectation,
01111                                             up_layer->expectation );
01112     }
01113 
01114     // update
01115     parameters->update();
01116 }
01117 
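// With the default use_sample_or_expectation = [0, 1, 2, 0], the step above
// takes its positive statistics on ( v = input expectation, h = hidden
// expectation ), samples the hidden units only to drive the reconstruction,
// and takes its negative statistics on ( v' = reconstructed sample,
// h' = hidden expectation given v' ).  parameters->update() is then expected
// to move the weights roughly along pos_stats - neg_stats, scaled by
// learning_rate, with momentum handled inside the RBMParameters subclass.
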
01118 real SupervisedDBN::greedyStep( const Vec& input, int index )
01119 {
01120     // deterministic propagation until we reach index
01121     layers[0]->expectation << input.subVec(0, n_predictor);
01122     for( int i=0 ; i<index ; i++ )
01123     {
01124         params[i]->setAsDownInput( layers[i]->expectation );
01125         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01126         layers[i+1]->computeExpectation();
01127     }
01128 
01129     // perform one step of CD + partially supervised gradient
01130     real sup_cost = supervisedContrastiveDivergenceStep(
01131                         layers[index],
01132                         (RBMLLParameters*) params[index],
01133                         layers[index+1],
01134                         input.subVec(n_predictor,n_predicted),
01135                         index );
01136     return sup_cost;
01137 }
01138 
01139 /*
01140 real SupervisedDBN::jointGreedyStep( const Vec& input )
01141 {
01142     // deterministic propagation until we reach n_layers-2, setting the input
01143     // of the "input" part of joint_layer
01144     layers[0]->expectation << input.subVec( 0, n_predictor );
01145     for( int i=0 ; i<n_layers-2 ; i++ )
01146     {
01147         params[i]->setAsDownInput( layers[i]->expectation );
01148         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01149         layers[i+1]->computeExpectation();
01150     }
01151 
01152     real supervised_cost = MISSING_VALUE;
01153     if( supervised_learning_rates[n_layers-2] > 0 )
01154     {
01155         // deterministic forward pass
01156         joint_params->setAsCondInput( layers[n_layers-2]->expectation );
01157         target_layer->getAllActivations( (RBMLLParameters*) joint_params );
01158         target_layer->computeExpectation();
01159 
01160         // now get the actual index of the target
01161         int actual_index = argmax( input.subVec( n_predictor, n_predicted ) );
01162 #ifdef BOUNDCHECK
01163         for( int i=0 ; i<n_predicted ; i++ )
01164             PLASSERT( is_equal( input[n_predictor+i], 0. ) ||
01165                     i == actual_index && is_equal( input[n_predictor+i], 1 ) );
01166 #endif
01167 
01168         // get supervised cost (= train cost) and output gradient
01169         supervised_cost = -pl_log( target_layer->expectation[actual_index] );
01170         output_gradient << target_layer->expectation;
01171         output_gradient[actual_index] -= 1.;
01172 
01173         // put the right learning rate
01174         joint_params->learning_rate = supervised_learning_rates[n_layers-2];
01175         // backprop and update
01176         joint_params->bpropUpdate( layers[n_layers-2]->expectation,
01177                                    target_layer->expectation,
01178                                    expectation_gradients[n_layers-2],
01179                                    output_gradient );
01180         // put the learning rate back
01181         joint_params->learning_rate = learning_rate;
01182 
01183     }
01184 
01185     // now fill the "target" part of joint_layer
01186     target_layer->expectation << input.subVec( n_predictor, n_predicted );
01187     // do contrastive divergence step with the new weights and actual target
01188     contrastiveDivergenceStep( (RBMLayer*) joint_layer,
01189                                (RBMLLParameters*) joint_params,
01190                                last_layer );
01191 
01192     // return supervised cost
01193     return supervised_cost;
01194 }
01195 */
01196 
01197 void SupervisedDBN::fineTuneByGradientDescent( const Vec& input,
01198                                                Vec& train_costs )
01199 {
01200     // split input in predictor_part and predicted_part
01201     splitCond(input);
01202 
01203     // fprop
01204     layers[0]->expectation << input.subVec(0, n_predictor);
01205     for( int i=0 ; i<n_layers-1 ; i++ )
01206     {
01207         params[i]->setAsDownInput( layers[i]->expectation );
01208         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
01209         layers[i+1]->computeExpectation();
01210     }
01211 
01212     supervised_input.resize( layers[n_layers-1]->expectation.length() );
01213     supervised_input << layers[n_layers-1]->expectation;
01214     supervised_input.append( input.subVec( n_predictor, n_predicted ) );
01215 
01216     // Compute supervised cost and gradient
01217     regressors[n_layers-2]->fprop( supervised_input, train_costs );
01218     regressors[n_layers-2]->bpropUpdate( supervised_input, train_costs,
01219                                          expectation_gradients[n_layers-1],
01220                                          Vec() );
01221 
01222     // bprop and update
01223     for( int i=n_layers-1 ; i>0 ; i-- )
01224     {
01225         layers[i]->bpropUpdate( layers[i]->activations,
01226                                 layers[i]->expectation,
01227                                 activation_gradients[i],
01228                                 expectation_gradients[i] );
01229         params[i-1]->bpropUpdate( layers[i-1]->expectation,
01230                                   layers[i]->activations,
01231                                   expectation_gradients[i-1],
01232                                   activation_gradients[i] );
01233     }
01234 }
01235 
01236 
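// fineTuneByGradientDescent() implements the "EGD" fine-tuning used in
// train(): a full forward pass through every RBM and the top regressor,
// followed by a backward pass in which the regressor (via its bpropUpdate
// above) and every RBMLLParameters are updated, all with
// fine_tuning_learning_rate as set in train().
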
01237 void SupervisedDBN::computeCostsFromOutputs(const Vec& input,
01238                                             const Vec& output,
01239                                             const Vec& target,
01240                                             Vec& costs) const
01241 {
01242     char c = outputs_def[0];
01243     if( (c == 'l' || c == 'd') && !regression )
01244         inherited::computeCostsFromOutputs(input, output, target, costs);
01245     else if( c == 'e' )
01246     {
01247         // assumes computeOutput has just been called
01248         // (yes, this is ugly)
01249         costs.resize( store_costs.size() );
01250         costs << store_costs;
01251     }
01252 }
01253 
01254 TVec<string> SupervisedDBN::getTestCostNames() const
01255 {
01256     char c = outputs_def[0];
01257     TVec<string> result;
01258     if( (c == 'l' || c == 'd') && !regression )
01259         result.append( "NLL" );
01260     else if( c == 'e' )
01261     {
01262         if( regression )
01263             result.append( "mse" );
01264         else
01265         {
01266             result.append( "NLL" );
01267             result.append( "class_error" );
01268         }
01269     }
01270     return result;
01271 }
01272 
01273 TVec<string> SupervisedDBN::getTrainCostNames() const
01274 {
01275     return getTestCostNames();
01276 }
01277 
01278 #if USING_MPI
01279 void SupervisedDBN::shareParamsMPI()
01280 {
01281     if (sum_parallel_contributions)
01282     {
01283         if (PLMPI::rank!=0)
01284             // after this line global_params contains the delta for all cpus
01285             // except root
01286             global_params -= previous_global_params;
01287         // while the root contains the previous global params + its delta
01288         previous_global_params << global_params;
01289         // hence summing everything (result in cpu0.global_params)
01290         // yields the sum of all the changes plus the previous global params:
01291         MPI_Reduce(previous_global_params.data(),global_params.data(),
01292                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01293                    MPI_COMM_WORLD);
01294         // send it back to every one
01295         MPI_Bcast(global_params.data(), global_params.length(),
01296                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01297         // and save it for next sharing step
01298         previous_global_params << global_params;
01299     }
01300     else // average contributions
01301     {
01302         previous_global_params << global_params;
01303         MPI_Reduce(previous_global_params.data(),global_params.data(),
01304                    global_params.length(), PLMPI_REAL, MPI_SUM, 0,
01305                    MPI_COMM_WORLD);
01306         global_params *= 1.0/PLMPI::size;
01307         MPI_Bcast(global_params.data(), global_params.length(),
01308                   PLMPI_REAL, 0, MPI_COMM_WORLD);
01309     }
01310 }
01311 #endif
01312 
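// Arithmetic of the "sum" mode above, for two processes p0 (the root) and p1
// that both started the minibatch from the same previous_global_params P:
//   p1 contributes (P + delta_1) - P = delta_1,
//   p0 contributes  P + delta_0       (the root skips the subtraction),
//   MPI_SUM yields  P + delta_0 + delta_1,
// which is broadcast back to every process and stored as the new
// previous_global_params.  In the "average" mode each process instead
// contributes its full parameter vector and the reduced sum is divided by
// PLMPI::size.
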
01313 #if USING_MPI
01314 void SupervisedDBN::test(VMat testset, PP<VecStatsCollector> test_stats,
01315                              VMat testoutputs, VMat testcosts) const
01316 {
01317     int l = testset.length();
01318     Vec input;
01319     Vec target;
01320     real weight;
01321 
01322     Vec output(outputsize());
01323 
01324     Vec costs(nTestCosts());
01325 
01326     // testset->defineSizes(inputsize(),targetsize(),weightsize());
01327 
01328     ProgressBar* pb = NULL;
01329     if(report_progress)
01330         pb = new ProgressBar("Testing learner",l);
01331 
01332     if (l == 0) {
01333         // Empty test set: we give -1 cost arbitrarily.
01334         costs.fill(-1);
01335         test_stats->update(costs);
01336     }
01337     int n=int(ceil(l/real(PLMPI::size)));
01338     Mat my_res(n,costs.size()+2);
01339     Mat all_res;
01340     if (PLMPI::rank==0) all_res.resize(n*PLMPI::size,costs.size()+2);
01341     int k=0;
01342     for(int i=0; i<l; i++)
01343      if (i%PLMPI::size==PLMPI::rank)
01344      {
01345         testset.getExample(i, input, target, weight);
01346 
01347         // Always call computeOutputAndCosts, since this is better
01348         // behaved with stateful learners
01349         computeOutputAndCosts(input,target,output,costs);
01350 
01351         if(testoutputs)
01352             testoutputs->putOrAppendRow(i,output);
01353 
01354         if(testcosts)
01355             testcosts->putOrAppendRow(i, costs);
01356 
01357         if(test_stats)
01358         {
01359             my_res.subMat(k,0,1,costs.length()) << costs;
01360             my_res(k,costs.length()) = weight;
01361             my_res(k++,costs.length()+1) = 1;
01362         }
01363 
01364         if(report_progress)
01365             pb->update(i);
01366      }
01367 
01368     if (PLMPI::rank==0)
01369        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01370                   all_res.data(),my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01371     else
01372        MPI_Gather(my_res.data(),my_res.size(),PLMPI_REAL,
01373                   0,my_res.size(),PLMPI_REAL,0,MPI_COMM_WORLD);
01374 
01375     if (PLMPI::rank==0)
01376        for (int i=0;i<all_res.length();i++)
01377           if (all_res(i,costs.length()+1)==1.0)
01378              test_stats->update(all_res(i).subVec(0,costs.length()),
01379                                 all_res(i,costs.length()));
01380 
01381     if(pb)
01382         delete pb;
01383 
01384 }
01385 #endif
01386 
01387 
01388 } // end of namespace PLearn
01389 
01390 
01391 /*
01392   Local Variables:
01393   mode:c++
01394   c-basic-offset:4
01395   c-file-style:"stroustrup"
01396   c-file-offsets:((innamespace . 0)(inline-open . 0))
01397   indent-tabs-mode:nil
01398   fill-column:79
01399   End:
01400 */
01401 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :