PLearn 0.1
GaussianDBNClassification.cc
00001 // -*- C++ -*-
00002 
00003 // GaussianDBNClassification.cc
00004 //
00005 // Copyright (C) 2006 Dan Popovici, Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Dan Popovici
00036 
00039 #define PL_LOG_MODULE_NAME "GaussianDBNClassification"
00040 #include <plearn/io/pl_log.h>
00041 
00042 #include "GaussianDBNClassification.h"
00043 #include "RBMLayer.h"
00044 #include "RBMMixedLayer.h"
00045 #include "RBMMultinomialLayer.h"
00046 #include "RBMParameters.h"
00047 #include "RBMLLParameters.h"
00048 #include "RBMQLParameters.h"
00049 #include "RBMJointLLParameters.h"
00050 
00051 namespace PLearn {
00052 using namespace std;
00053 
00054 PLEARN_IMPLEMENT_OBJECT(
00055     GaussianDBNClassification,
00056     "Does the same thing as Hinton's deep belief nets",
00057     ""
00058 );
00059 
00061 // GaussianDBNClassification //
00063 GaussianDBNClassification::GaussianDBNClassification() :
00064     learning_rate(0.),
00065     weight_decay(0.),
00066     use_sample_rather_than_expectation_in_positive_phase_statistics(false)
00067 {
00068     random_gen = new PRandom();
00069 }
00070 
00072 // declareOptions //
00074 void GaussianDBNClassification::declareOptions(OptionList& ol)
00075 {
00076     declareOption(ol, "learning_rate", &GaussianDBNClassification::learning_rate,
00077                   OptionBase::buildoption,
00078                   "Learning rate");
00079 
00080     declareOption(ol, "weight_decay", &GaussianDBNClassification::weight_decay,
00081                   OptionBase::buildoption,
00082                   "Weight decay");
00083 
00084     declareOption(ol, "initialization_method",
00085                   &GaussianDBNClassification::initialization_method,
00086                   OptionBase::buildoption,
00087                   "The method used to initialize the weights:\n"
00088                   "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
00089                   "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
00090                   " 1/sqrt(d)]\n"
00091                   "  - \"zero\"           = all weights are set to 0,\n"
00092                   "where d = max( up_layer_size, down_layer_size ).\n");
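    // Illustrative note (added for clarity, not part of the original help
    // text): with d = 100, "uniform_sqrt" draws each weight uniformly in
    // [-0.1, 0.1], while "uniform_linear" draws it in [-0.01, 0.01].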
00093 
00094 
00095     declareOption(ol, "training_schedule",
00096                   &GaussianDBNClassification::training_schedule,
00097                   OptionBase::buildoption,
00098                   "Number of examples to use during each of the different"
00099                   " greedy\n"
00100                   "steps of the training phase.\n");
00101 
00102     declareOption(ol, "fine_tuning_method",
00103                   &GaussianDBNClassification::fine_tuning_method,
00104                   OptionBase::buildoption,
00105                   "Method for fine-tuning the whole network after greedy"
00106                   " learning.\n"
00107                   "One of:\n"
00108                   "  - \"none\"\n"
00109                   "  - \"CD\" or \"contrastive_divergence\"\n"
00110                   "  - \"EGD\" or \"error_gradient_descent\"\n"
00111                   "  - \"WS\" or \"wake_sleep\".\n");
00112 
00113     declareOption(ol, "layers", &GaussianDBNClassification::layers,
00114                   OptionBase::buildoption,
00115                   "Layers that learn representations of the input,"
00116                   " unsupervisedly.\n"
00117                   "layers[0] is input layer.\n");
00118 
00119     declareOption(ol, "target_layer", &GaussianDBNClassification::target_layer,
00120                   OptionBase::buildoption,
00121                   "Target (or label) layer");
00122 
00123     declareOption(ol, "params", &GaussianDBNClassification::params,
00124                   OptionBase::buildoption,
00125                   "RBMParameters linking the unsupervised layers.\n"
00126                   "params[i] links layers[i] and layers[i+1], except for"
00127                   " params[n_layers-1],\n"
00128                   "that links layers[n_layers-1] and last_layer.\n");
00129 
00130     declareOption(ol, "target_params", &GaussianDBNClassification::target_params,
00131                   OptionBase::buildoption,
00132                   "Parameters linking target_layer and last_layer");
00133     
00134     declareOption(ol, "input_params", &GaussianDBNClassification::input_params,
00135                   OptionBase::buildoption,
00136                   "Parameters linking layer[0] and layer[1]");
00137 
00138     declareOption(ol, "use_sample_rather_than_expectation_in_positive_phase_statistics",
00139                   &GaussianDBNClassification::use_sample_rather_than_expectation_in_positive_phase_statistics,
00140                   OptionBase::buildoption,
00141                   "In positive phase statistics use output->sample * input\n"
00142                   "rather than output->expectation * input.\n");
00143 
00144     declareOption(ol, "n_layers", &GaussianDBNClassification::n_layers,
00145                   OptionBase::learntoption,
00146                   "Number of unsupervised layers, including input layer");
00147 
00148     declareOption(ol, "last_layer", &GaussianDBNClassification::last_layer,
00149                   OptionBase::learntoption,
00150                   "Last layer, learning joint representations of input and"
00151                   " target");
00152 
00153     declareOption(ol, "joint_layer", &GaussianDBNClassification::joint_layer,
00154                   OptionBase::learntoption,
00155                   "Concatenation of target_layer and layers[n_layers-2]");
00156 
00157     declareOption(ol, "joint_params", &GaussianDBNClassification::joint_params,
00158                   OptionBase::learntoption,
00159                   "Parameters linking joint_layer and last_layer");
00160 
00161     // Now call the parent class' declareOptions().
00162     inherited::declareOptions(ol);
00163 }
00164 
00166 // build //
00168 void GaussianDBNClassification::build()
00169 {
00170     // ### Nothing to add here, simply calls build_().
00171     inherited::build();
00172     build_();
00173 }
00174 
00176 // build_ //
00178 void GaussianDBNClassification::build_()
00179 {
00180     MODULE_LOG << "build_() called" << endl;
00181     n_layers = layers.length();
00182     if( n_layers <= 1 )
00183         return;
00184 
00185     // check value of initialization_method
00186     string im = lowerstring( initialization_method );
00187     if( im == "" || im == "uniform_sqrt" )
00188         initialization_method = "uniform_sqrt";
00189     else if( im == "uniform_linear" )
00190         initialization_method = im;
00191     else if( im == "zero" )
00192         initialization_method = im;
00193     else
00194         PLERROR( "RBMParameters::build_ - initialization_method\n"
00195                  "\"%s\" unknown.\n", initialization_method.c_str() );
00196     MODULE_LOG << "  initialization_method = \"" << initialization_method
00197         << "\"" << endl;
00198 
00199     // check value of fine_tuning_method
00200     string ftm = lowerstring( fine_tuning_method );
00201     if( ftm == "" || ftm == "none" )
00202         fine_tuning_method = "";
00203     else if( ftm == "cd" || ftm == "contrastive_divergence" )
00204         fine_tuning_method = "CD";
00205     else if( ftm == "egd" || ftm == "error_gradient_descent" )
00206         fine_tuning_method = "EGD";
00207     else if( ftm == "ws" || ftm == "wake_sleep" )
00208         fine_tuning_method = "WS";
00209     else
00210         PLERROR( "GaussianDBNClassification::build_ - fine_tuning_method \"%s\"\n"
00211                  "is unknown.\n", fine_tuning_method.c_str() );
00212     MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
00213         <<  endl;
00214     //TODO: build structure to store gradients during gradient descent
00215 
00216     if( training_schedule.length() != n_layers )
00217         training_schedule = TVec<int>( n_layers, 1000000 );
00218     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00219     MODULE_LOG << endl;
00220 
00221     build_layers();
00222     build_params();
00223 }
00224 
00225 void GaussianDBNClassification::build_layers()
00226 {
00227     MODULE_LOG << "build_layers() called" << endl;
00228     if( inputsize_ >= 0 )
00229     {
00230         PLASSERT( layers[0]->size + target_layer->size == inputsize() );
00231         setPredictorPredictedSizes( layers[0]->size,
00232                                     target_layer->size, false );
00233         MODULE_LOG << "  n_predictor = " << n_predictor << endl;
00234         MODULE_LOG << "  n_predicted = " << n_predicted << endl;
00235     }
00236 
00237     for( int i=0 ; i<n_layers ; i++ )
00238         layers[i]->random_gen = random_gen;
00239     target_layer->random_gen = random_gen;
00240 
00241     last_layer = layers[n_layers-1];
00242 
00243     // concatenate target_layer and layers[n_layers-2] into joint_layer
00244     TVec< PP<RBMLayer> > the_sub_layers( 2 );
00245     the_sub_layers[0] = target_layer;
00246     the_sub_layers[1] = layers[n_layers-2];
00247     joint_layer = new RBMMixedLayer( the_sub_layers );
00248     joint_layer->random_gen = random_gen;
00249 }
00250 
00251 void GaussianDBNClassification::build_params()
00252 {
00253     MODULE_LOG << "build_params() called" << endl;
00254     if( params.length() == 0 )
00255     {
00256         input_params = new RBMQLParameters();
00257         params.resize( n_layers-1 );
00258         for( int i=1 ; i<n_layers-1 ; i++ )
00259             params[i] = new RBMLLParameters();
00260         // params[0] is not used, so it is never created: its role is played by input_params
00261     }
00262     else if( params.length() != n_layers-1 )
00263         PLERROR( "GaussianDBNClassification::build_params - params.length() should\n"
00264                  "be equal to layers.length()-1 (%d != %d).\n",
00265                  params.length(), n_layers-1 );
00266 
00267     activation_gradients.resize( n_layers-1 );
00268     expectation_gradients.resize( n_layers-1 );
00269     output_gradient.resize( n_predicted );
00270 
00271     input_params->down_units_types = layers[0]->units_types;
00272     input_params->up_units_types = layers[1]->units_types;
00273     input_params->learning_rate = learning_rate;
00274     input_params->initialization_method = initialization_method;
00275     input_params->random_gen = random_gen;
00276     input_params->build();
00277 
00278     activation_gradients[0].resize( input_params->down_layer_size );
00279     expectation_gradients[0].resize( input_params->down_layer_size );
00280 
00281 
00282     for( int i=1 ; i<n_layers-1 ; i++ )
00283     {
00284         //TODO: call changeOptions instead
00285         
00286         params[i]->down_units_types = layers[i]->units_types;
00287         params[i]->up_units_types = layers[i+1]->units_types;
00288         params[i]->learning_rate = learning_rate;
00289         params[i]->initialization_method = initialization_method;
00290         params[i]->random_gen = random_gen;
00291         params[i]->build();
00292 
00293         activation_gradients[i].resize( params[i]->down_layer_size );
00294         expectation_gradients[i].resize( params[i]->down_layer_size );
00295     }
00296 
00297     if( target_layer && !target_params )
00298         target_params = new RBMLLParameters();
00299 
00300     //TODO: call changeOptions instead
00301     target_params->down_units_types = target_layer->units_types;
00302     target_params->up_units_types = last_layer->units_types;
00303     target_params->learning_rate = learning_rate;
00304     target_params->initialization_method = initialization_method;
00305     target_params->random_gen = random_gen;
00306     target_params->build();
00307 
00308     // build joint_params from params[n_layers-2] and target_params
00309     joint_params = new RBMJointLLParameters( target_params,
00310                                              params[n_layers-2] );
00311     joint_params->learning_rate = learning_rate;
00312     joint_params->random_gen = random_gen;
00313 }
00314 
00316 // forget //
00318 void GaussianDBNClassification::forget()
00319 {
00320     MODULE_LOG << "forget() called" << endl;
00327     resetGenerator(seed_);
00328     input_params->forget();
00329     for( int i=1 ; i<n_layers-1 ; i++ )
00330         params[i]->forget();
00331 
00332     for( int i=0 ; i<n_layers ; i++ )
00333         layers[i]->reset();
00334 
00335     target_params->forget();
00336     target_layer->reset();
00337 
00338     stage = 0;
00339 }
00340 
00342 // generate //
00344 void GaussianDBNClassification::generate(Vec& y) const
00345 {
00346     PLERROR("generate not implemented for GaussianDBNClassification");
00347 }
00348 
00350 // cdf //
00352 real GaussianDBNClassification::cdf(const Vec& y) const
00353 {
00354     PLERROR("cdf not implemented for GaussianDBNClassification"); return 0;
00355 }
00356 
00358 // expectation //
00360 void GaussianDBNClassification::expectation(Vec& mu) const
00361 {
00362     mu.resize( predicted_size );
00363 
00364     // Propagate input (predictor_part) until penultimate layer
00365     layers[0]->expectation << predictor_part;
00366     input_params->setAsDownInput(layers[0]->expectation) ; 
00367     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
00368     layers[1]->computeExpectation();
00369     
00370     for( int i=1 ; i<n_layers-2 ; i++ )
00371     {
00372         params[i]->setAsDownInput( layers[i]->expectation );
00373         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00374         layers[i+1]->computeExpectation();
00375     }
00376 
00377     // Set layers[n_layers-2]->expectation (penultimate) as conditioning input
00378     // of joint_params
00379     joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00380 
00381     // Get all activations on target_layer from target_params
00382     target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00383     target_layer->computeExpectation();
00384 
00385     mu << target_layer->expectation;
00386 }
00387 
00389 // density //
00391 real GaussianDBNClassification::density(const Vec& y) const
00392 {
00393     PLASSERT( y.size() == n_predicted );
00394 
00395     // TODO: 'y'[0] should rather be the integer "index" itself!
00396     int index = argmax( y );
00397 
00398     // If y != onehot( index ), then density is 0
00399     if( !is_equal( y[index], 1. ) )
00400         return 0;
00401     for( int i=0 ; i<n_predicted ; i++ )
00402         if( !is_equal( y[i], 0 ) && i != index )
00403             return 0;
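    // Illustrative example: with n_predicted == 4 and y == (0, 0, 1, 0),
    // index == 2 and the returned value is the conditional probability of
    // class 2 given the current predictor_part.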
00404 
00405     expectation( store_expect );
00406     return store_expect[index];
00407 }
00408 
00409 
00411 // log_density //
00413 real GaussianDBNClassification::log_density(const Vec& y) const
00414 {
00415     return pl_log( density(y) );
00416 }
00417 
00419 // survival_fn //
00421 real GaussianDBNClassification::survival_fn(const Vec& y) const
00422 {
00423     PLERROR("survival_fn not implemented for GaussianDBNClassification"); return 0;
00424 }
00425 
00427 // variance //
00429 void GaussianDBNClassification::variance(Mat& cov) const
00430 {
00431     PLERROR("variance not implemented for GaussianDBNClassification");
00432 }
00433 
00435 // makeDeepCopyFromShallowCopy //
00437 void GaussianDBNClassification::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00438 {
00439     inherited::makeDeepCopyFromShallowCopy(copies);
00440 
00441     deepCopyField(layers, copies);
00442     deepCopyField(last_layer, copies);
00443     deepCopyField(target_layer, copies);
00444     deepCopyField(joint_layer, copies);
00445     deepCopyField(params, copies);
00446     deepCopyField(joint_params, copies);
00447     deepCopyField(input_params, copies);
00448     deepCopyField(target_params, copies);
00449     deepCopyField(training_schedule, copies);
00450 }
00451 
00453 // setPredictor //
00455 void GaussianDBNClassification::setPredictor(const Vec& predictor, bool call_parent)
00456     const
00457 {
00458     if (call_parent)
00459         inherited::setPredictor(predictor, true);
00460     // ### Add here any specific code required by your subclass.
00461 }
00462 
00464 // setPredictorPredictedSizes //
00466 bool GaussianDBNClassification::setPredictorPredictedSizes(int the_predictor_size,
00467                                                      int the_predicted_size,
00468                                                      bool call_parent)
00469 {
00470     bool sizes_have_changed = false;
00471     if (call_parent)
00472         sizes_have_changed = inherited::setPredictorPredictedSizes(
00473             the_predictor_size, the_predicted_size, true);
00474 
00475     // ### Add here any specific code required by your subclass.
00476     if( ( the_predictor_size >= 0 && the_predictor_size != layers[0]->size ) ||
00477         ( the_predicted_size >= 0 && the_predicted_size != target_layer->size ) )
00478         PLERROR( "GaussianDBNClassification::setPredictorPredictedSizes - \n"
00479                  "n_predictor should be equal to layer[0]->size (%d)\n"
00480                  "n_predicted should be equal to target_layer->size (%d).\n",
00481                  layers[0]->size, target_layer->size );
00482 
00483     n_predictor = layers[0]->size;
00484     n_predicted = target_layer->size;
00485 
00486     // Returned value.
00487     return sizes_have_changed;
00488 }
00489 
00490 
00492 // train //
00494 void GaussianDBNClassification::train()
00495 {
00496     MODULE_LOG << "train() called" << endl;
00497     // The role of the train method is to bring the learner up to
00498     // stage==nstages, updating train_stats with training costs measured
00499     // on-line in the process.
00500 
00501     /* TYPICAL CODE:
00502 
00503     static Vec input;  // static so we don't reallocate memory each time...
00504     static Vec target; // (but be careful that static means shared!)
00505     input.resize(inputsize());    // the train_set's inputsize()
00506     target.resize(targetsize());  // the train_set's targetsize()
00507     real weight;
00508 
00509     // This generic PLearner method does a number of standard stuff useful for
00510     // (almost) any learner, and return 'false' if no training should take
00511     // place. See PLearner.h for more details.
00512     if (!initTrain())
00513         return;
00514 
00515     while(stage<nstages)
00516     {
00517         // clear statistics of previous epoch
00518         train_stats->forget();
00519 
00520         //... train for 1 stage, and update train_stats,
00521         // using train_set->getExample(input, target, weight)
00522         // and train_stats->update(train_costs)
00523 
00524         ++stage;
00525         train_stats->finalize(); // finalize statistics for this epoch
00526     }
00527     */
00528 
00529     Vec input( inputsize() );
00530     Vec target( targetsize() ); // unused
00531     real weight; // unused
00532 
00533     if( !initTrain() )
00534     {
00535         MODULE_LOG << "train() aborted" << endl;
00536         return;
00537     }
00538 
00539     int nsamples = train_set->length();
00540     int sample = 0;
00541     MODULE_LOG << "  nsamples = " << nsamples << endl;
00542 
00543     // Let's define stage and nstages:
00544     //   - 0: fresh state, nothing is done
00545     //   - 1..n_layers-2: params[stage-1] is trained
00546     //   - n_layers-1: joint_params is trained (including params[n_layers-2])
00547     //   - n_layers: after the fine tuning
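    // Illustrative example (assuming n_layers == 3 and nstages >= 3): the loop
    // below trains input_params (layers[0]-layers[1]) during its first
    // iteration, joint_params (target_layer + layers[1] vs. last_layer) during
    // its second, and runs the fine-tuning pass during its third.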
00548 
00549     MODULE_LOG << "initial stage = " << stage << endl;
00550     MODULE_LOG << "objective: nstages = " << nstages << endl;
00551 
00552     for( ; stage < nstages ; stage++ )
00553     {
00554         // clear stats of previous epoch
00555         train_stats->forget();
00556 
00557         // loops over the training set, until training_schedule[stage] examples
00558         // have been seen.
00559         // TODO: modify the training set used?
00560         int layer = stage;
00561         int n_samples_to_see = training_schedule[stage];
00562 
00563         // this progress bar tracks the number of training examples seen
00564         // during the current stage
00565         ProgressBar* pb = 0;
00566 
00567         if( stage < n_layers-2 )
00568         {
00569             MODULE_LOG << "Training parameters between layers " << stage
00570                 << " and " << stage+1 << endl;
00571 
00572             if( report_progress )
00573                 pb = new ProgressBar( "Training " + classname()
00574                                       + " parameters between layers "
00575                                       + tostring(stage) + " and "
00576                                       + tostring(stage+1),
00577                                       n_samples_to_see );
00578 
00579             int begin_sample = sample;
00580             int end_sample = begin_sample + n_samples_to_see;
00581             for( ; sample < end_sample ; sample++ )
00582             {
00583                 // sample is the index in the training set
00584                 int i = sample % train_set->length();
00585                 train_set->getExample(i, input, target, weight);
00586                 greedyStep( input.subVec(0, n_predictor), layer );
00587 
00588                 if( pb )
00589                     pb->update( sample - begin_sample + 1 );
00590             }
00591 
00592         }
00593         else if( stage == n_layers-2 )
00594         {
00595             MODULE_LOG << "Training joint parameters, between target,"
00596                 << " penultimate (" << n_layers-2 << ")," << endl
00597                 << "and last (" << n_layers-1 << ") layers." << endl;
00598             if( report_progress )
00599                 pb = new ProgressBar( "Training " + classname()
00600                                       + " parameters between target, "
00601                                       + tostring(stage) + " and "
00602                                       + tostring(stage+1) + " layers",
00603                                       n_samples_to_see );
00604 
00605             int begin_sample = sample;
00606             int end_sample = begin_sample + n_samples_to_see;
00607 
00608             for( ; sample < end_sample ; sample++ )
00609             {
00610                 // sample is the index in the training set
00611                 int i = sample % train_set->length();
00612                 train_set->getExample(i, input, target, weight);
00613                 jointGreedyStep( input );
00614 
00615                 if( pb )
00616                     pb->update( sample - begin_sample + 1 );
00617             }
00618         }
00619         else if( stage == n_layers-1 )
00620         {
00621             MODULE_LOG << "Fine-tuning all parameters, using method "
00622                 << fine_tuning_method << endl;
00623 
00624             if( fine_tuning_method == "" ) // do nothing
00625                 sample += n_samples_to_see;
00626             else if( fine_tuning_method == "EGD" )
00627             {
00628                 if( report_progress )
00629                     pb = new ProgressBar( "Training all " + classname()
00630                                           + " parameters by fine tuning",
00631                                           n_samples_to_see );
00632 
00633 /*
00634 pout << "==================" << endl
00635     << "Before update:" << endl
00636     << "up:      " << joint_params->up_units_params << endl
00637     << "weights: " << endl << joint_params->weights << endl
00638     << "down:    " << joint_params->down_units_params << endl
00639     << endl;
00640 // */
00641                 int begin_sample = sample;
00642                 int end_sample = begin_sample + n_samples_to_see;
00643                 for( ; sample < end_sample ; sample++ )
00644                 {
00645                     // sample is the index in the training set
00646                     int i = sample % train_set->length();
00647                     train_set->getExample(i, input, target, weight);
00648                     fineTuneByGradientDescent( input );
00649 
00650                     if( pb )
00651                         pb->update( sample - begin_sample + 1 );
00652                 }
00653 /*
00654 pout << "-------" << endl
00655     << "After update:" << endl
00656     << "up:      " << joint_params->up_units_params << endl
00657     << "weights: " << endl << joint_params->weights << endl
00658     << "down:    " << joint_params->down_units_params << endl
00659     << endl;
00660 // */
00661             }
00662             else
00663                 PLERROR( "Fine-tuning methods other than \"EGD\" are not"
00664                          " implemented yet." );
00665 
00666         }
00667         train_stats->finalize(); // finalize statistics for this epoch
00668     }
00669     MODULE_LOG << endl;
00670 }
00671 
00672 void GaussianDBNClassification::greedyStep( const Vec& predictor, int index )
00673 {
00674     // deterministic propagation until we reach index
00675     layers[0]->expectation << predictor;
00676 
00677     input_params->setAsDownInput( layers[0]->expectation );
00678     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
00679     layers[1]->computeExpectation();
00680         
00681     for( int i=1 ; i<index ; i++ )
00682     {
00683         params[i]->setAsDownInput( layers[i]->expectation );
00684         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00685         layers[i+1]->computeExpectation();
00686     }
00687 
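    // Sketch of the CD-1 step implemented below (comment added for clarity):
    //   positive phase: v0 = layers[index]->expectation, h0 = up-layer
    //     expectation (or a sample of it, depending on the
    //     use_sample_rather_than_expectation_... flag);
    //   down pass:      v1 is obtained by sampling layers[index] given a
    //     sample of h0;
    //   negative phase: h1 = up-layer expectation given v1;
    //   update:         roughly dW ~ pos(v0, h0) - neg(v1, h1).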
00688     // positive phase
00689     if (index == 0) {
00690         input_params->setAsDownInput( layers[index]->expectation );
00691         layers[index+1]->getAllActivations((RBMQLParameters*) input_params);
00692         layers[index+1]->computeExpectation();
00693         layers[index+1]->generateSample();
00694         if (use_sample_rather_than_expectation_in_positive_phase_statistics)
00695             input_params->accumulatePosStats(layers[index]->expectation,
00696                     layers[index+1]->sample );
00697         else
00698             input_params->accumulatePosStats(layers[index]->expectation,
00699                     layers[index+1]->expectation );
00700 
00701         // down propagation
00702         input_params->setAsUpInput( layers[index+1]->sample );
00703         layers[index]->getAllActivations( (RBMQLParameters*) input_params );
00704 
00705         // negative phase
00706         layers[index]->generateSample();
00707         input_params->setAsDownInput( layers[index]->sample );
00708         layers[index+1]->getAllActivations((RBMQLParameters*) input_params);
00709         layers[index+1]->computeExpectation();
00710         input_params->accumulateNegStats( layers[index]->sample,
00711                 layers[index+1]->expectation );
00712 
00713         // update
00714         input_params->update();
00715 
00716     }
00717     else {
00718         params[index]->setAsDownInput( layers[index]->expectation );
00719         layers[index+1]->getAllActivations((RBMLLParameters*) params[index]);
00720         layers[index+1]->computeExpectation();
00721         layers[index+1]->generateSample();
00722         if (use_sample_rather_than_expectation_in_positive_phase_statistics)
00723             params[index]->accumulatePosStats(layers[index]->expectation,
00724                     layers[index+1]->sample );
00725         else
00726             params[index]->accumulatePosStats(layers[index]->expectation,
00727                     layers[index+1]->expectation );
00728 
00729         // down propagation
00730         params[index]->setAsUpInput( layers[index+1]->sample );
00731         layers[index]->getAllActivations( (RBMLLParameters*) params[index] );
00732 
00733         // negative phase
00734         layers[index]->generateSample();
00735         params[index]->setAsDownInput( layers[index]->sample );
00736         layers[index+1]->getAllActivations((RBMLLParameters*) params[index]);
00737         layers[index+1]->computeExpectation();
00738         params[index]->accumulateNegStats( layers[index]->sample,
00739                 layers[index+1]->expectation );
00740 
00741         // update
00742         params[index]->update();
00743 
00744     }
00745     
00746 
00747 }
00748 
00749 void GaussianDBNClassification::jointGreedyStep( const Vec& input )
00750 {
00751     // deterministic propagation until we reach n_layers-2, setting the input
00752     // of the "input" part of joint_layer
00753     layers[0]->expectation << input.subVec( 0, n_predictor );
00754     input_params->setAsDownInput( layers[0]->expectation );
00755     layers[1]->getAllActivations( (RBMQLParameters*) input_params );
00756     layers[1]->computeExpectation();
00757     
00758     
00759     for( int i=1 ; i<n_layers-2 ; i++ )
00760     {
00761         params[i]->setAsDownInput( layers[i]->expectation );
00762         layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00763         layers[i+1]->computeExpectation();
00764     }
00765 
00766     // now fill the "target" part of joint_layer
00767     target_layer->expectation << input.subVec( n_predictor, n_predicted );
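    // What follows is the same CD-1 step as in greedyStep(), applied to
    // joint_layer (target_layer + penultimate layer) and last_layer through
    // joint_params (comment added for clarity).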
00768 
00769     // positive phase
00770     joint_params->setAsDownInput( joint_layer->expectation );
00771     last_layer->getAllActivations( (RBMLLParameters*) joint_params );
00772     last_layer->computeExpectation();
00773     last_layer->generateSample();
00774     if (use_sample_rather_than_expectation_in_positive_phase_statistics)
00775         joint_params->accumulatePosStats( joint_layer->expectation,
00776                                           last_layer->sample );
00777     else
00778         joint_params->accumulatePosStats( joint_layer->expectation,
00779                                           last_layer->expectation );
00780 
00781     // down propagation
00782     joint_params->setAsUpInput( last_layer->sample );
00783     joint_layer->getAllActivations( (RBMLLParameters*) joint_params );
00784 
00785     // negative phase
00786     joint_layer->generateSample();
00787     joint_params->setAsDownInput( joint_layer->sample );
00788     last_layer->getAllActivations( (RBMLLParameters*) joint_params );
00789     last_layer->computeExpectation();
00790     joint_params->accumulateNegStats( joint_layer->sample,
00791                                       last_layer->expectation );
00792 
00793     // update
00794     joint_params->update();
00795 }
00796 
00797 void GaussianDBNClassification::fineTuneByGradientDescent( const Vec& input )
00798 {
00799     // split input in predictor_part and predicted_part
00800     splitCond(input);
00801 
00802     // compute predicted_part expectation, conditioned on predictor_part
00803     // (forward pass)
00804     expectation( output_gradient );
00805 
00806     int actual_index = argmax(predicted_part);
00807     output_gradient[actual_index] -= 1.;
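    // At this point output_gradient holds p - onehot(target): assuming
    // target_layer computes a softmax over classes, this is the gradient of
    // the NLL with respect to the activations feeding that layer.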
00808 
00809     joint_params->bpropUpdate( layers[n_layers-2]->expectation,
00810                                target_layer->expectation,
00811                                expectation_gradients[n_layers-2],
00812                                output_gradient );
00813 
00814     for( int i=n_layers-2 ; i>0 ; i-- )
00815     {
00816         layers[i]->bpropUpdate( layers[i]->activations,
00817                                 layers[i]->expectation,
00818                                 activation_gradients[i],
00819                                 expectation_gradients[i] );
00820         params[i-1]->bpropUpdate( layers[i-1]->expectation,
00821                                   layers[i]->activations,
00822                                   expectation_gradients[i-1],
00823                                   activation_gradients[i] );
00824     }
00825 }
00826 
00827 void GaussianDBNClassification::computeCostsFromOutputs(const Vec& input,
00828                                                   const Vec& output,
00829                                                   const Vec& target,
00830                                                   Vec& costs) const
00831 {
00832     char c = outputs_def[0];
00833     if( c == 'l' || c == 'd' )
00834         inherited::computeCostsFromOutputs(input, output, target, costs);
00835     else if( c == 'e' )
00836     {
00837         costs.resize( 2 );
00838         splitCond(input);
00839 
00840         // actual_index is the actual 'target'
00841         int actual_index = argmax(predicted_part);
00842 #ifdef BOUNDCHECK
00843         for( int i=0 ; i<n_predicted ; i++ )
00844             PLASSERT( is_equal( predicted_part[i], 0. ) ||
00845                       ( i == actual_index && is_equal( predicted_part[i], 1. ) ) );
00846 #endif
00847         costs[0] = -pl_log( output[actual_index] );
00848 
00849         // predicted_index is the most probable predicted class
00850         int predicted_index = argmax(output);
00851         if( predicted_index == actual_index )
00852             costs[1] = 0;
00853         else
00854             costs[1] = 1;
00855     }
00856 }
00857 
00858 TVec<string> GaussianDBNClassification::getTestCostNames() const
00859 {
00860     char c = outputs_def[0];
00861     TVec<string> result;
00862     if( c == 'l' || c == 'd' )
00863         result.append( "NLL" );
00864     else if( c == 'e' )
00865     {
00866         result.append( "NLL" );
00867         result.append( "class_error" );
00868     }
00869     return result;
00870 }
00871 
00872 } // end of namespace PLearn
00873 
00874 
00875 /*
00876   Local Variables:
00877   mode:c++
00878   c-basic-offset:4
00879   c-file-style:"stroustrup"
00880   c-file-offsets:((innamespace . 0)(inline-open . 0))
00881   indent-tabs-mode:nil
00882   fill-column:79
00883   End:
00884 */
00885 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :