// -*- C++ -*-

// GaussianDBNRegression.cc
//
// Copyright (C) 2006 Dan Popovici, Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Dan Popovici

#define PL_LOG_MODULE_NAME "GaussianDBNRegression"
#include <plearn/io/pl_log.h>

#include "GaussianDBNRegression.h"
#include "RBMLayer.h"
#include "RBMMixedLayer.h"
#include "RBMMultinomialLayer.h"
#include "RBMParameters.h"
#include "RBMLLParameters.h"
#include "RBMQLParameters.h"
#include "RBMLQParameters.h"
#include "RBMJointLLParameters.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    GaussianDBNRegression,
    "Does the same thing as Hinton's deep belief nets",
    ""
);

// GaussianDBNRegression //
GaussianDBNRegression::GaussianDBNRegression() :
    learning_rate(0.),
    weight_decay(0.),
    use_sample_rather_than_expectation_in_positive_phase_statistics(false)
{
    random_gen = new PRandom();
}

// declareOptions //
void GaussianDBNRegression::declareOptions(OptionList& ol)
{
    declareOption(ol, "learning_rate", &GaussianDBNRegression::learning_rate,
                  OptionBase::buildoption,
                  "Learning rate");

    declareOption(ol, "weight_decay", &GaussianDBNRegression::weight_decay,
                  OptionBase::buildoption,
                  "Weight decay");

    declareOption(ol, "initialization_method",
                  &GaussianDBNRegression::initialization_method,
                  OptionBase::buildoption,
                  "The method used to initialize the weights:\n"
                  "  - \"uniform_linear\" = a uniform law in [-1/d, 1/d]\n"
                  "  - \"uniform_sqrt\"   = a uniform law in [-1/sqrt(d),"
                  " 1/sqrt(d)]\n"
                  "  - \"zero\"           = all weights are set to 0,\n"
                  "where d = max( up_layer_size, down_layer_size ).\n");


    declareOption(ol, "training_schedule",
                  &GaussianDBNRegression::training_schedule,
                  OptionBase::buildoption,
                  "Number of examples to use during each of the different"
                  " greedy\n"
                  "steps of the training phase.\n");

    declareOption(ol, "fine_tuning_method",
                  &GaussianDBNRegression::fine_tuning_method,
                  OptionBase::buildoption,
                  "Method for fine-tuning the whole network after greedy"
                  " learning.\n"
                  "One of:\n"
                  "  - \"none\"\n"
                  "  - \"CD\" or \"contrastive_divergence\"\n"
                  "  - \"EGD\" or \"error_gradient_descent\"\n"
                  "  - \"WS\" or \"wake_sleep\".\n");

    declareOption(ol, "layers", &GaussianDBNRegression::layers,
                  OptionBase::buildoption,
                  "Layers that learn representations of the input in an"
                  " unsupervised way.\n"
                  "layers[0] is the input layer.\n");

    declareOption(ol, "target_layer", &GaussianDBNRegression::target_layer,
                  OptionBase::buildoption,
                  "Target (or label) layer");

    declareOption(ol, "params", &GaussianDBNRegression::params,
                  OptionBase::buildoption,
                  "RBMParameters linking the unsupervised layers.\n"
                  "params[i] links layers[i] and layers[i+1], except for"
                  " params[n_layers-1],\n"
                  "which links layers[n_layers-1] and last_layer.\n");

    declareOption(ol, "target_params", &GaussianDBNRegression::target_params,
                  OptionBase::buildoption,
                  "Parameters linking target_layer and last_layer");

    declareOption(ol, "input_params", &GaussianDBNRegression::input_params,
                  OptionBase::buildoption,
                  "Parameters linking layers[0] and layers[1]");

    declareOption(ol, "use_sample_rather_than_expectation_in_positive_phase_statistics",
                  &GaussianDBNRegression::use_sample_rather_than_expectation_in_positive_phase_statistics,
                  OptionBase::buildoption,
                  "In positive phase statistics use output->sample * input\n"
                  "rather than output->expectation * input.\n");

    declareOption(ol, "n_layers", &GaussianDBNRegression::n_layers,
                  OptionBase::learntoption,
                  "Number of unsupervised layers, including input layer");

    // Now call the parent class' declareOptions().
    inherited::declareOptions(ol);
}

// build //
void GaussianDBNRegression::build()
{
    // ### Nothing to add here, simply calls build_().
    inherited::build();
    build_();
}

// build_ //
void GaussianDBNRegression::build_()
{
    MODULE_LOG << "build_() called" << endl;
    n_layers = layers.length();
    if( n_layers <= 1 )
        return;

    // check value of initialization_method
    string im = lowerstring( initialization_method );
    if( im == "" || im == "uniform_sqrt" )
        initialization_method = "uniform_sqrt";
    else if( im == "uniform_linear" )
        initialization_method = im;
    else if( im == "zero" )
        initialization_method = im;
    else
        PLERROR( "GaussianDBNRegression::build_ - initialization_method\n"
                 "\"%s\" unknown.\n", initialization_method.c_str() );
    MODULE_LOG << "  initialization_method = \"" << initialization_method
        << "\"" << endl;

    // check value of fine_tuning_method
    string ftm = lowerstring( fine_tuning_method );
    if( ftm == "" || ftm == "none" )
        fine_tuning_method = "";
    else if( ftm == "cd" || ftm == "contrastive_divergence" )
        fine_tuning_method = "CD";
    else if( ftm == "egd" || ftm == "error_gradient_descent" )
        fine_tuning_method = "EGD";
    else if( ftm == "ws" || ftm == "wake_sleep" )
        fine_tuning_method = "WS";
    else
        PLERROR( "GaussianDBNRegression::build_ - fine_tuning_method \"%s\"\n"
                 "is unknown.\n", fine_tuning_method.c_str() );
    MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
        << endl;
    //TODO: build structure to store gradients during gradient descent

    if( training_schedule.length() != n_layers )
        training_schedule = TVec<int>( n_layers, 1000000 );
    MODULE_LOG << "  training_schedule = " << training_schedule << endl;
    MODULE_LOG << endl;

    build_layers();
    build_params();
}

void GaussianDBNRegression::build_layers()
{
    MODULE_LOG << "build_layers() called" << endl;
    if( inputsize_ >= 0 )
    {
        PLASSERT( layers[0]->size + target_layer->size == inputsize() );
        setPredictorPredictedSizes( layers[0]->size,
                                    target_layer->size, false );
        MODULE_LOG << "  n_predictor = " << n_predictor << endl;
        MODULE_LOG << "  n_predicted = " << n_predicted << endl;
    }

    for( int i=0 ; i<n_layers ; i++ )
        layers[i]->random_gen = random_gen;
    target_layer->random_gen = random_gen;

    last_layer = layers[n_layers-1];

}

void GaussianDBNRegression::build_params()
{
    MODULE_LOG << "build_params() called" << endl;
    if( params.length() == 0 )
    {
        input_params = new RBMQLParameters() ;
        params.resize( n_layers-1 );
        for( int i=1 ; i<n_layers-1 ; i++ )
            params[i] = new RBMLLParameters();
        // params[0] is not used, so it is never created
    }
    else if( params.length() != n_layers-1 )
        PLERROR( "GaussianDBNRegression::build_params - params.length() should\n"
                 "be equal to layers.length()-1 (%d != %d).\n",
                 params.length(), n_layers-1 );

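    // Gradient buffers used during fine-tuning: index i (0..n_layers-1)
    // holds the gradient w.r.t. layers[i], and index n_layers holds the
    // gradient w.r.t. the target layer.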
    activation_gradients.resize( n_layers+1 );
    expectation_gradients.resize( n_layers+1 );
    output_gradient.resize( n_predicted );

    input_params->down_units_types = layers[0]->units_types;
    input_params->up_units_types = layers[1]->units_types;
    input_params->learning_rate = learning_rate;
    input_params->initialization_method = initialization_method;
    input_params->random_gen = random_gen;
    input_params->build();

    activation_gradients[0].resize( input_params->down_layer_size );
    expectation_gradients[0].resize( input_params->down_layer_size );


    for( int i=1 ; i<n_layers-1 ; i++ )
    {
        //TODO: call changeOptions instead

        params[i]->down_units_types = layers[i]->units_types;
        params[i]->up_units_types = layers[i+1]->units_types;
        params[i]->learning_rate = learning_rate;
        params[i]->initialization_method = initialization_method;
        params[i]->random_gen = random_gen;
        params[i]->build();

        activation_gradients[i].resize( params[i]->down_layer_size );
        expectation_gradients[i].resize( params[i]->down_layer_size );

    }


    if( target_layer && !target_params )
        target_params = new RBMLQParameters();

    //TODO: call changeOptions instead
    target_params->down_units_types = last_layer->units_types;
    target_params->up_units_types = target_layer->units_types;
    target_params->learning_rate = learning_rate;
    target_params->initialization_method = initialization_method;
    target_params->random_gen = random_gen;
    target_params->build();

}

// forget //
void GaussianDBNRegression::forget()
{
    MODULE_LOG << "forget() called" << endl;
    resetGenerator(seed_);
    input_params->forget();
    for( int i=1 ; i<n_layers-1 ; i++ )
        params[i]->forget();

    for( int i=0 ; i<n_layers ; i++ )
        layers[i]->reset();

    target_params->forget();
    target_layer->reset();

    stage = 0;
}

// generate //
void GaussianDBNRegression::generate(Vec& y) const
{
    PLERROR("generate not implemented for GaussianDBNRegression");
}

// cdf //
real GaussianDBNRegression::cdf(const Vec& y) const
{
    PLERROR("cdf not implemented for GaussianDBNRegression"); return 0;
}

// expectation //
void GaussianDBNRegression::expectation(Vec& mu) const
{
    mu.resize( predicted_size );

    // Propagate input (predictor_part) until penultimate layer
    layers[0]->expectation << predictor_part;
    input_params->setAsDownInput(layers[0]->expectation);
    layers[1]->getAllActivations( (RBMQLParameters*) input_params );
    layers[1]->computeExpectation();

    for( int i=1 ; i<n_layers-1 ; i++ )
    {
        params[i]->setAsDownInput( layers[i]->expectation );
        layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
        layers[i+1]->computeExpectation();
    }

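    // Propagate the top hidden layer through target_params; the target
    // layer's expectation is the conditional mean of the predicted
    // (regression) part, returned in mu.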
    target_params->setAsDownInput( last_layer->expectation );
    target_layer->getAllActivations( (RBMLQParameters*) target_params );
    target_layer->computeExpectation();

    mu << target_layer->expectation;

}

// density //
real GaussianDBNRegression::density(const Vec& y) const
{
    PLASSERT( y.size() == n_predicted );

    // TODO: y[0] should rather be the integer "index" itself!
    int index = argmax( y );

    // If y != onehot( index ), then density is 0
    if( !is_equal( y[index], 1. ) )
        return 0;
    for( int i=0 ; i<n_predicted ; i++ )
        if( !is_equal( y[i], 0 ) && i != index )
            return 0;

    expectation( store_expect );
    return store_expect[index];
}


// log_density //
real GaussianDBNRegression::log_density(const Vec& y) const
{
    return pl_log( density(y) );
}

// survival_fn //
real GaussianDBNRegression::survival_fn(const Vec& y) const
{
    PLERROR("survival_fn not implemented for GaussianDBNRegression"); return 0;
}

// variance //
void GaussianDBNRegression::variance(Mat& cov) const
{
    PLERROR("variance not implemented for GaussianDBNRegression");
}

// makeDeepCopyFromShallowCopy //
void GaussianDBNRegression::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(layers, copies);
    deepCopyField(last_layer, copies);
    deepCopyField(target_layer, copies);
    deepCopyField(params, copies);
    deepCopyField(input_params, copies);
    deepCopyField(target_params, copies);
    deepCopyField(training_schedule, copies);
}

// setPredictor //
void GaussianDBNRegression::setPredictor(const Vec& predictor, bool call_parent)
    const
{
    if (call_parent)
        inherited::setPredictor(predictor, true);
    // ### Add here any specific code required by your subclass.
}

// setPredictorPredictedSizes //
bool GaussianDBNRegression::setPredictorPredictedSizes(int the_predictor_size,
                                                     int the_predicted_size,
                                                     bool call_parent)
{
    bool sizes_have_changed = false;
    if (call_parent)
        sizes_have_changed = inherited::setPredictorPredictedSizes(
            the_predictor_size, the_predicted_size, true);

    // ### Add here any specific code required by your subclass.
    if( ( the_predictor_size >= 0 && the_predictor_size != layers[0]->size ) ||
        ( the_predicted_size >= 0 && the_predicted_size != target_layer->size ) )
        PLERROR( "GaussianDBNRegression::setPredictorPredictedSizes - \n"
                 "n_predictor should be equal to layer[0]->size (%d)\n"
                 "n_predicted should be equal to target_layer->size (%d).\n",
                 layers[0]->size, target_layer->size );

    n_predictor = layers[0]->size;
    n_predicted = target_layer->size;

    // Returned value.
    return sizes_have_changed;
}


// train //
void GaussianDBNRegression::train()
{
    MODULE_LOG << "train() called" << endl;
    // The role of the train method is to bring the learner up to
    // stage==nstages, updating train_stats with training costs measured
    // on-line in the process.

    /* TYPICAL CODE:

    static Vec input;  // static so we don't reallocate memory each time...
    static Vec target; // (but be careful that static means shared!)
    input.resize(inputsize());    // the train_set's inputsize()
    target.resize(targetsize());  // the train_set's targetsize()
    real weight;

    // This generic PLearner method does a number of standard stuff useful for
    // (almost) any learner, and return 'false' if no training should take
    // place. See PLearner.h for more details.
    if (!initTrain())
        return;

    while(stage<nstages)
    {
        // clear statistics of previous epoch
        train_stats->forget();

        //... train for 1 stage, and update train_stats,
        // using train_set->getExample(input, target, weight)
        // and train_stats->update(train_costs)

        ++stage;
        train_stats->finalize(); // finalize statistics for this epoch
    }
    */

    Vec input( inputsize() );
    Vec target( targetsize() ); // unused
    real weight; // unused

    if( !initTrain() )
    {
        MODULE_LOG << "train() aborted" << endl;
        return;
    }

    int nsamples = train_set->length();
    MODULE_LOG << "  nsamples = " << nsamples << endl;

    // Let's define stage and nstages:
    //   - 0: fresh state, nothing is done
    //   - 1..n_layers-2: params[stage-1] is trained
    //   - n_layers-1: joint_params is trained (including params[n_layers-2])
    //   - n_layers: after the fine tuning

    MODULE_LOG << "initial stage = " << stage << endl;
    MODULE_LOG << "objective: nstages = " << nstages << endl;

    // clear stats of previous epoch
    train_stats->forget();

    for(int layer=0 ; layer<n_layers-1 ; ++layer) {

        MODULE_LOG << "Training parameters between layers " << layer
            << " and " << layer+1 << endl;

        // this progress bar shows the number of loops through the whole
        // training set
        ProgressBar* pb = 0;

        int end_stage = min( training_schedule[layer], nstages );
        if( report_progress && stage < end_stage )
        {
            pb = new ProgressBar( "Training layer " + tostring(layer) +
                    " of " + classname(), end_stage - stage );
        }


        for( ; stage < end_stage ; stage++ )
        {

                // sample is the index in the training set
                int sample = stage % train_set->length();
                train_set->getExample(sample, input, target, weight);
                greedyStep( input.subVec(0, n_predictor), layer );

                if( pb )
                {
                    if( layer == 0 )
                        pb->update(stage + 1);
                    else
                        pb->update(stage - training_schedule[layer-1] + 1);
                }

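                // Re-fit target_params (the parameters of the Gaussian
                // output layer): compute the top hidden representation for
                // every training example, solve a linear regression from it
                // to the targets, and copy the solution into target_params.
                // The quadratic terms a_i are set to 1; the bias and weights
                // are scaled by -2, presumably so that the Gaussian units'
                // conditional mean reproduces the regression fit under the
                // RBMLQParameters energy parametrization.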
                Mat inputs(train_set.length() , n_predictor);
                Mat outputs(train_set.length() , n_predicted);
                Mat theta(1 + n_predictor , n_predicted);
                Vec output_value(n_predicted);

                for(int i=0 ; i<train_set.length() ; ++i) {
                    train_set->getExample(i, input, target, weight);
                    // split input in predictor_part and predicted_part
                    splitCond(input);

                    // compute predicted_part expectation, conditioned on predictor_part
                    // (forward pass)
                    expectation( output_value );
                    for(int j=0 ; j<n_predictor ; ++j) {
                        inputs[i][j] = last_layer->expectation[j];
//                        cout << last_layer->expectation[j] << " " ;
                    }
                    for(int j=0 ; j<n_predicted ; ++j) {
                        outputs[i][j] = input[j+n_predictor];
                    }
                }

//                pout << "inputs " << endl << inputs << endl  ;

//                pout << "outputs " << endl << outputs << endl  ;

                linearRegression(inputs,outputs,0.0,theta);
                // init the a_i term
                target_params->up_units_params[1].fill(1);

//                pout << "Theta" << theta << endl ;

                // set the bias (b_i)
                for(int i=0 ; i<n_predicted ; ++i) {
                    target_params->up_units_params[0][i] = - 2.0 * theta[i][0];
                }

                for(int i=0 ; i<n_predicted ; ++i) {
                    for(int j=0 ; j<n_predictor ; ++j) {
                        target_params->weights[i][j] = -2.0 * theta[j][i+1];
                    }
                }


        }


    }
/*
    MODULE_LOG << "Fine-tuning all parameters, using method "
    << fine_tuning_method << endl;

            if( fine_tuning_method == "" ) // do nothing
                sample += n_samples_to_see;
            else if( fine_tuning_method == "EGD" )
            {
                if( report_progress )
                    pb = new ProgressBar( "Training all " + classname()
                                          + " parameters by fine tuning",
                                          n_samples_to_see );

*/

/*
pout << "==================" << endl
    << "Before update:" << endl
    << "up:      " << joint_params->up_units_params << endl
    << "weights: " << endl << joint_params->weights << endl
    << "down:    " << joint_params->down_units_params << endl
    << endl;
// */

                // linear regression for last weights


/*
                int begin_sample = sample;
                int end_sample = begin_sample + n_samples_to_see;
                for( ; sample < end_sample ; sample++ )
                {
                    // sample is the index in the training set
                    int i = sample % train_set->length();
                    train_set->getExample(i, input, target, weight);
                    fineTuneByGradientDescentLastLayer( input );

                    if( pb )
                        pb->update( sample - begin_sample + 1 );
                }

                sample = begin_sample ;
                for( ; sample < 100 ; sample++ )
                {
                    // sample is the index in the training set
                    int i = sample % train_set->length();
                    train_set->getExample(i, input, target, weight);
                    fineTuneByGradientDescent( input );

                    if( pb )
                        pb->update( sample - begin_sample + 1 );
                }
*/


/*
pout << "-------" << endl
    << "After update:" << endl
    << "up:      " << joint_params->up_units_params << endl
    << "weights: " << endl << joint_params->weights << endl
    << "down:    " << joint_params->down_units_params << endl
    << endl;
// */

    train_stats->finalize(); // finalize statistics for this epoch
    MODULE_LOG << endl;
}

void GaussianDBNRegression::greedyStep( const Vec& predictor, int index )
{
    // deterministic propagation until we reach index
    layers[0]->expectation << predictor;

    input_params->setAsDownInput( layers[0]->expectation );
    layers[1]->getAllActivations( (RBMQLParameters*) input_params );
    layers[1]->computeExpectation();

    for( int i=1 ; i<index ; i++ )
    {
        params[i]->setAsDownInput( layers[i]->expectation );
        layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
        layers[i+1]->computeExpectation();
    }

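    // One contrastive divergence (CD-1) step on the RBM between layers
    // 'index' and 'index+1': positive-phase statistics come from the
    // data-driven configuration, negative-phase statistics from the
    // reconstruction after a single Gibbs step, and update() then applies
    // the resulting gradient estimate.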
    // positive phase
    if (index == 0) {
        input_params->setAsDownInput( layers[index]->expectation );
        layers[index+1]->getAllActivations((RBMQLParameters*) input_params);
        layers[index+1]->computeExpectation();
        layers[index+1]->generateSample();
        if (use_sample_rather_than_expectation_in_positive_phase_statistics)
            input_params->accumulatePosStats(layers[index]->expectation,
                    layers[index+1]->sample );
        else
            input_params->accumulatePosStats(layers[index]->expectation,
                    layers[index+1]->expectation );

        // down propagation
        input_params->setAsUpInput( layers[index+1]->sample );
        layers[index]->getAllActivations( (RBMQLParameters*) input_params );

        // negative phase
        layers[index]->generateSample();
        input_params->setAsDownInput( layers[index]->sample );
        layers[index+1]->getAllActivations((RBMQLParameters*) input_params);
        layers[index+1]->computeExpectation();
        input_params->accumulateNegStats( layers[index]->sample,
                layers[index+1]->expectation );

        // update
        input_params->update();

    }
    else {
        params[index]->setAsDownInput( layers[index]->expectation );
        layers[index+1]->getAllActivations((RBMLLParameters*) params[index]);
        layers[index+1]->computeExpectation();
        layers[index+1]->generateSample();
        if (use_sample_rather_than_expectation_in_positive_phase_statistics)
            params[index]->accumulatePosStats(layers[index]->expectation,
                    layers[index+1]->sample );
        else
            params[index]->accumulatePosStats(layers[index]->expectation,
                    layers[index+1]->expectation );

        // down propagation
        params[index]->setAsUpInput( layers[index+1]->sample );
        layers[index]->getAllActivations( (RBMLLParameters*) params[index] );

        // negative phase
        layers[index]->generateSample();
        params[index]->setAsDownInput( layers[index]->sample );
        layers[index+1]->getAllActivations((RBMLLParameters*) params[index]);
        layers[index+1]->computeExpectation();
        params[index]->accumulateNegStats( layers[index]->sample,
                layers[index+1]->expectation );

        // update
        params[index]->update();

    }


}



void GaussianDBNRegression::fineTuneByGradientDescentLastLayer( const Vec& input )
{
    // split input in predictor_part and predicted_part
    splitCond(input);

    // compute predicted_part expectation, conditioned on predictor_part
    // (forward pass)
    expectation( output_gradient );

    int target_size = predicted_part.size();

    expectation_gradients[n_layers].resize(target_size);

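    // Gradient of the squared error (output - target)^2 with respect to the
    // network output.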
    for(int i=0 ; i < target_size ; ++i) {
        expectation_gradients[n_layers][i] = 2 * (output_gradient[i] - predicted_part[i]);
    }

    target_layer->bpropUpdate( target_layer->activations,
                               target_layer->expectation,
                               activation_gradients[n_layers],
                               expectation_gradients[n_layers] );

    target_params->bpropUpdate( layers[n_layers-1]->expectation,
                               target_layer->activations,
                               expectation_gradients[n_layers-1],
                               activation_gradients[n_layers] );

}

void GaussianDBNRegression::fineTuneByGradientDescent( const Vec& input )
{
    // split input in predictor_part and predicted_part
    splitCond(input);

    // compute predicted_part expectation, conditioned on predictor_part
    // (forward pass)
    expectation( output_gradient );

    int target_size = predicted_part.size();

    expectation_gradients[n_layers].resize(target_size);

    // gradient of the squared error w.r.t. the network output
    for(int i=0 ; i < target_size ; ++i) {
        expectation_gradients[n_layers][i] = 2 * (output_gradient[i] - predicted_part[i]);
    }

    target_layer->bpropUpdate( target_layer->activations,
                               target_layer->expectation,
                               activation_gradients[n_layers],
                               expectation_gradients[n_layers] );

    target_params->bpropUpdate( layers[n_layers-1]->expectation,
                               target_layer->activations,
                               expectation_gradients[n_layers-1],
                               activation_gradients[n_layers] );

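    // Backpropagate through the rest of the stack, updating each params[i]
    // and finally input_params along the way.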
    for( int i=n_layers-1 ; i>1 ; i-- )
    {
        layers[i]->bpropUpdate( layers[i]->activations,
                                layers[i]->expectation,
                                activation_gradients[i],
                                expectation_gradients[i] );
        params[i-1]->bpropUpdate( layers[i-1]->expectation,
                                  layers[i]->activations,
                                  expectation_gradients[i-1],
                                  activation_gradients[i] );
    }

    layers[1]->bpropUpdate( layers[1]->activations,
                            layers[1]->expectation,
                            activation_gradients[1],
                            expectation_gradients[1] );

    input_params->bpropUpdate( layers[0]->expectation,
                               layers[1]->activations,
                               expectation_gradients[0],
                               activation_gradients[1] );


}

void GaussianDBNRegression::computeCostsFromOutputs(const Vec& input,
                                                  const Vec& output,
                                                  const Vec& target,
                                                  Vec& costs) const
{
    char c = outputs_def[0];
    if( c == 'l' || c == 'd' )
        inherited::computeCostsFromOutputs(input, output, target, costs);
    else if( c == 'e' )
    {
        costs.resize( 1 );
        costs[0] = .0;
        splitCond(input);

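        // Mean squared error between the predicted expectation and the true
        // predicted_part of the input.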
        int output_size = output.length();
        for(int i=0 ; i<output_size ; ++i) {
            costs[0] += square(output[i] - predicted_part[i]);
        }

        costs[0] /= output_size;

    }
}

TVec<string> GaussianDBNRegression::getTestCostNames() const
{
    char c = outputs_def[0];
    TVec<string> result;
    if( c == 'l' || c == 'd' )
        result.append( "NLL" );
    else if( c == 'e' )
    {
        result.append( "MSE" );
    }
    return result;
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :