00001 // -*- C++ -*-
00002 
00003 // SubsamplingDBN.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00040 #define PL_LOG_MODULE_NAME "SubsamplingDBN"
00041 #include <plearn/io/pl_log.h>
00042 
00043 #include "SubsamplingDBN.h"
00044 
00045 #define minibatch_hack 0 // Do we force the minibatch setting? (debug hack)
00046 
00047 namespace PLearn {
00048 using namespace std;
00049 
00050 PLEARN_IMPLEMENT_OBJECT(
00051     SubsamplingDBN,
00052     "Neural network, learned layer-wise in a greedy fashion.",
00053     "This version supports different unit types, different connection types,\n"
00054     "and different cost functions, including the NLL in classification.\n");
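// Illustrative sketch: one possible way to configure this learner from C++,
// using only options declared in declareOptions() below. The 'my_*' variables
// are placeholders and the numeric values are arbitrary, not recommendations.
//
//     PP<SubsamplingDBN> dbn = new SubsamplingDBN();
//     dbn->n_classes          = 10;
//     dbn->cd_learning_rate   = 0.01;
//     dbn->grad_learning_rate = 0.01;
//     dbn->training_schedule  = my_schedule;    // e.g. [ 1000 1000 500 ]
//     dbn->layers             = my_layers;      // n_layers RBMLayer objects
//     dbn->connections        = my_connections; // n_layers-1 RBMConnection
//                                               // objects (e.g. RBMMatrixConnection)
//     dbn->build();
//     dbn->setTrainingSet(my_train_set);        // a VMat
//     dbn->train();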
00055 
00057 // SubsamplingDBN //
00059 SubsamplingDBN::SubsamplingDBN() :
00060     cd_learning_rate( 0. ),
00061     grad_learning_rate( 0. ),
00062     batch_size( 1 ),
00063     grad_decrease_ct( 0. ),
00064     // grad_weight_decay( 0. ),
00065     n_classes( -1 ),
00066     use_classification_cost( true ),
00067     reconstruct_layerwise( false ),
00068     independent_biases( false ),
00069     n_layers( 0 ),
00070     online ( false ),
00071     background_gibbs_update_ratio(0),
00072     gibbs_chain_reinit_freq( INT_MAX ),
00073     minibatch_size( 0 ),
00074     initialize_gibbs_chain( false ),
00075     final_module_has_learning_rate( false ),
00076     final_cost_has_learning_rate( false ),
00077     nll_cost_index( -1 ),
00078     class_cost_index( -1 ),
00079     final_cost_index( -1 ),
00080     reconstruction_cost_index( -1 ),
00081     training_cpu_time_cost_index ( -1 ),
00082     cumulative_training_time_cost_index ( -1 ),
00083     cumulative_testing_time_cost_index ( -1 ),
00084     cumulative_training_time( 0 ),
00085     cumulative_testing_time( 0 )
00086 {
00087     random_gen = new PRandom();
00088 }
00089 
00091 // declareOptions //
00093 void SubsamplingDBN::declareOptions(OptionList& ol)
00094 {
00095     declareOption(ol, "cd_learning_rate", &SubsamplingDBN::cd_learning_rate,
00096                   OptionBase::buildoption,
00097                   "The learning rate used during contrastive divergence"
00098                   " learning");
00099 
00100     declareOption(ol, "grad_learning_rate", &SubsamplingDBN::grad_learning_rate,
00101                   OptionBase::buildoption,
00102                   "The learning rate used during gradient descent");
00103 
00104     declareOption(ol, "grad_decrease_ct", &SubsamplingDBN::grad_decrease_ct,
00105                   OptionBase::buildoption,
00106                   "The decrease constant of the learning rate used during"
00107                   " gradient descent");
00108 
00109     declareOption(ol, "batch_size", &SubsamplingDBN::batch_size,
00110                   OptionBase::buildoption,
00111         "Training batch size (1=stochastic learning, 0=full batch learning).");
00112 
00113     /* NOT IMPLEMENTED YET
00114     declareOption(ol, "grad_weight_decay", &SubsamplingDBN::grad_weight_decay,
00115                   OptionBase::buildoption,
00116                   "The weight decay used during the gradient descent");
00117     */
00118 
00119     declareOption(ol, "n_classes", &SubsamplingDBN::n_classes,
00120                   OptionBase::buildoption,
00121                   "Number of classes in the training set:\n"
00122                   "  - 0 means we are doing regression,\n"
00123                   "  - 1 means we have two classes, but only one output,\n"
00124                   "  - 2 means we also have two classes, but two outputs"
00125                   " summing to 1,\n"
00126                   "  - >2 is the usual multiclass case.\n"
00127                   );
00128 
00129     declareOption(ol, "training_schedule", &SubsamplingDBN::training_schedule,
00130                   OptionBase::buildoption,
00131                   "Number of examples to use during each phase of learning:\n"
00132                   "first the greedy phases, and then the fine-tuning phase.\n"
00133                   "However, the learning will stop as soon as we reach nstages.\n"
00134                   "For example for 2 hidden layers, with 1000 examples in each\n"
00135                   "greedy phase, and 500 in the fine-tuning phase, this option\n"
00136                   "should be [1000 1000 500], and nstages should be at least 2500.\n"
00137                   "When online = true, this vector is ignored and should be empty.\n");
00138 
00139     declareOption(ol, "use_classification_cost",
00140                   &SubsamplingDBN::use_classification_cost,
00141                   OptionBase::buildoption,
00142                   "Put the class target as an extra input of the top-level RBM\n"
00143                   "and compute and maximize conditional class probability in that\n"
00144                   "top layer (probability of the correct class given the other input\n"
00145                   "of the top-level RBM, which is the output of the rest of the network).\n");
00146 
00147     declareOption(ol, "reconstruct_layerwise",
00148                   &SubsamplingDBN::reconstruct_layerwise,
00149                   OptionBase::buildoption,
00150                   "Compute reconstruction error of each layer as an auto-encoder.\n"
00151                   "This is done using the cross-entropy between actual and reconstructed values.\n"
00152                   "This option automatically adds the following cost names:\n"
00153                   "   layerwise_reconstruction_error (sum over all layers)\n"
00154                   "   layer0.reconstruction_error (only layers[0])\n"
00155                   "   layer1.reconstruction_error (only layers[1])\n"
00156                   "   etc.\n");
00157 
00158     declareOption(ol, "layers", &SubsamplingDBN::layers,
00159                   OptionBase::buildoption,
00160                   "The layers of units in the network (including the input layer).");
00161 
00162     declareOption(ol, "connections", &SubsamplingDBN::connections,
00163                   OptionBase::buildoption,
00164                   "The weights of the connections between the layers");
00165 
00166     declareOption(ol, "classification_module",
00167                   &SubsamplingDBN::classification_module,
00168                   OptionBase::learntoption,
00169                   "The module computing the class probabilities (if"
00170                   " use_classification_cost)\n"
00171                   );
00172 
00173     declareOption(ol, "classification_cost",
00174                   &SubsamplingDBN::classification_cost,
00175                   OptionBase::nosave,
00176                   "The module computing the classification cost function (NLL)"
00177                   " on top\n"
00178                   "of classification_module.\n"
00179                   );
00180 
00181     declareOption(ol, "joint_layer", &SubsamplingDBN::joint_layer,
00182                   OptionBase::nosave,
00183                   "Concatenation of layers[n_layers-2] and the target layer\n"
00184                   "(that is inside classification_module), if"
00185                   " use_classification_cost.\n"
00186                  );
00187 
00188     declareOption(ol, "final_module", &SubsamplingDBN::final_module,
00189                   OptionBase::buildoption,
00190                   "Optional module that takes as input the output of the last"
00191                   " layer\n"
00192                   "layers[n_layers-1], and its output is fed to final_cost,"
00193                   " and\n"
00194                   "concatenated with that of classification_cost (if"
00195                   " present)\n"
00196                   "as the output of the learner.\n"
00197                   "If it is not provided, then the last layer will directly be"
00198                   " put as\n"
00199                   "input of final_cost.\n"
00200                  );
00201 
00202     declareOption(ol, "final_cost", &SubsamplingDBN::final_cost,
00203                   OptionBase::buildoption,
00204                   "The cost function to be applied on top of the DBN (or of\n"
00205                   "final_module if provided). Its gradients will be"
00206                   " backpropagated\n"
00207                   "to final_module, then combined with that of"
00208                   " classification_cost and\n"
00209                   "backpropagated to the layers.\n"
00210                   );
00211 
00212     declareOption(ol, "partial_costs", &SubsamplingDBN::partial_costs,
00213                   OptionBase::buildoption,
00214                   "The different cost functions to be applied on top of each"
00215                   " layer\n"
00216                   "(except the first one) of the RBM. These costs are not\n"
00217                   "back-propagated to previous layers.\n");
00218 
00219     declareOption(ol, "independent_biases",
00220                   &SubsamplingDBN::independent_biases,
00221                   OptionBase::buildoption,
00222                   "In an RBMLayer, do we want the bias during up and down\n"
00223                   "propagations to be potentially different?\n");
00224 
00225     declareOption(ol, "subsampling_modules",
00226                   &SubsamplingDBN::subsampling_modules,
00227                   OptionBase::buildoption,
00228                   "Different subsampling modules, to be applied on top of\n"
00229                   "RBMs when they're already learned. subsampling_modules[0]\n"
00230                   "is null.\n");
00231 
00232     declareOption(ol, "reduced_layers", &SubsamplingDBN::reduced_layers,
00233                   OptionBase::learntoption,
00234                   "Layers of reduced size, to be put on top of subsampling\n"
00235                   "modules. If the subsampling module is null, it will be\n"
00236                   "either the same as the one in 'layers' (default), or a\n"
00237                   "copy of it (with independent biases) if\n"
00238                   "'independent_biases' is true.\n");
00239 
00240     declareOption(ol, "online", &SubsamplingDBN::online,
00241                   OptionBase::buildoption,
00242                   "If true then all unsupervised training stages (as well as\n"
00243                   "the fine-tuning stage) are done simultaneously.\n");
00244 
00245     declareOption(ol, "background_gibbs_update_ratio", &SubsamplingDBN::background_gibbs_update_ratio,
00246                   OptionBase::buildoption,
00247                   "Coefficient between 0 and 1. If non-zero, run a background Gibbs chain and use\n"
00248                   "the visible-hidden statistics to contribute to the negative phase update\n"
00249                   "(in proportion background_gibbs_update_ratio with respect to the contrastive\n"
00250                   "divergence negative phase statistics). If = 1, then do not perform any contrastive\n"
00251                   "divergence negative phase (use only the Gibbs chain statistics).\n");
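    // In other words (restating the help text above), the negative-phase
    // statistics are roughly a convex combination of the two sources:
    //   neg_stats = background_gibbs_update_ratio       * gibbs_chain_stats
    //             + (1 - background_gibbs_update_ratio) * cd_stats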
00252 
00253     declareOption(ol, "gibbs_chain_reinit_freq",
00254                   &SubsamplingDBN::gibbs_chain_reinit_freq,
00255                   OptionBase::buildoption,
00256                   "After how many training examples to re-initialize the Gibbs chains.\n"
00257                   "If == INT_MAX, the default value of this option, then NEVER\n"
00258                   "re-initialize except at the beginning, when stage==0.\n");
00259 
00260     declareOption(ol, "top_layer_joint_cd", &SubsamplingDBN::top_layer_joint_cd,
00261                   OptionBase::buildoption,
00262                   "Whether we do a step of joint contrastive divergence on"
00263                   " the top layer.\n"
00264                   "For the moment, only used when 'online' is true.\n");
00265 
00266     declareOption(ol, "n_layers", &SubsamplingDBN::n_layers,
00267                   OptionBase::learntoption,
00268                   "Number of layers");
00269 
00270     declareOption(ol, "minibatch_size", &SubsamplingDBN::minibatch_size,
00271                   OptionBase::learntoption,
00272                   "Size of a mini-batch.");
00273 
00274     declareOption(ol, "gibbs_down_state", &SubsamplingDBN::gibbs_down_state,
00275                   OptionBase::learntoption,
00276                   "State of visible units of RBMs at each layer in background Gibbs chain.");
00277 
00278     declareOption(ol, "cumulative_training_time", &SubsamplingDBN::cumulative_training_time,
00279                   OptionBase::learntoption | OptionBase::nosave,
00280                   "Cumulative training time since age=0, in seconds.\n");
00281 
00282     declareOption(ol, "cumulative_testing_time", &SubsamplingDBN::cumulative_testing_time,
00283                   OptionBase::learntoption | OptionBase::nosave,
00284                   "Cumulative testing time since age=0, in seconds.\n");
00285 
00286 
00287     /*
00288     declareOption(ol, "n_final_costs", &SubsamplingDBN::n_final_costs,
00289                   OptionBase::learntoption,
00290                   "Number of final costs");
00291      */
00292 
00293     /*
00294     declareOption(ol, "", &SubsamplingDBN::,
00295                   OptionBase::learntoption,
00296                   "");
00297      */
00298 
00299     // Now call the parent class' declareOptions
00300     inherited::declareOptions(ol);
00301 }
00302 
00304 // build_ //
00306 void SubsamplingDBN::build_()
00307 {
00308     PLASSERT( batch_size >= 0 );
00309 
00310     MODULE_LOG << "build_() called" << endl;
00311 
00312     // Initialize some learnt variables
00313     if (layers.isEmpty())
00314         PLERROR("In SubsamplingDBN::build_ - You must provide at least one RBM "
00315                 "layer through the 'layers' option");
00316     else
00317         n_layers = layers.length();
00318 
00319     if( !online )
00320     {
00321         if( training_schedule.length() != n_layers )
00322         {
00323             PLWARNING("In SubsamplingDBN::build_ - training_schedule.length() "
00324                     "!= n_layers, resizing and zeroing");
00325             training_schedule.resize( n_layers );
00326             training_schedule.fill( 0 );
00327         }
00328 
00329         cumulative_schedule.resize( n_layers+1 );
00330         cumulative_schedule[0] = 0;
00331         for( int i=0 ; i<n_layers ; i++ )
00332         {
00333             cumulative_schedule[i+1] = cumulative_schedule[i] +
00334                 training_schedule[i];
00335         }
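        // e.g., with n_layers == 3 and training_schedule == [ 1000 1000 500 ]
        // (the example from the option help above), cumulative_schedule
        // becomes [ 0 1000 2000 2500 ]: the greedy phase of layer i runs for
        // stages in [ cumulative_schedule[i], cumulative_schedule[i+1] ).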
00336     }
00337 
00338     build_layers_and_connections();
00339 
00340     // Activate the profiler
00341     Profiler::activate();
00342 
00343     build_costs();
00344 }
00345 
00347 // build_costs //
00349 void SubsamplingDBN::build_costs()
00350 {
00351     cost_names.resize(0);
00352     int current_index = 0;
00353 
00354     // build the classification module, its cost and the joint layer
00355     if( use_classification_cost )
00356     {
00357         PLASSERT( n_classes >= 2 );
00358         build_classification_cost();
00359 
00360         cost_names.append("NLL");
00361         nll_cost_index = current_index;
00362         current_index++;
00363 
00364         cost_names.append("class_error");
00365         class_cost_index = current_index;
00366         current_index++;
00367     }
00368 
00369     if( final_cost )
00370     {
00371         build_final_cost();
00372 
00373         TVec<string> final_names = final_cost->name();
00374         int n_final_costs = final_names.length();
00375 
00376         for( int i=0; i<n_final_costs; i++ )
00377             cost_names.append("final." + final_names[i]);
00378 
00379         final_cost_index = current_index;
00380         current_index += n_final_costs;
00381     }
00382 
00383     if( partial_costs )
00384     {
00385         int n_partial_costs = partial_costs.length();
00386         partial_costs_indices.resize(n_partial_costs);
00387 
00388         for( int i=0; i<n_partial_costs; i++ )
00389             if( partial_costs[i] )
00390             {
00391                 TVec<string> names = partial_costs[i]->name();
00392                 int n_partial_costs_i = names.length();
00393                 for( int j=0; j<n_partial_costs_i; j++ )
00394                     cost_names.append("partial"+tostring(i)+"."+names[j]);
00395                 partial_costs_indices[i] = current_index;
00396                 current_index += n_partial_costs_i;
00397 
00398                 // Share random_gen with partial_costs[i], unless it already
00399                 // has one
00400                 if( !(partial_costs[i]->random_gen) )
00401                 {
00402                     partial_costs[i]->random_gen = random_gen;
00403                     partial_costs[i]->forget();
00404                 }
00405             }
00406             else
00407                 partial_costs_indices[i] = -1;
00408     }
00409     else
00410         partial_costs_indices.resize(0);
00411 
00412     if( reconstruct_layerwise )
00413     {
00414         reconstruction_costs.resize(n_layers);
00415 
00416         cost_names.append("layerwise_reconstruction_error");
00417         reconstruction_cost_index = current_index;
00418         current_index++;
00419 
00420         for( int i=0; i<n_layers-1; i++ )
00421             cost_names.append("layer"+tostring(i)+".reconstruction_error");
00422         current_index += n_layers-1;
00423     }
00424     else
00425         reconstruction_costs.resize(0);
00426 
00427 
00428     cost_names.append("cpu_time");
00429     cost_names.append("cumulative_train_time");
00430     cost_names.append("cumulative_test_time");
00431 
00432     training_cpu_time_cost_index = current_index;
00433     current_index++;
00434     cumulative_training_time_cost_index = current_index;
00435     current_index++;
00436     cumulative_testing_time_cost_index = current_index;
00437     current_index++;
00438 
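    // At this point, cost_names is laid out in the following order:
    //   NLL, class_error                         (if use_classification_cost)
    //   final.<name> for each cost of final_cost (if final_cost)
    //   partial<i>.<name> for each non-null partial_costs[i]
    //   layerwise_reconstruction_error, then layer<i>.reconstruction_error
    //                                            (if reconstruct_layerwise)
    //   cpu_time, cumulative_train_time, cumulative_test_time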
00439     PLASSERT( current_index == cost_names.length() );
00440 }
00441 
00443 // build_layers_and_connections //
00445 void SubsamplingDBN::build_layers_and_connections()
00446 {
00447     MODULE_LOG << "build_layers_and_connections() called" << endl;
00448 
00449     if( connections.length() != n_layers-1 )
00450         PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00451                 "connections.length() (%d) != n_layers-1 (%d).\n",
00452                 connections.length(), n_layers-1);
00453 
00454     if( subsampling_modules.length() == 0 )
00455         subsampling_modules.resize(n_layers-1);
00456     if( subsampling_modules.length() != n_layers-1 )
00457         PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00458                 "subsampling_modules.length() (%d) != n_layers-1 (%d).\n",
00459                 subsampling_modules.length(), n_layers-1);
00460 
00461     if( inputsize_ >= 0 )
00462         PLASSERT( layers[0]->size == inputsize() );
00463 
00464     activation_gradients.resize( n_layers );
00465     activations_gradients.resize( n_layers );
00466     expectation_gradients.resize( n_layers );
00467     expectations_gradients.resize( n_layers );
00468     subsampling_gradients.resize( n_layers );
00469     gibbs_down_state.resize( n_layers-1 );
00470 
00471     reduced_layers.resize(n_layers-1);
00472 
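    // For each i in [0, n_layers-2], the sizes are expected to chain as
    // follows (set up and/or checked in the loop below):
    //   layers[i]->size         == subsampling_modules[i]->input_size (if any)
    //   reduced_layers[i]->size == subsampling_modules[i]->output_size
    //                              (or == layers[i]->size without subsampling)
    //   reduced_layers[i]->size == connections[i]->down_size
    //   connections[i]->up_size == layers[i+1]->size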
00473     for( int i=0 ; i<n_layers-1 ; i++ )
00474     {
00475         if( !(reduced_layers[i]) )
00476         {
00477             if( (independent_biases || subsampling_modules[i]) && i!=0 )
00478             {
00479                 CopiesMap map;
00480                 reduced_layers[i] = layers[i]->deepCopy(map);
00481 
00482                 if( subsampling_modules[i] )
00483                 {
00484                     reduced_layers[i]->size =
00485                         subsampling_modules[i]->output_size;
00486                     reduced_layers[i]->build();
00487                 }
00488             }
00489             else
00490                 reduced_layers[i] = layers[i];
00491         }
00492 
00493         if( subsampling_modules[i] )
00494         {
00495             if( layers[i]->size != subsampling_modules[i]->input_size )
00496                 PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00497                         "layers[%i]->size (%d) != subsampling_modules[%i]->input_size (%d)."
00498                         "\n", i, layers[i]->size, i,
00499                         subsampling_modules[i]->input_size);
00500         }
00501         else
00502         {
00503             if( layers[i]->size != reduced_layers[i]->size )
00504                 PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00505                         "layers[%i]->size (%d) != reduced_layers[%i]->size (%d)."
00506                         "\n", i, layers[i]->size, i, reduced_layers[i]->size);
00507         }
00508 
00509         if( reduced_layers[i]->size != connections[i]->down_size )
00510             PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00511                     "reduced_layers[%i]->size (%d) != connections[%i]->down_size (%d)."
00512                     "\n", i, reduced_layers[i]->size, i, connections[i]->down_size);
00513 
00514         if( connections[i]->up_size != layers[i+1]->size )
00515             PLERROR("SubsamplingDBN::build_layers_and_connections() - \n"
00516                     "connections[%i]->up_size (%d) != layers[%i]->size (%d)."
00517                     "\n", i, connections[i]->up_size, i+1, layers[i+1]->size);
00518 
00519         // Assign random_gen to layers[i] and connections[i], unless they
00520         // already have one
00521         if( !(layers[i]->random_gen) )
00522         {
00523             layers[i]->random_gen = random_gen;
00524             layers[i]->forget();
00525         }
00526         if( !(reduced_layers[i]->random_gen) )
00527         {
00528             reduced_layers[i]->random_gen = random_gen;
00529             reduced_layers[i]->forget();
00530         }
00531         if( !(connections[i]->random_gen) )
00532         {
00533             connections[i]->random_gen = random_gen;
00534             connections[i]->forget();
00535         }
00536 
00537         activation_gradients[i].resize( layers[i]->size );
00538         expectation_gradients[i].resize( layers[i]->size );
00539         subsampling_gradients[i].resize( reduced_layers[i]->size );
00540     }
00541     if( !(layers[n_layers-1]->random_gen) )
00542     {
00543         layers[n_layers-1]->random_gen = random_gen;
00544         layers[n_layers-1]->forget();
00545     }
00546     int last_layer_size = layers[n_layers-1]->size;
00547     PLASSERT_MSG(last_layer_size >= 0,
00548                  "Size of last layer must be non-negative");
00549     activation_gradients[n_layers-1].resize(last_layer_size);
00550     expectation_gradients[n_layers-1].resize(last_layer_size);
00551 }
00552 
00554 // build_classification_cost //
00556 void SubsamplingDBN::build_classification_cost()
00557 {
00558     MODULE_LOG << "build_classification_cost() called" << endl;
00559 
00560     PLERROR( "classification_cost doesn't work with subsampling yet" );
00561     PLASSERT_MSG(batch_size == 1, "SubsamplingDBN::build_classification_cost - "
00562             "This method has not been verified yet for minibatch "
00563             "compatibility");
00564 
00565     PP<RBMMatrixConnection> last_to_target = new RBMMatrixConnection();
00566     last_to_target->up_size = layers[n_layers-1]->size;
00567     last_to_target->down_size = n_classes;
00568     last_to_target->random_gen = random_gen;
00569     last_to_target->build();
00570 
00571     PP<RBMMultinomialLayer> target_layer = new RBMMultinomialLayer();
00572     target_layer->size = n_classes;
00573     target_layer->random_gen = random_gen;
00574     target_layer->build();
00575 
00576     PLASSERT_MSG(n_layers >= 2, "You must specify at least two layers (the "
00577             "input layer and one hidden layer)");
00578 
00579     classification_module = new RBMClassificationModule();
00580     classification_module->previous_to_last = connections[n_layers-2];
00581     classification_module->last_layer =
00582         (RBMBinomialLayer*) (RBMLayer*) layers[n_layers-1];
00583     classification_module->last_to_target = last_to_target;
00584     classification_module->target_layer = target_layer;
00585     classification_module->random_gen = random_gen;
00586     classification_module->build();
00587 
00588     classification_cost = new NLLCostModule();
00589     classification_cost->input_size = n_classes;
00590     classification_cost->target_size = 1;
00591     classification_cost->build();
00592 
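    // Illustrative sketch of the top structure built here (joint_layer,
    // created below, concatenates layers[n_layers-2] with target_layer):
    //
    //                  layers[n_layers-1]   (top hidden layer)
    //                 /                  \
    //   connections[n_layers-2]      last_to_target
    //                |                     |
    //      layers[n_layers-2]         target_layer  (n_classes units)
    //       \___________ joint_layer ____________/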
00593     joint_layer = new RBMMixedLayer();
00594     joint_layer->sub_layers.resize( 2 );
00595     joint_layer->sub_layers[0] = layers[ n_layers-2 ];
00596     joint_layer->sub_layers[1] = target_layer;
00597     joint_layer->random_gen = random_gen;
00598     joint_layer->build();
00599 }
00600 
00602 // build_final_cost //
00604 void SubsamplingDBN::build_final_cost()
00605 {
00606     MODULE_LOG << "build_final_cost() called" << endl;
00607 
00608     PLASSERT_MSG(final_cost->input_size >= 0, "The input size of the final "
00609             "cost must be non-negative");
00610 
00611     final_cost_gradient.resize( final_cost->input_size );
00612     final_cost->setLearningRate( grad_learning_rate );
00613 
00614     if( final_module )
00615     {
00616         if( layers[n_layers-1]->size != final_module->input_size )
00617             PLERROR("SubsamplingDBN::build_final_cost() - "
00618                     "layers[%i]->size (%d) != final_module->input_size (%d)."
00619                     "\n", n_layers-1, layers[n_layers-1]->size,
00620                     final_module->input_size);
00621 
00622         if( final_module->output_size != final_cost->input_size )
00623             PLERROR("SubsamplingDBN::build_final_cost() - "
00624                     "final_module->output_size (%d) != final_cost->input_size (%d)."
00625                     "\n", final_module->output_size,
00626                     final_cost->input_size);
00627 
00628         final_module->setLearningRate( grad_learning_rate );
00629 
00630         // Share random_gen with final_module, unless it already has one
00631         if( !(final_module->random_gen) )
00632         {
00633             final_module->random_gen = random_gen;
00634             final_module->forget();
00635         }
00636     }
00637     else
00638     {
00639         if( layers[n_layers-1]->size != final_cost->input_size )
00640             PLERROR("SubsamplingDBN::build_final_cost() - "
00641                     "layers[%i]->size (%d) != final_cost->input_size (%d)."
00642                     "\n", n_layers-1, layers[n_layers-1]->size,
00643                     final_cost->input_size);
00644     }
00645 
00646     // check target size and final_cost->input_size
00647     if( n_classes == 0 ) // regression
00648     {
00649         if( final_cost->input_size != targetsize() )
00650             PLERROR("SubsamplingDBN::build_final_cost() - "
00651                     "final_cost->input_size (%d) != targetsize() (%d), "
00652                     "although we are doing regression (n_classes == 0).\n",
00653                     final_cost->input_size, targetsize());
00654     }
00655     else
00656     {
00657         if( final_cost->input_size != n_classes )
00658             PLERROR("SubsamplingDBN::build_final_cost() - "
00659                     "final_cost->input_size (%d) != n_classes (%d), "
00660                     "although we are doing classification (n_classes != 0).\n",
00661                     final_cost->input_size, n_classes);
00662 
00663         if( targetsize_ >= 0 && targetsize() != 1 )
00664             PLERROR("SubsamplingDBN::build_final_cost() - "
00665                     "targetsize() (%d) != 1, "
00666                     "although we are doing classification (n_classes != 0).\n",
00667                     targetsize());
00668     }
00669 
00670     // Share random_gen with final_cost, unless it already has one
00671     if( !(final_cost->random_gen) )
00672     {
00673         final_cost->random_gen = random_gen;
00674         final_cost->forget();
00675     }
00676 }
00677 
00679 // build //
00681 void SubsamplingDBN::build()
00682 {
00683     inherited::build();
00684     build_();
00685 }
00686 
00688 // makeDeepCopyFromShallowCopy //
00690 void SubsamplingDBN::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00691 {
00692     inherited::makeDeepCopyFromShallowCopy(copies);
00693 
00694     deepCopyField(training_schedule,        copies);
00695     deepCopyField(layers,                   copies);
00696     deepCopyField(connections,              copies);
00697     deepCopyField(final_module,             copies);
00698     deepCopyField(final_cost,               copies);
00699     deepCopyField(partial_costs,            copies);
00700     deepCopyField(subsampling_modules,      copies);
00701     deepCopyField(classification_module,    copies);
00702     deepCopyField(cost_names,               copies);
00703     deepCopyField(reduced_layers,           copies);
00704     deepCopyField(timer,                    copies);
00705     deepCopyField(classification_cost,      copies);
00706     deepCopyField(joint_layer,              copies);
00707     deepCopyField(activation_gradients,     copies);
00708     deepCopyField(activations_gradients,    copies);
00709     deepCopyField(expectation_gradients,    copies);
00710     deepCopyField(expectations_gradients,   copies);
00711     deepCopyField(subsampling_gradients,    copies);
00712     deepCopyField(final_cost_input,         copies);
00713     deepCopyField(final_cost_inputs,        copies);
00714     deepCopyField(final_cost_value,         copies);
00715     deepCopyField(final_cost_values,        copies);
00716     deepCopyField(final_cost_output,        copies);
00717     deepCopyField(class_output,             copies);
00718     deepCopyField(class_gradient,           copies);
00719     deepCopyField(final_cost_gradient,      copies);
00720     deepCopyField(final_cost_gradients,     copies);
00721     deepCopyField(save_layer_activation,    copies);
00722     deepCopyField(save_layer_expectation,   copies);
00723     deepCopyField(save_layer_activations,   copies);
00724     deepCopyField(save_layer_expectations,  copies);
00725     deepCopyField(pos_down_val,             copies);
00726     deepCopyField(pos_up_val,               copies);
00727     deepCopyField(cd_neg_up_vals,           copies);
00728     deepCopyField(cd_neg_down_vals,         copies);
00729     deepCopyField(gibbs_down_state,         copies);
00730     deepCopyField(optimized_costs,          copies);
00731     deepCopyField(reconstruction_costs,     copies);
00732     deepCopyField(partial_costs_indices,    copies);
00733     deepCopyField(cumulative_schedule,      copies);
00734     deepCopyField(layer_input,              copies);
00735     deepCopyField(layer_inputs,             copies);
00736 }
00737 
00738 
00740 // outputsize //
00742 int SubsamplingDBN::outputsize() const
00743 {
00744     int out_size = 0;
00745     if( use_classification_cost )
00746         out_size += n_classes;
00747 
00748     if( final_module )
00749         out_size += final_module->output_size;
00750     else
00751         out_size += layers[n_layers-1]->size;
00752 
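    // For example, with use_classification_cost == true, n_classes == 10 and
    // a final_module whose output_size is 5, outputsize() returns 10 + 5 = 15.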
00753     return out_size;
00754 }
00755 
00757 // forget //
00759 void SubsamplingDBN::forget()
00760 {
00761     inherited::forget();
00762 
00763     for( int i=0 ; i<n_layers ; i++ )
00764         layers[i]->forget();
00765 
00766     for( int i=0 ; i<n_layers-1 ; i++ )
00767     {
00768         reduced_layers[i]->forget();
00769         connections[i]->forget();
00770     }
00771 
00772     if( use_classification_cost )
00773     {
00774         classification_cost->forget();
00775         classification_module->forget();
00776     }
00777 
00778     if( final_module )
00779         final_module->forget();
00780 
00781     if( final_cost )
00782         final_cost->forget();
00783 
00784     if( !partial_costs.isEmpty() )
00785         for( int i=0 ; i<n_layers-1 ; i++ )
00786             if( partial_costs[i] )
00787                 partial_costs[i]->forget();
00788 
00789     cumulative_training_time = 0;
00790     cumulative_testing_time = 0;
00791 }
00792 
00794 // train //
00796 void SubsamplingDBN::train()
00797 {
00798     MODULE_LOG << "train() called " << endl;
00799 
00800     if (!online)
00801     {
00802         // Enforce value of cumulative_schedule because build_() might
00803         // not be called if we change training_schedule inside a HyperLearner
00804         for( int i=0 ; i<n_layers ; i++ )
00805             cumulative_schedule[i+1] = cumulative_schedule[i] +
00806                 training_schedule[i];
00807     }
00808 
00809     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
00810     MODULE_LOG << "  cumulative_schedule = " << cumulative_schedule << endl;
00811     MODULE_LOG << "stage = " << stage
00812         << ", target nstages = " << nstages << endl;
00813 
00814     PLASSERT( train_set );
00815     if (stage == 0) {
00816         // Training set-dependent initialization.
00817         minibatch_size = batch_size > 0 ? batch_size : train_set->length();
00818         for (int i = 0 ; i < n_layers; i++) {
00819             activations_gradients[i].resize(minibatch_size, layers[i]->size);
00820             expectations_gradients[i].resize(minibatch_size, layers[i]->size);
00821 
00822             if (background_gibbs_update_ratio>0 && i<n_layers-1)
00823                 gibbs_down_state[i].resize(minibatch_size, layers[i]->size);
00824         }
00825         if (final_cost)
00826             final_cost_gradients.resize(minibatch_size, final_cost->input_size);
00827         optimized_costs.resize(minibatch_size);
00828     }
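    // Note: 'stage' counts individual training examples; an update is
    // performed every 'minibatch_size' stages, on the 'minibatch_size'
    // examples starting at index stage % nsamples in the training set.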
00829 
00830     Vec input( inputsize() );
00831     Vec target( targetsize() );
00832     real weight; // unused
00833     Mat inputs(minibatch_size, inputsize());
00834     Mat targets(minibatch_size, targetsize());
00835     Vec weights;
00836 
00837     TVec<string> train_cost_names = getTrainCostNames();
00838     Vec train_costs( train_cost_names.length() );
00839     Mat train_costs_m(minibatch_size, train_cost_names.length());
00840     train_costs.fill(MISSING_VALUE);
00841     train_costs_m.fill(MISSING_VALUE);
00842 
00843     int nsamples = train_set->length();
00844 
00845     if( !initTrain() )
00846     {
00847         MODULE_LOG << "train() aborted" << endl;
00848         return;
00849     }
00850 
00851     PP<ProgressBar> pb;
00852 
00853     // Start the actual time counting
00854     Profiler::reset("training");
00855     Profiler::start("training");
00856 
00857     // clear stats of previous epoch
00858     train_stats->forget();
00859 
00860     if (online)
00861     {
00862         PLERROR( "Subsampling does not work with 'online' mode yet" );
00863         // Train all layers simultaneously AND fine-tuning as well!
00864         if( report_progress && stage < nstages )
00865             pb = new ProgressBar( "Training "+classname(),
00866                                   nstages - stage );
00867 
00868         for( ; stage<nstages; stage++)
00869         {
00870             initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
00871             // Do a step every 'minibatch_size' examples.
00872             if (stage % minibatch_size == 0) {
00873                 int sample_start = stage % nsamples;
00874                 if (batch_size > 1 || minibatch_hack) {
00875                     train_set->getExamples(sample_start, minibatch_size,
00876                                            inputs, targets, weights, NULL, true);
00877                     train_costs_m.fill(MISSING_VALUE);
00878                     if (reconstruct_layerwise)
00879                         train_costs_m.column(reconstruction_cost_index).clear();
00880                     onlineStep( inputs, targets, train_costs_m );
00881                 } else {
00882                     train_set->getExample(sample_start, input, target, weight);
00883                     onlineStep( input, target, train_costs );
00884                 }
00885             }
00886             if( pb )
00887                 pb->update( stage + 1 );
00888         }
00889     }
00890     else // Greedy learning, one layer at a time.
00891     {
00892         /***** initial greedy training *****/
00893         for( int i=0 ; i<n_layers-1 ; i++ )
00894         {
00895             if( use_classification_cost && i == n_layers-2 )
00896                 break; // we will do a joint supervised learning instead
00897 
00898             int end_stage = min(cumulative_schedule[i+1], nstages);
00899             if( stage >= end_stage )
00900                 continue;
00901 
00902             MODULE_LOG << "Training connection weights between layers " << i
00903                        << " and " << i+1 << endl;
00904             MODULE_LOG << "  stage = " << stage << endl;
00905             MODULE_LOG << "  end_stage = " << end_stage << endl;
00906             MODULE_LOG << "  cd_learning_rate = " << cd_learning_rate << endl;
00907 
00908             if( report_progress )
00909                 pb = new ProgressBar( "Training layer "+tostring(i)
00910                                       +" of "+classname(),
00911                                       end_stage - stage );
00912 
00913             reduced_layers[i]->setLearningRate( cd_learning_rate );
00914             connections[i]->setLearningRate( cd_learning_rate );
00915             layers[i+1]->setLearningRate( cd_learning_rate );
00916 
00917             for( ; stage<end_stage ; stage++ )
00918             {
00919                 initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
00920                 // Do a step every 'minibatch_size' examples.
00921                 if (stage % minibatch_size == 0) {
00922                     int sample_start = stage % nsamples;
00923                     if (batch_size > 1 || minibatch_hack) {
00924                         train_set->getExamples(sample_start, minibatch_size,
00925                                 inputs, targets, weights, NULL, true);
00926                         train_costs_m.fill(MISSING_VALUE);
00927                         if (reconstruct_layerwise)
00928                             train_costs_m.column(reconstruction_cost_index).clear();
00929                         greedyStep( inputs, targets, i , train_costs_m);
00930                         for (int k = 0; k < minibatch_size; k++)
00931                             train_stats->update(train_costs_m(k));
00932                     } else {
00933                         train_set->getExample(sample_start, input, target, weight);
00934                         greedyStep( input, target, i );
00935                     }
00936 
00937                 }
00938                 if( pb )
00939                     pb->update( stage - cumulative_schedule[i] + 1 );
00940             }
00941         }
00942 
00943         // possible supervised part
00944         int end_stage = min(cumulative_schedule[n_layers-1], nstages);
00945         if( use_classification_cost && (stage < end_stage) )
00946         {
00947             PLASSERT_MSG(batch_size == 1, "'use_classification_cost' code not "
00948                     "verified with mini-batch learning yet");
00949 
00950             MODULE_LOG << "Training the classification module" << endl;
00951             MODULE_LOG << "  stage = " << stage << endl;
00952             MODULE_LOG << "  end_stage = " << end_stage << endl;
00953             MODULE_LOG << "  cd_learning_rate = " << cd_learning_rate << endl;
00954 
00955             if( report_progress )
00956                 pb = new ProgressBar( "Training the classification module",
00957                                       end_stage - stage );
00958 
00959             // set appropriate learning rate
00960             joint_layer->setLearningRate( cd_learning_rate );
00961             classification_module->joint_connection->setLearningRate(
00962                 cd_learning_rate );
00963             layers[ n_layers-1 ]->setLearningRate( cd_learning_rate );
00964 
00965             int previous_stage = cumulative_schedule[n_layers-2];
00966             for( ; stage<end_stage ; stage++ )
00967             {
00968                 initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
00969                 int sample = stage % nsamples;
00970                 train_set->getExample( sample, input, target, weight );
00971                 jointGreedyStep( input, target );
00972 
00973                 if( pb )
00974                     pb->update( stage - previous_stage + 1 );
00975             }
00976         }
00977 
00978 
00979         /***** fine-tuning by gradient descent *****/
00980         end_stage = min(cumulative_schedule[n_layers], nstages);
00981         if( stage >= end_stage )
00982             return;
00983         MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
00984         MODULE_LOG << "  stage = " << stage << endl;
00985         MODULE_LOG << "  end_stage = " << end_stage << endl;
00986         MODULE_LOG << "  grad_learning_rate = " << grad_learning_rate << endl;
00987 
00988         int init_stage = stage;
00989         if( report_progress )
00990             pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00991                                   + classname(),
00992                                   end_stage - init_stage );
00993 
00994         setLearningRate( grad_learning_rate );
00995 
00996         train_stats->forget();
00997         bool update_stats = false;
00998         for( ; stage<end_stage ; stage++ )
00999         {
01000 
01001             // Update every 'minibatch_size' samples.
01002             if (stage % minibatch_size == 0) {
01003                 int sample_start = stage % nsamples;
01004                 // Only update train statistics for the last 'epoch', i.e. last
01005                 // 'nsamples' seen.
01006                 update_stats = update_stats || stage >= end_stage - nsamples;
01007 
01008                 if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01009                     setLearningRate( grad_learning_rate
01010                             / (1. + grad_decrease_ct * (stage - init_stage) ) );
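                // e.g., with grad_learning_rate == 0.01 and grad_decrease_ct
                // == 1e-4, the learning rate after 10000 fine-tuning stages
                // is 0.01 / (1 + 1e-4 * 10000) = 0.005.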
01011 
01012                 if (minibatch_size > 1 || minibatch_hack) {
01013                     train_set->getExamples(sample_start, minibatch_size, inputs,
01014                             targets, weights, NULL, true);
01015                     train_costs_m.fill(MISSING_VALUE);
01016                     fineTuningStep(inputs, targets, train_costs_m);
01017                 } else {
01018                     train_set->getExample( sample_start, input, target, weight );
01019                     fineTuningStep( input, target, train_costs );
01020                 }
01021                 if (update_stats)
01022                     if (minibatch_size > 1 || minibatch_hack)
01023                         for (int k = 0; k < minibatch_size; k++)
01024                             train_stats->update(train_costs_m(k));
01025                     else
01026                         train_stats->update( train_costs );
01027 
01028             }
01029             if( pb )
01030                 pb->update( stage - init_stage + 1 );
01031         }
01032     }
01033 
01034     Profiler::end("training");
01035     // The report is pretty informative and therefore quite verbose.
01036     if (verbosity > 1)
01037         Profiler::report(cout);
01038 
01039     const Profiler::Stats& stats = Profiler::getStats("training");
01040     real ticksPerSec = Profiler::ticksPerSecond();
01041     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
01042     cumulative_training_time += cpu_time;
01043 
01044     if (verbosity > 1)
01045         cout << "The cumulative time spent in train() up until now is " << cumulative_training_time << " cpu seconds" << endl;
01046 
01047     train_costs_m.column(training_cpu_time_cost_index).fill(cpu_time);
01048     train_costs_m.column(cumulative_training_time_cost_index).fill(cumulative_training_time);
01049     train_stats->update( train_costs_m );
01050     train_stats->finalize();
01051 
01052 }
01053 
01055 // onlineStep //
01057 void SubsamplingDBN::onlineStep( const Vec& input, const Vec& target,
01058                                 Vec& train_costs)
01059 {
01060     PLASSERT(batch_size == 1);
01061 
01062     TVec<Vec> cost;
01063     if (!partial_costs.isEmpty())
01064         cost.resize(n_layers-1);
01065 
01066     layers[0]->expectation << input;
01067     // FORWARD PHASE
01068     //Vec layer_input;
01069     for( int i=0 ; i<n_layers-1 ; i++ )
01070     {
01071         // mean-field fprop from layer i to layer i+1
01072         connections[i]->setAsDownInput( layers[i]->expectation );
01073         // this does the actual matrix-vector computation
01074         layers[i+1]->getAllActivations( connections[i] );
01075         layers[i+1]->computeExpectation();
01076 
01077         // propagate into local cost associated to output of layer i+1
01078         if( !partial_costs.isEmpty() && partial_costs[ i ] )
01079         {
01080             partial_costs[ i ]->fprop( layers[ i+1 ]->expectation,
01081                                        target, cost[i] );
01082 
01083             // Backward pass
01084             // first time we set these gradients: do not accumulate
01085             partial_costs[ i ]->bpropUpdate( layers[ i+1 ]->expectation,
01086                                              target, cost[i][0],
01087                                              expectation_gradients[ i+1 ] );
01088 
01089             train_costs.subVec(partial_costs_indices[i], cost[i].length())
01090                 << cost[i];
01091         }
01092         else
01093             expectation_gradients[i+1].clear();
01094     }
01095 
01096     // top layer may be connected to a final_module followed by a
01097     // final_cost and / or may be used to predict class probabilities
01098     // through a joint classification_module
01099 
01100     if ( final_cost )
01101     {
01102         if( final_module )
01103         {
01104                 final_module->fprop( layers[ n_layers-1 ]->expectation,
01105                         final_cost_input );
01106                 final_cost->fprop( final_cost_input, target,
01107                         final_cost_value );
01108                 final_cost->bpropUpdate( final_cost_input, target,
01109                         final_cost_value[0],
01110                         final_cost_gradient );
01111 
01112                 final_module->bpropUpdate(
01113                         layers[ n_layers-1 ]->expectation,
01114                         final_cost_input,
01115                         expectation_gradients[ n_layers-1 ],
01116                         final_cost_gradient, true );
01117         }
01118         else
01119         {
01120                 final_cost->fprop( layers[ n_layers-1 ]->expectation,
01121                         target,
01122                         final_cost_value );
01123                 final_cost->bpropUpdate( layers[ n_layers-1 ]->expectation,
01124                         target, final_cost_value[0],
01125                         expectation_gradients[n_layers-1],
01126                         true);
01127         }
01128 
01129         train_costs.subVec(final_cost_index, final_cost_value.length())
01130             << final_cost_value;
01131     }
01132 
01133     if (final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2]))
01134     {
01135         layers[n_layers-1]->setLearningRate( grad_learning_rate );
01136         connections[n_layers-2]->setLearningRate( grad_learning_rate );
01137 
01138         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
01139                                            layers[ n_layers-1 ]->expectation,
01140                                            activation_gradients[ n_layers-1 ],
01141                                            expectation_gradients[ n_layers-1 ],
01142                                            false);
01143 
01144         connections[ n_layers-2 ]->bpropUpdate(
01145             layers[ n_layers-2 ]->expectation,
01146             layers[ n_layers-1 ]->activation,
01147             expectation_gradients[ n_layers-2 ],
01148             activation_gradients[ n_layers-1 ],
01149             true);
01150         // accumulate into expectation_gradients[n_layers-2]
01151         // because a partial cost may have already put a gradient there
01152     }
01153 
01154     if( use_classification_cost )
01155     {
01156         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01157                                       class_output );
01158         real nll_cost;
01159 
01160         // This doesn't work. gcc bug?
01161         // classification_cost->fprop( class_output, target, cost );
01162         classification_cost->CostModule::fprop( class_output, target,
01163                                                 nll_cost );
01164 
01165         real class_error =
01166             ( argmax(class_output) == (int) round(target[0]) ) ? 0: 1;
01167 
01168         train_costs[nll_cost_index] = nll_cost;
01169         train_costs[class_cost_index] = class_error;
01170 
01171         classification_cost->bpropUpdate( class_output, target, nll_cost,
01172                                           class_gradient );
01173 
01174         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01175                                             class_output,
01176                                             expectation_gradients[n_layers-2],
01177                                             class_gradient,
01178                                             true );
01179         if( top_layer_joint_cd )
01180         {
01181             // set the input of the joint layer
01182             Vec target_exp = classification_module->target_layer->expectation;
01183             fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
01184 
01185             joint_layer->setLearningRate( cd_learning_rate );
01186             layers[ n_layers-1 ]->setLearningRate( cd_learning_rate );
01187             classification_module->joint_connection->setLearningRate(
01188                 cd_learning_rate );
01189 
01190             save_layer_activation.resize(layers[ n_layers-2 ]->size);
01191             save_layer_activation << layers[ n_layers-2 ]->activation;
01192             save_layer_expectation.resize(layers[ n_layers-2 ]->size);
01193             save_layer_expectation << layers[ n_layers-2 ]->expectation;
01194 
01195             contrastiveDivergenceStep(
01196                 get_pointer(joint_layer),
01197                 get_pointer(classification_module->joint_connection),
01198                 layers[ n_layers-1 ], n_layers-2);
01199 
01200             layers[ n_layers-2 ]->activation << save_layer_activation;
01201             layers[ n_layers-2 ]->expectation << save_layer_expectation;
01202         }
01203     }
01204 
01205     // DOWNWARD PHASE (the downward phase for top layer is already done above,
01206     // except for the contrastive divergence step in the case where either
01207     // 'use_classification_cost' or 'top_layer_joint_cd' is false).
01208     for( int i=n_layers-2 ; i>=0 ; i-- )
01209     {
01210         if (i <= n_layers - 3) {
01211             connections[ i ]->setLearningRate( grad_learning_rate );
01212             layers[ i+1 ]->setLearningRate( grad_learning_rate );
01213 
01214             layers[i+1]->bpropUpdate( layers[i+1]->activation,
01215                                       layers[i+1]->expectation,
01216                                       activation_gradients[i+1],
01217                                       expectation_gradients[i+1] );
01218 
01219             connections[i]->bpropUpdate( layers[i]->expectation,
01220                                          layers[i+1]->activation,
01221                                          expectation_gradients[i],
01222                                          activation_gradients[i+1],
01223                                          true);
01224         }
01225 
01226         if (i <= n_layers - 3 || !use_classification_cost ||
01227                                  !top_layer_joint_cd) {
01228 
01229             // N.B. the contrastiveDivergenceStep changes the activation and
01230             // expectation fields of the top layer of the RBM, so it must be
01231             // done last
01232             layers[i]->setLearningRate( cd_learning_rate );
01233             layers[i+1]->setLearningRate( cd_learning_rate );
01234             connections[i]->setLearningRate( cd_learning_rate );
01235 
01236             if( i > 0 )
01237             {
01238                 save_layer_activation.resize(layers[i]->size);
01239                 save_layer_activation << layers[i]->activation;
01240                 save_layer_expectation.resize(layers[i]->size);
01241                 save_layer_expectation << layers[i]->expectation;
01242             }
01243             contrastiveDivergenceStep( layers[ i ],
01244                                        connections[ i ],
01245                                        layers[ i+1 ],
01246                                        i, true);
01247             if( i > 0 )
01248             {
01249                 layers[i]->activation << save_layer_activation;
01250                 layers[i]->expectation << save_layer_expectation;
01251             }
01252         }
01253     }
01254 
01255 
01256 
01257 }
01258 
01259 void SubsamplingDBN::onlineStep(const Mat& inputs, const Mat& targets,
01260                                Mat& train_costs)
01261 {
01262     // TODO Can we avoid this memory allocation?
01263     TVec<Mat> cost;
01264     Vec optimized_cost(inputs.length());
01265     if (partial_costs) {
01266         cost.resize(n_layers-1);
01267     }
01268 
01269     layers[0]->setExpectations(inputs);
01270     // FORWARD PHASE
01271     //Vec layer_input;
01272     for( int i=0 ; i<n_layers-1 ; i++ )
01273     {
01274         // mean-field fprop from layer i to layer i+1
01275         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
01276         // this does the actual matrix-vector computation
01277         layers[i+1]->getAllActivations( connections[i], 0, true );
01278         layers[i+1]->computeExpectations();
01279 
01280         // propagate into local cost associated to output of layer i+1
01281         if( partial_costs && partial_costs[ i ] )
01282         {
01283             partial_costs[ i ]->fprop( layers[ i+1 ]->getExpectations(),
01284                                        targets, cost[i] );
01285 
01286             // Backward pass
01287             // first time we set these gradients: do not accumulate
01288             optimized_cost << cost[i].column(0); // TODO Can we optimize?
01289             partial_costs[ i ]->bpropUpdate( layers[ i+1 ]->getExpectations(),
01290                                              targets, optimized_cost,
01291                                              expectations_gradients[ i+1 ] );
01292 
01293             train_costs.subMatColumns(partial_costs_indices[i], cost[i].width())
01294                 << cost[i];
01295         }
01296         else
01297             expectations_gradients[i+1].clear();
01298     }
01299 
01300     // top layer may be connected to a final_module followed by a
01301     // final_cost and / or may be used to predict class probabilities
01302     // through a joint classification_module
01303 
01304     if ( final_cost )
01305     {
01306         if( final_module )
01307         {
01308             final_module->fprop( layers[ n_layers-1 ]->getExpectations(),
01309                                  final_cost_inputs );
01310             final_cost->fprop( final_cost_inputs, targets,
01311                                final_cost_values );
01312             optimized_cost << final_cost_values.column(0); // TODO optimize
01313             final_cost->bpropUpdate( final_cost_inputs, targets,
01314                                      optimized_cost,
01315                                      final_cost_gradients );
01316 
01317             final_module->bpropUpdate(
01318                 layers[ n_layers-1 ]->getExpectations(),
01319                 final_cost_inputs,
01320                 expectations_gradients[ n_layers-1 ],
01321                 final_cost_gradients, true );
01322         }
01323         else
01324         {
01325             final_cost->fprop( layers[ n_layers-1 ]->getExpectations(),
01326                                targets,
01327                                final_cost_values );
01328             optimized_cost << final_cost_values.column(0); // TODO optimize
01329             final_cost->bpropUpdate( layers[n_layers-1]->getExpectations(),
01330                                      targets, optimized_cost,
01331                                      expectations_gradients[n_layers-1],
01332                                      true);
01333         }
01334 
01335         train_costs.subMatColumns(final_cost_index, final_cost_values.width())
01336             << final_cost_values;
01337     }
01338 
01339     if (final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2]))
01340     {
01341         layers[n_layers-1]->setLearningRate( grad_learning_rate );
01342         connections[n_layers-2]->setLearningRate( grad_learning_rate );
01343 
01344         layers[ n_layers-1 ]->bpropUpdate(
01345                 layers[ n_layers-1 ]->activations,
01346                 layers[ n_layers-1 ]->getExpectations(),
01347                 activations_gradients[ n_layers-1 ],
01348                 expectations_gradients[ n_layers-1 ],
01349                 false);
01350 
01351         connections[ n_layers-2 ]->bpropUpdate(
01352                 layers[ n_layers-2 ]->getExpectations(),
01353                 layers[ n_layers-1 ]->activations,
01354                 expectations_gradients[ n_layers-2 ],
01355                 activations_gradients[ n_layers-1 ],
01356                 true);
01357         // accumulate into expectations_gradients[n_layers-2]
01358         // because a partial cost may have already put a gradient there
01359     }
01360 
01361     if( use_classification_cost )
01362     {
01363         PLERROR("In SubsamplingDBN::onlineStep - 'use_classification_cost' not "
01364                 "implemented for mini-batches");
01365 
01366         /*
01367         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01368                                       class_output );
01369         real nll_cost;
01370 
01371         // This doesn't work. gcc bug?
01372         // classification_cost->fprop( class_output, target, cost );
01373         classification_cost->CostModule::fprop( class_output, target,
01374                                                 nll_cost );
01375 
01376         real class_error =
01377             ( argmax(class_output) == (int) round(target[0]) ) ? 0: 1;
01378 
01379         train_costs[nll_cost_index] = nll_cost;
01380         train_costs[class_cost_index] = class_error;
01381 
01382         classification_cost->bpropUpdate( class_output, target, nll_cost,
01383                                           class_gradient );
01384 
01385         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01386                                             class_output,
01387                                             expectation_gradients[n_layers-2],
01388                                             class_gradient,
01389                                             true );
01390         if( top_layer_joint_cd )
01391         {
01392             // set the input of the joint layer
01393             Vec target_exp = classification_module->target_layer->expectation;
01394             fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
01395 
01396             joint_layer->setLearningRate( cd_learning_rate );
01397             layers[ n_layers-1 ]->setLearningRate( cd_learning_rate );
01398             classification_module->joint_connection->setLearningRate(
01399                 cd_learning_rate );
01400 
01401             save_layer_activation.resize(layers[ n_layers-2 ]->size);
01402             save_layer_activation << layers[ n_layers-2 ]->activation;
01403             save_layer_expectation.resize(layers[ n_layers-2 ]->size);
01404             save_layer_expectation << layers[ n_layers-2 ]->expectation;
01405 
01406             contrastiveDivergenceStep(
01407                 get_pointer(joint_layer),
01408                 get_pointer(classification_module->joint_connection),
01409                 layers[ n_layers-1 ], n_layers-2);
01410 
01411             layers[ n_layers-2 ]->activation << save_layer_activation;
01412             layers[ n_layers-2 ]->expectation << save_layer_expectation;
01413         }
01414         */
01415     }
01416 
01417     Mat rc;
01418     if (reconstruct_layerwise)
01419     {
01420         rc = train_costs.column(reconstruction_cost_index);
01421         rc.clear();
01422     }
01423 
01424     // DOWNWARD PHASE (the downward phase for the top layer is already done above,
01425     // except for the contrastive divergence step in the case where either
01426     // 'use_classification_cost' or 'top_layer_joint_cd' is false).
01427 
01428     for( int i=n_layers-2 ; i>=0 ; i-- )
01429     {
01430         if (i <= n_layers - 3) {
01431             connections[ i ]->setLearningRate( grad_learning_rate );
01432             layers[ i+1 ]->setLearningRate( grad_learning_rate );
01433 
01434             layers[i+1]->bpropUpdate( layers[i+1]->activations,
01435                                       layers[i+1]->getExpectations(),
01436                                       activations_gradients[i+1],
01437                                       expectations_gradients[i+1] );
01438 
01439             connections[i]->bpropUpdate( layers[i]->getExpectations(),
01440                                          layers[i+1]->activations,
01441                                          expectations_gradients[i],
01442                                          activations_gradients[i+1],
01443                                          true);
01444 
01445         }
01446 
01447         if (i <= n_layers - 3 || !use_classification_cost ||
01448                 !top_layer_joint_cd)
01449         {
01450 
01451             // N.B. the contrastiveDivergenceStep changes the activation and
01452             // expectation fields of the top layer of the RBM, so it must be
01453             // done last
01454             layers[i]->setLearningRate( cd_learning_rate );
01455             layers[i+1]->setLearningRate( cd_learning_rate );
01456             connections[i]->setLearningRate( cd_learning_rate );
01457 
01458             if( i > 0 )
01459             {
01460                 const Mat& source_act = layers[i]->activations;
01461                 save_layer_activations.resize(source_act.length(),
01462                                               source_act.width());
01463                 save_layer_activations << source_act;
01464                 const Mat& source_exp = layers[i]->getExpectations();
01465                 save_layer_expectations.resize(source_exp.length(),
01466                                                source_exp.width());
01467                 save_layer_expectations << source_exp;
01468             }
01469 
01470             if (reconstruct_layerwise)
01471             {
01472                 connections[i]->setAsUpInputs(layers[i+1]->getExpectations());
01473                 layers[i]->getAllActivations(connections[i], 0, true);
01474                 layers[i]->fpropNLL(
01475                         save_layer_expectations,
01476                         train_costs.column(reconstruction_cost_index+i+1));
01477                 rc += train_costs.column(reconstruction_cost_index+i+1);
01478             }
01479 
01480             contrastiveDivergenceStep( layers[ i ],
01481                                        connections[ i ],
01482                                        layers[ i+1 ] ,
01483                                        i, true);
01484             if( i > 0 )
01485             {
01486                 layers[i]->activations << save_layer_activations;
01487                 layers[i]->getExpectations() << save_layer_expectations;
01488             }
01489         }
01490     }
01491 
01492 }
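// A minimal sketch of how this mini-batch online step might be driven
// (hypothetical code, not part of this file; it assumes the method is called
// from a context where it is accessible and that 'n_costs' equals
// cost_names.length()):
//
//     int mbs = minibatch_size;                // rows per call
//     Mat inputs( mbs, inputsize() );
//     Mat targets( mbs, targetsize() );
//     Mat train_costs_m( mbs, n_costs );
//     train_costs_m.fill( MISSING_VALUE );
//     onlineStep( inputs, targets, train_costs_m );
//     // each row of train_costs_m now holds the costs of one example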
01493 
01495 // greedyStep //
01497 void SubsamplingDBN::greedyStep( const Vec& input, const Vec& target, int index )
01498 {
01499     PLASSERT( index < n_layers );
01500 
01501     reduced_layers[0]->expectation << input;
01502     for( int i=0 ; i<=index ; i++ )
01503     {
01504         connections[i]->setAsDownInput( reduced_layers[i]->expectation );
01505         layers[i+1]->getAllActivations( connections[i] );
01506         layers[i+1]->computeExpectation();
01507 
01508         if( i+1<n_layers-1 )
01509         {
01510             if( subsampling_modules[i+1] )
01511             {
01512                 subsampling_modules[i+1]->fprop(layers[i+1]->expectation,
01513                                                 reduced_layers[i+1]->expectation);
01514                 reduced_layers[i+1]->expectation_is_up_to_date = true;
01515             }
01516             else if( independent_biases )
01517             {
01518                 reduced_layers[i+1]->expectation << layers[i+1]->expectation;
01519                 reduced_layers[i+1]->expectation_is_up_to_date = true;
01520             }
01521         }
01522     }
01523 
01524     // TODO: add another learning rate?
01525     if( !partial_costs.isEmpty() && partial_costs[ index ] )
01526     {
01527         PLERROR("partial_costs doesn't work with subsampling yet");
01528         // put appropriate learning rate
01529         connections[ index ]->setLearningRate( grad_learning_rate );
01530         layers[ index+1 ]->setLearningRate( grad_learning_rate );
01531 
01532         // Backward pass
01533         real cost;
01534         partial_costs[ index ]->fprop( layers[ index+1 ]->expectation,
01535                                        target, cost );
01536 
01537         partial_costs[ index ]->bpropUpdate( layers[ index+1 ]->expectation,
01538                                              target, cost,
01539                                              expectation_gradients[ index+1 ]
01540                                              );
01541 
01542         layers[ index+1 ]->bpropUpdate( layers[ index+1 ]->activation,
01543                                         layers[ index+1 ]->expectation,
01544                                         activation_gradients[ index+1 ],
01545                                         expectation_gradients[ index+1 ] );
01546 
01547         connections[ index ]->bpropUpdate( layers[ index ]->expectation,
01548                                            layers[ index+1 ]->activation,
01549                                            expectation_gradients[ index ],
01550                                            activation_gradients[ index+1 ] );
01551 
01552         // put back old learning rate
01553         connections[ index ]->setLearningRate( cd_learning_rate );
01554         layers[ index+1 ]->setLearningRate( cd_learning_rate );
01555     }
01556 
01557     contrastiveDivergenceStep( reduced_layers[ index ],
01558                                connections[ index ],
01559                                layers[ index+1 ],
01560                                index, true);
01561 }
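// A hypothetical driver loop for the greedy step above (a sketch only, not
// part of this file; 'input', 'target', 'weight', 'nsamples' and
// 'layer_being_trained' are illustrative local names):
//
//     Vec input( inputsize() ), target( targetsize() );
//     real weight;
//     for( int sample = 0; sample < nsamples; sample++ )
//     {
//         train_set->getExample( sample % train_set->length(),
//                                input, target, weight );
//         greedyStep( input, target, layer_being_trained );
//     }
//
// This pre-trains the RBM between reduced_layers[layer_being_trained] and
// layers[layer_being_trained+1] with one CD update per example.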
01562 
01564 // greedySteps //
01566 void SubsamplingDBN::greedyStep( const Mat& inputs, const Mat& targets, int index, Mat& train_costs_m )
01567 {
01568     PLERROR("minibatch doesn't work with subsampling yet");
01569     PLASSERT( index < n_layers );
01570 
01571     layers[0]->setExpectations(inputs);
01572     for( int i=0 ; i<=index ; i++ )
01573     {
01574         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
01575         layers[i+1]->getAllActivations( connections[i], 0, true );
01576         layers[i+1]->computeExpectations();
01577     }
01578 
01579     // TODO: add another learning rate?
01580     if( !partial_costs.isEmpty() && partial_costs[ index ] )
01581     {
01582         // put appropriate learning rate
01583         connections[ index ]->setLearningRate( grad_learning_rate );
01584         layers[ index+1 ]->setLearningRate( grad_learning_rate );
01585 
01586         // Backward pass
01587         Vec costs;
01588         partial_costs[ index ]->fprop( layers[ index+1 ]->getExpectations(),
01589                                        targets, costs );
01590 
01591         partial_costs[ index ]->bpropUpdate(layers[index+1]->getExpectations(),
01592                 targets, costs,
01593                 expectations_gradients[ index+1 ]
01594                 );
01595 
01596         layers[ index+1 ]->bpropUpdate( layers[ index+1 ]->activations,
01597                                         layers[ index+1 ]->getExpectations(),
01598                                         activations_gradients[ index+1 ],
01599                                         expectations_gradients[ index+1 ] );
01600 
01601         connections[ index ]->bpropUpdate( layers[ index ]->getExpectations(),
01602                                            layers[ index+1 ]->activations,
01603                                            expectations_gradients[ index ],
01604                                            activations_gradients[ index+1 ] );
01605 
01606         // put back old learning rate
01607         connections[ index ]->setLearningRate( cd_learning_rate );
01608         layers[ index+1 ]->setLearningRate( cd_learning_rate );
01609     }
01610 
01611     if (reconstruct_layerwise)
01612     {
01613         layer_inputs.resize(minibatch_size,layers[index]->size);
01614         layer_inputs << layers[index]->getExpectations(); // we will perturb these, so save them
01615         connections[index]->setAsUpInputs(layers[index+1]->getExpectations());
01616         layers[index]->getAllActivations(connections[index], 0, true);
01617         layers[index]->fpropNLL(layer_inputs, train_costs_m.column(reconstruction_cost_index+index+1));
01618         Mat rc = train_costs_m.column(reconstruction_cost_index);
01619         rc += train_costs_m.column(reconstruction_cost_index+index+1);
01620         layers[index]->setExpectations(layer_inputs); // and restore them here
01621     }
01622 
01623     contrastiveDivergenceStep( layers[ index ],
01624                                connections[ index ],
01625                                layers[ index+1 ],
01626                                index, true);
01627 
01628 }
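// Note on the layer-wise reconstruction cost computed above: the current
// expectations of layers[index] are saved in 'layer_inputs', a mean-field
// down-pass through connections[index] produces a reconstruction, and
// fpropNLL measures the negative log-likelihood of the saved values under
// that reconstruction. For binomial units this is the usual cross-entropy
// (an assumption about the RBMLayer subclass in use):
//     NLL(x, x_hat) = - sum_j [ x_j*log(x_hat_j) + (1-x_j)*log(1-x_hat_j) ]
// One column of train_costs_m per layer is filled, and the column at
// reconstruction_cost_index accumulates their sum.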
01629 
01631 // jointGreedyStep //
01633 void SubsamplingDBN::jointGreedyStep( const Vec& input, const Vec& target )
01634 {
01635     PLERROR("classification_module doesn't work with subsampling yet");
01636     PLASSERT( joint_layer );
01637     PLASSERT_MSG(batch_size == 1, "Not implemented for mini-batches");
01638 
01639     layers[0]->expectation << input;
01640     for( int i=0 ; i<n_layers-2 ; i++ )
01641     {
01642         connections[i]->setAsDownInput( layers[i]->expectation );
01643         layers[i+1]->getAllActivations( connections[i] );
01644         layers[i+1]->computeExpectation();
01645     }
01646 
01647     if( !partial_costs.isEmpty() && partial_costs[ n_layers-2 ] )
01648     {
01649         // Deterministic forward pass
01650         connections[ n_layers-2 ]->setAsDownInput(
01651             layers[ n_layers-2 ]->expectation );
01652         layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
01653         layers[ n_layers-1 ]->computeExpectation();
01654 
01655         // put appropriate learning rate
01656         connections[ n_layers-2 ]->setLearningRate( grad_learning_rate );
01657         layers[ n_layers-1 ]->setLearningRate( grad_learning_rate );
01658 
01659         // Backward pass
01660         real cost;
01661         partial_costs[ n_layers-2 ]->fprop( layers[ n_layers-1 ]->expectation,
01662                                             target, cost );
01663 
01664         partial_costs[ n_layers-2 ]->bpropUpdate(
01665             layers[ n_layers-1 ]->expectation, target, cost,
01666             expectation_gradients[ n_layers-1 ] );
01667 
01668         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
01669                                            layers[ n_layers-1 ]->expectation,
01670                                            activation_gradients[ n_layers-1 ],
01671                                            expectation_gradients[ n_layers-1 ]
01672                                          );
01673 
01674         connections[ n_layers-2 ]->bpropUpdate(
01675             layers[ n_layers-2 ]->expectation,
01676             layers[ n_layers-1 ]->activation,
01677             expectation_gradients[ n_layers-2 ],
01678             activation_gradients[ n_layers-1 ] );
01679 
01680         // put back old learning rate
01681         connections[ n_layers-2 ]->setLearningRate( cd_learning_rate );
01682         layers[ n_layers-1 ]->setLearningRate( cd_learning_rate );
01683     }
01684 
01685     Vec target_exp = classification_module->target_layer->expectation;
01686     fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
01687 
01688     contrastiveDivergenceStep(
01689         get_pointer( joint_layer ),
01690         get_pointer( classification_module->joint_connection ),
01691         layers[ n_layers-1 ], n_layers-2);
01692 }
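// In the joint greedy step above, the penultimate layer and a one-hot
// encoding of the target (written into the classification module's target
// layer by fill_one_hot) together form the visible side of 'joint_layer', so
// the CD update trains the top RBM on features and class jointly. For
// example (hypothetical values), with 4 classes and target[0] == 2,
// target_exp becomes ( 0, 0, 1, 0 ).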
01693 
01695 // fineTuningStep //
01697 void SubsamplingDBN::fineTuningStep( const Vec& input, const Vec& target,
01698                                     Vec& train_costs )
01699 {
01700     final_cost_value.resize(0);
01701     // fprop
01702     reduced_layers[0]->expectation << input;
01703     for( int i=0 ; i<n_layers-2 ; i++ )
01704     {
01705         connections[i]->setAsDownInput( reduced_layers[i]->expectation );
01706         layers[i+1]->getAllActivations( connections[i] );
01707         layers[i+1]->computeExpectation();
01708 
01709         if( subsampling_modules[i+1] )
01710         {
01711             subsampling_modules[i+1]->fprop(layers[i+1]->expectation,
01712                                             reduced_layers[i+1]->expectation);
01713             reduced_layers[i+1]->expectation_is_up_to_date = true;
01714         }
01715         else if( independent_biases )
01716         {
01717             reduced_layers[i+1]->expectation << layers[i+1]->expectation;
01718             reduced_layers[i+1]->expectation_is_up_to_date = true;
01719         }
01720     }
01721 
01722     if( final_cost )
01723     {
01724         connections[ n_layers-2 ]->setAsDownInput(
01725             reduced_layers[ n_layers-2 ]->expectation );
01726         layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
01727         layers[ n_layers-1 ]->computeExpectation();
01728 
01729         if( final_module )
01730         {
01731             final_module->fprop( layers[ n_layers-1 ]->expectation,
01732                                  final_cost_input );
01733             final_cost->fprop( final_cost_input, target, final_cost_value );
01734 
01735             final_cost->bpropUpdate( final_cost_input, target,
01736                                      final_cost_value[0],
01737                                      final_cost_gradient );
01738             final_module->bpropUpdate( layers[ n_layers-1 ]->expectation,
01739                                        final_cost_input,
01740                                        expectation_gradients[ n_layers-1 ],
01741                                        final_cost_gradient );
01742         }
01743         else
01744         {
01745             final_cost->fprop( layers[ n_layers-1 ]->expectation, target,
01746                                final_cost_value );
01747 
01748             final_cost->bpropUpdate( layers[ n_layers-1 ]->expectation,
01749                                      target, final_cost_value[0],
01750                                      expectation_gradients[ n_layers-1 ] );
01751         }
01752 
01753         train_costs.subVec(final_cost_index, final_cost_value.length())
01754             << final_cost_value;
01755 
01756         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
01757                                            layers[ n_layers-1 ]->expectation,
01758                                            activation_gradients[ n_layers-1 ],
01759                                            expectation_gradients[ n_layers-1 ]
01760                                          );
01761 
01762         connections[ n_layers-2 ]->bpropUpdate(
01763             reduced_layers[ n_layers-2 ]->expectation,
01764             layers[ n_layers-1 ]->activation,
01765             subsampling_gradients[ n_layers-2 ],
01766             activation_gradients[ n_layers-1 ] );
01767     }
01768     else  {
01769         subsampling_gradients[ n_layers-2 ].clear();
01770     }
01771 
01772     if( use_classification_cost )
01773     {
01774         PLERROR("classification_cost doesn't work with subsampling yet");
01775         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01776                                       class_output );
01777         real nll_cost;
01778 
01779         // This doesn't work (possibly a gcc bug), so CostModule::fprop is called explicitly below:
01780         // classification_cost->fprop( class_output, target, cost );
01781         classification_cost->CostModule::fprop( class_output, target,
01782                                                 nll_cost );
01783 
01784         real class_error =
01785             ( argmax(class_output) == (int) round(target[0]) ) ? 0
01786                                                                : 1;
01787 
01788         train_costs[nll_cost_index] = nll_cost;
01789         train_costs[class_cost_index] = class_error;
01790 
01791         classification_cost->bpropUpdate( class_output, target, nll_cost,
01792                                           class_gradient );
01793 
01794         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01795                                             class_output,
01796                                             expectation_gradients[n_layers-2],
01797                                             class_gradient,
01798                                             true );
01799     }
01800 
01801     for( int i=n_layers-2 ; i>0 ; i-- )
01802     {
01803         if( subsampling_modules[i] )
01804         {
01805             subsampling_modules[i]->bpropUpdate( layers[i]->expectation,
01806                                                  reduced_layers[i]->expectation,
01807                                                  expectation_gradients[i],
01808                                                  subsampling_gradients[i] );
01809             layers[i]->bpropUpdate( layers[i]->activation,
01810                                     layers[i]->expectation,
01811                                     activation_gradients[i],
01812                                     expectation_gradients[i] );
01813         }
01814         else
01815         {
01816             layers[i]->bpropUpdate( layers[i]->activation,
01817                                     reduced_layers[i]->expectation,
01818                                     activation_gradients[i],
01819                                     subsampling_gradients[i] );
01820         }
01821         connections[i-1]->bpropUpdate( reduced_layers[i-1]->expectation,
01822                                        layers[i]->activation,
01823                                        expectation_gradients[i-1],
01824                                        activation_gradients[i] );
01825     }
01826 }
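// The backward pass above applies the chain rule through the optional
// subsampling modules: the gradient on reduced_layers[i]->expectation is
// mapped back through subsampling_modules[i] (when present) to a gradient on
// layers[i]->expectation, then through the layer's nonlinearity to a gradient
// on layers[i]->activation, and finally through connections[i-1] to a
// gradient on reduced_layers[i-1]->expectation. In informal notation
// (introduced here only for explanation),
//     dC/da_i     = J_layer_i^T * ( J_subsampling_i^T * dC/ds_i )
//     dC/ds_(i-1) = W_(i-1)^T * dC/da_i
// where s_i is the (possibly subsampled) expectation, a_i the activation,
// J_* the corresponding Jacobians and W_(i-1) the weights of connections[i-1].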
01827 
01828 void SubsamplingDBN::fineTuningStep(const Mat& inputs, const Mat& targets,
01829                                    Mat& train_costs )
01830 {
01831     PLERROR("minibatch doesn't work with subsampling yet");
01832     final_cost_values.resize(0, 0);
01833     // fprop
01834     layers[0]->getExpectations() << inputs;
01835     for( int i=0 ; i<n_layers-2 ; i++ )
01836     {
01837         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
01838         layers[i+1]->getAllActivations( connections[i], 0, true );
01839         layers[i+1]->computeExpectations();
01840     }
01841 
01842     if( final_cost )
01843     {
01844         connections[ n_layers-2 ]->setAsDownInputs(
01845             layers[ n_layers-2 ]->getExpectations() );
01846         // TODO Also ensure getAllActivations fills everything.
01847         layers[ n_layers-1 ]->getAllActivations(connections[n_layers-2],
01848                                                 0, true);
01849         layers[ n_layers-1 ]->computeExpectations();
01850 
01851         if( final_module )
01852         {
01853             final_cost_inputs.resize(minibatch_size,
01854                                      final_module->output_size);
01855             final_module->fprop( layers[ n_layers-1 ]->getExpectations(),
01856                                  final_cost_inputs );
01857             final_cost->fprop( final_cost_inputs, targets, final_cost_values );
01858 
01859             // TODO This extra memory copy is annoying: how can we avoid it?
01860             optimized_costs << final_cost_values.column(0);
01861             final_cost->bpropUpdate( final_cost_inputs, targets,
01862                                      optimized_costs,
01863                                      final_cost_gradients );
01864             final_module->bpropUpdate( layers[ n_layers-1 ]->getExpectations(),
01865                                        final_cost_inputs,
01866                                        expectations_gradients[ n_layers-1 ],
01867                                        final_cost_gradients );
01868         }
01869         else
01870         {
01871             final_cost->fprop( layers[ n_layers-1 ]->getExpectations(), targets,
01872                                final_cost_values );
01873 
01874             optimized_costs << final_cost_values.column(0);
01875             final_cost->bpropUpdate( layers[ n_layers-1 ]->getExpectations(),
01876                                      targets, optimized_costs,
01877                                      expectations_gradients[ n_layers-1 ] );
01878         }
01879 
01880         train_costs.subMatColumns(final_cost_index, final_cost_values.width())
01881             << final_cost_values;
01882 
01883         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activations,
01884                                            layers[ n_layers-1 ]->getExpectations(),
01885                                            activations_gradients[ n_layers-1 ],
01886                                            expectations_gradients[ n_layers-1 ]
01887                                          );
01888 
01889         connections[ n_layers-2 ]->bpropUpdate(
01890             layers[ n_layers-2 ]->getExpectations(),
01891             layers[ n_layers-1 ]->activations,
01892             expectations_gradients[ n_layers-2 ],
01893             activations_gradients[ n_layers-1 ] );
01894     }
01895     else  {
01896         expectations_gradients[ n_layers-2 ].clear();
01897     }
01898 
01899     if( use_classification_cost )
01900     {
01901         PLERROR("SubsamplingDBN::fineTuningStep - Not implemented for "
01902                 "mini-batches");
01903         /*
01904         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01905                                       class_output );
01906         real nll_cost;
01907 
01908         // This doesn't work. gcc bug?
01909         // classification_cost->fprop( class_output, target, cost );
01910         classification_cost->CostModule::fprop( class_output, target,
01911                                                 nll_cost );
01912 
01913         real class_error =
01914             ( argmax(class_output) == (int) round(target[0]) ) ? 0
01915                                                                : 1;
01916 
01917         train_costs[nll_cost_index] = nll_cost;
01918         train_costs[class_cost_index] = class_error;
01919 
01920         classification_cost->bpropUpdate( class_output, target, nll_cost,
01921                                           class_gradient );
01922 
01923         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01924                                             class_output,
01925                                             expectation_gradients[n_layers-2],
01926                                             class_gradient,
01927                                             true );
01928         */
01929     }
01930 
01931     for( int i=n_layers-2 ; i>0 ; i-- )
01932     {
01933         layers[i]->bpropUpdate( layers[i]->activations,
01934                                 layers[i]->getExpectations(),
01935                                 activations_gradients[i],
01936                                 expectations_gradients[i] );
01937 
01938         connections[i-1]->bpropUpdate( layers[i-1]->getExpectations(),
01939                                        layers[i]->activations,
01940                                        expectations_gradients[i-1],
01941                                        activations_gradients[i] );
01942     }
01943 
01944     // Do it AFTER the bprop to avoid interfering with the activations used in
01945     // the bprop (and do not worry that the weights have changed a bit). This is
01946     // inconsistent with the current implementation in the greedy stage.
01947     if (reconstruct_layerwise)
01948     {
01949         Mat rc = train_costs.column(reconstruction_cost_index);
01950         rc.clear();
01951         for( int index=0 ; index<n_layers-1 ; index++ )
01952         {
01953             layer_inputs.resize(minibatch_size,layers[index]->size);
01954             layer_inputs << layers[index]->getExpectations();
01955             connections[index]->setAsUpInputs(layers[index+1]->getExpectations());
01956             layers[index]->getAllActivations(connections[index], 0, true);
01957             layers[index]->fpropNLL(layer_inputs, train_costs.column(reconstruction_cost_index+index+1));
01958             rc += train_costs.column(reconstruction_cost_index+index+1);
01959         }
01960     }
01961 
01962 
01963 }
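// When reconstruct_layerwise is set, the loop above fills one column of
// 'train_costs' per layer with that layer's reconstruction NLL and sums them
// into the column at reconstruction_cost_index, i.e. (informally)
//     rc(example) = sum over layers of NLL_layer(example)
// As noted above, it is done after the bprop so that the up-down passes here
// do not clobber the activations the bprop needs; the weights have already
// been updated, which is accepted as a small inconsistency.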
01964 
01966 // contrastiveDivergenceStep //
01968 void SubsamplingDBN::contrastiveDivergenceStep(
01969     const PP<RBMLayer>& down_layer,
01970     const PP<RBMConnection>& connection,
01971     const PP<RBMLayer>& up_layer,
01972     int layer_index, bool nofprop)
01973 {
01974     bool mbatch = minibatch_size > 1 || minibatch_hack;
01975 
01976     // positive phase
01977     if (!nofprop)
01978     {
01979         if (mbatch) {
01980             connection->setAsDownInputs( down_layer->getExpectations() );
01981             up_layer->getAllActivations( connection, 0, true );
01982             up_layer->computeExpectations();
01983         } else {
01984             connection->setAsDownInput( down_layer->expectation );
01985             up_layer->getAllActivations( connection );
01986             up_layer->computeExpectation();
01987         }
01988     }
01989 
01990     if (mbatch) {
01991         // accumulate positive stats using the expectation
01992         // we deep-copy because the value will change during negative phase
01993         pos_down_vals.resize(minibatch_size, down_layer->size);
01994         pos_up_vals.resize(minibatch_size, up_layer->size);
01995 
01996         pos_down_vals << down_layer->getExpectations();
01997         pos_up_vals << up_layer->getExpectations();
01998 
01999         // down propagation, starting from a sample of up_layer
02000         if (background_gibbs_update_ratio<1)
02001             // then do some contrastive divergence, otherwise only background Gibbs
02002         {
02003             up_layer->generateSamples();
02004             connection->setAsUpInputs( up_layer->samples );
02005             down_layer->getAllActivations( connection, 0, true );
02006             down_layer->generateSamples();
02007             // negative phase
02008             connection->setAsDownInputs( down_layer->samples );
02009             up_layer->getAllActivations( connection, 0, mbatch );
02010             up_layer->computeExpectations();
02011 
02012             // accumulate negative stats
02013             // no need to deep-copy because the values won't change before update
02014             Mat neg_down_vals = down_layer->samples;
02015             Mat neg_up_vals = up_layer->getExpectations();
02016 
02017             if (background_gibbs_update_ratio==0)
02018             // update here only if there is ONLY contrastive divergence
02019             {
02020                 down_layer->update( pos_down_vals, neg_down_vals );
02021                 connection->update( pos_down_vals, pos_up_vals,
02022                                     neg_down_vals, neg_up_vals );
02023                 up_layer->update( pos_up_vals, neg_up_vals );
02024             }
02025             else
02026             {
02027                 connection->accumulatePosStats(pos_down_vals,pos_up_vals);
02028                 cd_neg_down_vals.resize(minibatch_size, down_layer->size);
02029                 cd_neg_up_vals.resize(minibatch_size, up_layer->size);
02030                 cd_neg_down_vals << neg_down_vals;
02031                 cd_neg_up_vals << neg_up_vals;
02032             }
02033         }
02034         //
02035         if (background_gibbs_update_ratio>0)
02036         {
02037             Mat down_state = gibbs_down_state[layer_index];
02038 
02039             if (initialize_gibbs_chain) // initializing or re-initializing the chain
02040             {
02041                 if (background_gibbs_update_ratio==1) // if <1 just use the CD state
02042                 {
02043                     up_layer->generateSamples();
02044                     connection->setAsUpInputs(up_layer->samples);
02045                     down_layer->getAllActivations(connection, 0, true);
02046                     down_layer->generateSamples();
02047                     down_state << down_layer->samples;
02048                 }
02049                 initialize_gibbs_chain=false;
02050             }
02051             // sample up state given down state
02052             connection->setAsDownInputs(down_state);
02053             up_layer->getAllActivations(connection, 0, true);
02054             up_layer->generateSamples();
02055 
02056             // sample down state given up state, to prepare for next time
02057             connection->setAsUpInputs(up_layer->samples);
02058             down_layer->getAllActivations(connection, 0, true);
02059             down_layer->generateSamples();
02060 
02061             // update using down_state and up_layer->getExpectations() for the moving
02062             // average of the negative phase, optionally blended with the CD statistics saved above
02063             if (background_gibbs_update_ratio<1)
02064             {
02065                 down_layer->updateCDandGibbs(pos_down_vals,cd_neg_down_vals,
02066                                              down_state,
02067                                              background_gibbs_update_ratio);
02068                 connection->updateCDandGibbs(pos_down_vals,pos_up_vals,
02069                                              cd_neg_down_vals, cd_neg_up_vals,
02070                                              down_state,
02071                                              up_layer->getExpectations(),
02072                                              background_gibbs_update_ratio);
02073                 up_layer->updateCDandGibbs(pos_up_vals,cd_neg_up_vals,
02074                                            up_layer->getExpectations(),
02075                                            background_gibbs_update_ratio);
02076             }
02077             else
02078             {
02079                 down_layer->updateGibbs(pos_down_vals,down_state);
02080                 connection->updateGibbs(pos_down_vals,pos_up_vals,down_state,
02081                                         up_layer->getExpectations());
02082                 up_layer->updateGibbs(pos_up_vals,up_layer->getExpectations());
02083             }
02084 
02085             // Save Gibbs chain's state.
02086             down_state << down_layer->samples;
02087         }
02088     } else {
02089         up_layer->generateSample();
02090 
02091         // accumulate positive stats using the expectation
02092         // we deep-copy because the value will change during negative phase
02093         pos_down_val.resize( down_layer->size );
02094         pos_up_val.resize( up_layer->size );
02095 
02096         pos_down_val << down_layer->expectation;
02097         pos_up_val << up_layer->expectation;
02098 
02099         // down propagation, starting from a sample of up_layer
02100         connection->setAsUpInput( up_layer->sample );
02101 
02102         down_layer->getAllActivations( connection );
02103 
02104         down_layer->generateSample();
02105         // negative phase
02106         connection->setAsDownInput( down_layer->sample );
02107         up_layer->getAllActivations( connection, 0, mbatch );
02108         up_layer->computeExpectation();
02109         // accumulate negative stats
02110         // no need to deep-copy because the values won't change before update
02111         Vec neg_down_val = down_layer->sample;
02112         Vec neg_up_val = up_layer->expectation;
02113 
02114         // update
02115         down_layer->update( pos_down_val, neg_down_val );
02116         connection->update( pos_down_val, pos_up_val,
02117                 neg_down_val, neg_up_val );
02118         up_layer->update( pos_up_val, neg_up_val );
02119     }
02120 }
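// The single-example branch above is plain CD-1. Writing v+/h+ for the
// positive-phase values (pos_down_val, pos_up_val) and v-/h- for the
// negative-phase values obtained after one down-up half-step of Gibbs
// sampling (neg_down_val, neg_up_val), the three update() calls amount to
//     delta W       ~  h+ (v+)^T  -  h- (v-)^T
//     delta b_down  ~  v+ - v-
//     delta b_up    ~  h+ - h-
// scaled by the learning rate currently set on each module; the exact sign
// and scaling conventions are those of RBMLayer::update and
// RBMConnection::update.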
02121 
02122 
02124 // computeOutput //
02126 void SubsamplingDBN::computeOutput(const Vec& input, Vec& output) const
02127 {
02128 
02129     // Compute the output from the input.
02130     output.resize(0);
02131 
02132     // fprop
02133     reduced_layers[0]->expectation << input;
02134 
02135     if(reconstruct_layerwise)
02136         reconstruction_costs[0]=0;
02137 
02138     for( int i=0 ; i<n_layers-2 ; i++ )
02139     {
02140         connections[i]->setAsDownInput( reduced_layers[i]->expectation );
02141         layers[i+1]->getAllActivations( connections[i] );
02142         layers[i+1]->computeExpectation();
02143 
02144         if( subsampling_modules[i+1] )
02145         {
02146             subsampling_modules[i+1]->fprop(layers[i+1]->expectation,
02147                                             reduced_layers[i+1]->expectation);
02148             reduced_layers[i+1]->expectation_is_up_to_date = true;
02149         }
02150         else if( independent_biases )
02151         {
02152             reduced_layers[i+1]->expectation << layers[i+1]->expectation;
02153             reduced_layers[i+1]->expectation_is_up_to_date = true;
02154         }
02155 
02156         if (reconstruct_layerwise)
02157         {
02158             PLERROR( "reconstruct_layerwise and subsampling don't work yet" );
02159             layer_input.resize(layers[i]->size);
02160             layer_input << layers[i]->expectation;
02161             connections[i]->setAsUpInput(layers[i+1]->expectation);
02162             layers[i]->getAllActivations(connections[i]);
02163             real rc = reconstruction_costs[i+1] = layers[i]->fpropNLL( layer_input );
02164             reconstruction_costs[0] += rc;
02165         }
02166     }
02167 
02168 
02169     if( use_classification_cost )
02170         classification_module->fprop( layers[ n_layers-2 ]->expectation,
02171                                       output );
02172 
02173     if( final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2] ))
02174     {
02175         connections[ n_layers-2 ]->setAsDownInput(
02176             reduced_layers[ n_layers-2 ]->expectation );
02177         layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
02178         layers[ n_layers-1 ]->computeExpectation();
02179 
02180         if( final_module )
02181         {
02182             final_module->fprop( layers[ n_layers-1 ]->expectation,
02183                                  final_cost_input );
02184             output.append( final_cost_input );
02185         }
02186         else
02187         {
02188             output.append( layers[ n_layers-1 ]->expectation );
02189         }
02190 
02191         if (reconstruct_layerwise)
02192         {
02193             PLERROR( "reconstruct_layerwise and subsampling don't work yet" );
02194             layer_input.resize(layers[n_layers-2]->size);
02195             layer_input << layers[n_layers-2]->expectation;
02196             connections[n_layers-2]->setAsUpInput(layers[n_layers-1]->expectation);
02197             layers[n_layers-2]->getAllActivations(connections[n_layers-2]);
02198             real rc = reconstruction_costs[n_layers-1] = layers[n_layers-2]->fpropNLL( layer_input );
02199             reconstruction_costs[0] += rc;
02200         }
02201     }
02202 
02203 }
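// Layout of the 'output' vector produced above: when use_classification_cost
// is set, the first n_classes entries are the class scores produced by
// classification_module; then, if there is a final_cost (or a partial cost on
// the last layer), either the final_module output or the top layer's
// expectation is appended. computeCostsFromOutputs below relies on exactly
// this layout when slicing 'output' with subVec.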
02204 
02205 void SubsamplingDBN::computeCostsFromOutputs(const Vec& input, const Vec& output,
02206                                            const Vec& target, Vec& costs) const
02207 {
02208 
02209     // Compute the costs from *already* computed output.
02210     costs.resize( cost_names.length() );
02211     costs.fill( MISSING_VALUE );
02212 
02213     // To keep this code independent of the order in which this method and
02214     // computeOutput are called, it should go in a redefinition of computeOutputAndCosts.
02215     if( use_classification_cost )
02216     {
02217         classification_cost->CostModule::fprop( output.subVec(0, n_classes),
02218                 target, costs[nll_cost_index] );
02219 
02220         costs[class_cost_index] =
02221             (argmax(output.subVec(0, n_classes)) == (int) round(target[0]))? 0
02222             : 1;
02223     }
02224 
02225     if( final_cost )
02226     {
02227         int init = use_classification_cost ? n_classes : 0;
02228         final_cost->fprop( output.subVec( init, output.size() - init ),
02229                            target, final_cost_value );
02230 
02231         costs.subVec(final_cost_index, final_cost_value.length())
02232             << final_cost_value;
02233     }
02234 
02235     if( !partial_costs.isEmpty() )
02236     {
02237         Vec pcosts;
02238         for( int i=0 ; i<n_layers-1 ; i++ )
02239             // propagate into local cost associated to output of layer i+1
02240             if( partial_costs[ i ] )
02241             {
02242                 partial_costs[ i ]->fprop( layers[ i+1 ]->expectation,
02243                                            target, pcosts);
02244 
02245                 costs.subVec(partial_costs_indices[i], pcosts.length())
02246                     << pcosts;
02247             }
02248     }
02249 
02250     if (reconstruct_layerwise)
02251         costs.subVec(reconstruction_cost_index, reconstruction_costs.length())
02252             << reconstruction_costs;
02253 
02254 }
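// The 'costs' vector is indexed by the *_index members set up at build time:
// nll_cost_index and class_cost_index (when use_classification_cost), the
// block starting at final_cost_index, one block per partial cost starting at
// partial_costs_indices[i], and the reconstruction costs starting at
// reconstruction_cost_index; entries that do not apply remain MISSING_VALUE.
// A caller wanting output and costs together can rely on the inherited
// PLearner::computeOutputAndCosts, which (unless redefined) is expected to
// call computeOutput and then this method, e.g. (where 'learner' is a
// hypothetical handle on a SubsamplingDBN):
//
//     Vec output, costs;
//     learner->computeOutputAndCosts( input, target, output, costs );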
02255 
02256 void SubsamplingDBN::test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs, VMat testcosts) const
02257 {
02258 
02259     //  Re-implementing simply because we want to measure the time it takes to
02260     //  do the testing. The reset is there for two purposes:
02261     //  1. to have fine-grained statistics at each call of test()
02262     //  2. to be able to have a more meaningful cumulative_testing_time
02263     //
02264     //  BIG Nota Bene:
02265     //  Get the statistics with E[testN.E[cumulative_test_time]], where N is
02266     //  the index of the last split that you are testing.
02267     //  E[testN-1.E[cumulative_test_time]] will basically be the cumulative
02268     //  test time up to (and including) the (N-1)-th split, so it is a fairly
02269     //  meaningless number on its own.
02270 
02271     Profiler::reset("testing");
02272     Profiler::start("testing");
02273 
02274     inherited::test(testset, test_stats, testoutputs, testcosts);
02275 
02276     Profiler::end("testing");
02277 
02278     const Profiler::Stats& stats = Profiler::getStats("testing");
02279 
02280     real ticksPerSec = Profiler::ticksPerSecond();
02281     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
02282     cumulative_testing_time += cpu_time;
02283 
02284     if (testcosts)
02285         // if it is used (usually not) testcosts is a VMat that is of size
02286         // nexamples x ncosts. The last column will have missing values.
02287         // We just need to put a value in one of the rows of that column.
02288         testcosts->put(0,cumulative_testing_time_cost_index,cumulative_testing_time);
02289 
02290     if (test_stats) {
02291         // Here we simply update the corresponding stat index
02292         Vec test_time_stats(test_stats->length(), MISSING_VALUE);
02293         test_time_stats[cumulative_testing_time_cost_index] =
02294             cumulative_testing_time;
02295         test_stats->update(test_time_stats);
02296         test_stats->finalize();
02297     }
02298 }
02299 
02300 
02301 TVec<string> SubsamplingDBN::getTestCostNames() const
02302 {
02303     // Return the names of the costs computed by computeCostsFromOutputs
02304     // (these may or may not be exactly the same as what's returned by
02305     // getTrainCostNames).
02306 
02307     return cost_names;
02308 }
02309 
02310 TVec<string> SubsamplingDBN::getTrainCostNames() const
02311 {
02312     return cost_names;
02313 }
02314 
02315 
02316 //#####  Helper functions  ##################################################
02317 
02318 void SubsamplingDBN::setLearningRate( real the_learning_rate )
02319 {
02320     for( int i=0 ; i<n_layers-1 ; i++ )
02321     {
02322         layers[i]->setLearningRate( the_learning_rate );
02323         connections[i]->setLearningRate( the_learning_rate );
02324     }
02325     layers[n_layers-1]->setLearningRate( the_learning_rate );
02326 
02327     if( use_classification_cost )
02328     {
02329         classification_module->joint_connection->setLearningRate(
02330             the_learning_rate );
02331         joint_layer->setLearningRate( the_learning_rate );
02332     }
02333 
02334     if( final_module )
02335         final_module->setLearningRate( the_learning_rate );
02336 
02337     if( final_cost )
02338         final_cost->setLearningRate( the_learning_rate );
02339 }
02340 
02341 
02342 } // end of namespace PLearn
02343 
02344 
02345 /*
02346   Local Variables:
02347   mode:c++
02348   c-basic-offset:4
02349   c-file-style:"stroustrup"
02350   c-file-offsets:((innamespace . 0)(inline-open . 0))
02351   indent-tabs-mode:nil
02352   fill-column:79
02353   End:
02354 */
02355 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :