00001 // -*- C++ -*-
00002 
00003 // DeepBeliefNet.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00040 #define PL_LOG_MODULE_NAME "DeepBeliefNet"
00041 #include "DeepBeliefNet.h"
00042 #include "RBMMatrixTransposeConnection.h"
00043 #include <plearn/io/pl_log.h>
00044 #include <plearn/io/load_and_save.h>
00045 
00046 #define minibatch_hack 0 // Do we force the minibatch setting? (debug hack)
00047 
00048 namespace PLearn {
00049 using namespace std;
00050 
00051 PLEARN_IMPLEMENT_OBJECT(
00052     DeepBeliefNet,
00053     "Neural network, learned layer-wise in a greedy fashion.",
00054     "This version supports different unit types, different connection types,\n"
00055     "and different cost functions, including the NLL in classification.\n");
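// A minimal configuration sketch, in comments only: it assumes the usual
// PLearn RBM classes (e.g. RBMBinomialLayer, RBMMatrixConnection) and uses
// only options declared in declareOptions() below; values and construction
// details are illustrative, not prescriptive.
//
//     PP<DeepBeliefNet> dbn = new DeepBeliefNet();
//     dbn->n_classes          = 10;    // e.g. 10-way classification
//     dbn->cd_learning_rate   = 0.01;  // greedy (contrastive divergence) phase
//     dbn->grad_learning_rate = 0.1;   // supervised fine-tuning phase
//     dbn->layers.resize(3);           // input layer + 2 hidden layers
//     dbn->connections.resize(2);      // one connection per pair of layers
//     // ... fill in layers[i] and connections[i] with RBM layers/connections
//     dbn->training_schedule.resize(3);
//     dbn->training_schedule[0] = 1000;  // greedy phase, connection 0
//     dbn->training_schedule[1] = 1000;  // greedy phase, connection 1
//     dbn->training_schedule[2] = 500;   // supervised fine-tuning
//     dbn->build();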
00056 
00058 // DeepBeliefNet //
00060 DeepBeliefNet::DeepBeliefNet() :
00061     cd_learning_rate( 0. ),
00062     cd_decrease_ct( 0. ),
00063     up_down_learning_rate( 0. ),
00064     up_down_decrease_ct( 0. ),
00065     grad_learning_rate( 0. ),
00066     grad_decrease_ct( 0. ),
00067     // grad_weight_decay( 0. ),
00068     batch_size( 1 ),
00069     n_classes( -1 ),
00070     up_down_nstages( 0 ),
00071     use_classification_cost( true ),
00072     reconstruct_layerwise( false ),
00073     i_output_layer( -1 ),
00074     learnerExpdir(""),
00075     save_learner_before_fine_tuning( false ),
00076     use_sample_for_up_layer( false ),
00077     use_corrupted_posDownVal( "none" ),
00078     noise_type( "masking_noise" ),
00079     fraction_of_masked_inputs( 0 ),
00080     mask_with_pepper_salt( false ),
00081     prob_salt_noise( 0.5 ),
00082     online ( false ),
00083     background_gibbs_update_ratio(0),
00084     gibbs_chain_reinit_freq( INT_MAX ),
00085     mean_field_contrastive_divergence_ratio( 0 ),
00086     train_stats_window( -1 ),
00087     minibatch_size( 0 ),
00088     initialize_gibbs_chain( false ),
00089     nll_cost_index( -1 ),
00090     class_cost_index( -1 ),
00091     final_cost_index( -1 ),
00092     reconstruction_cost_index( -1 ),
00093     training_cpu_time_cost_index ( -1 ),
00094     cumulative_training_time_cost_index ( -1 ),
00095     cumulative_testing_time_cost_index ( -1 ),
00096     cumulative_training_time( 0 ),
00097     cumulative_testing_time( 0 ),
00098     up_down_stage( 0 )
00099 {
00100     random_gen = new PRandom();
00101     n_layers = 0;
00102 }
00103 
00104 
00105 void DeepBeliefNet::declareMethods(RemoteMethodMap& rmm)
00106 {
00107     // Insert a backpointer to remote methods; note that this is different from declareOptions().
00108     rmm.inherited(inherited::_getRemoteMethodMap_());
00109     declareMethod(
00110         rmm, "fantasizeKTime",
00111         &DeepBeliefNet::fantasizeKTime,
00112         (BodyDoc("On a trained learner, computes an encode-decode (fantasize) pass through a specified number of hidden layers."),
00113          ArgDoc ("kTime", "Number of times we want to fantasize. \n"
00114                  "The next input image will again be the source image (if alwaysFromSrcImg is True) \n"
00115                  "or the last fantasized image (if alwaysFromSrcImg is False), and so on for kTime steps."),
00116          ArgDoc ("srcImg", "Source image vector (should have the same width as the input layer)"),
00117          ArgDoc ("sampling", "Vector of bools indicating whether sampling is done at each hidden layer\n"
00118                 "during decoding. Its width indicates how many hidden layers will be used\n"
00119                 " (should have the same width as maskNoiseFractOrProb);\n"
00120                 "lower elements of the vector correspond to lower layers."),
00121          ArgDoc ("alwaysFromSrcImg", "Boolean indicating whether each encode-decode \n"
00122                 "step starts from the source image (set to True) or \n"
00123                 "from the last fantasized image (set to False). "),
00124          RetDoc ("Fantasized images obtained at each of the kTime steps.")));
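    // The encode-decode loop performed by fantasizeKTime can be summarized as
    // follows (a sketch in pseudocode comments that only restates the behaviour
    // documented above; 'encode' and 'decode' are informal names, not actual
    // methods of this class):
    //
    //     input = srcImg;
    //     for (int k = 0; k < kTime; k++) {
    //         up = encode(input);           // propagate up through the chosen
    //                                       // hidden layers, sampling where
    //                                       // 'sampling' says so
    //         fantasized[k] = decode(up);   // propagate back down to the input
    //         input = alwaysFromSrcImg ? srcImg : fantasized[k];
    //     }
    //     return fantasized;                // one image per step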
00125 
00126 
00127     declareMethod(
00128         rmm, "fantasizeKTimeOnMultiSrcImg",
00129         &DeepBeliefNet::fantasizeKTimeOnMultiSrcImg,
00130         (BodyDoc("Calls the 'fantasizeKTime' function for each source image found in the matrix 'srcImg'."),
00131          ArgDoc ("kTime", "Number of times we want to fantasize for each source image. \n"
00132                  "The next input image will again be the source image (if alwaysFromSrcImg is True) \n"
00133                  "or the last fantasized image (if alwaysFromSrcImg is False), and so on for kTime steps."),
00134          ArgDoc ("srcImg", "Matrix of source images (should have the same width as the input layer)"),
00135          ArgDoc ("sampling", "Vector of bools indicating whether sampling is done at each hidden layer\n"
00136                 "during decoding. Its width indicates how many hidden layers will be used\n"
00137                 " (should have the same width as maskNoiseFractOrProb);\n"
00138                 "lower elements of the vector correspond to lower layers."),
00139          ArgDoc ("alwaysFromSrcImg", "Boolean indicating whether each encode-decode \n"
00140                 "step starts from the source image (set to True) or \n"
00141                 "from the preceding fantasized image (set to False). "),
00142          RetDoc ("For each source image, the fantasized images obtained at each of the kTime steps.")));
00143 }
00144 
00145 
00146 
00148 // declareOptions //
00150 void DeepBeliefNet::declareOptions(OptionList& ol)
00151 {
00152     declareOption(ol, "cd_learning_rate", &DeepBeliefNet::cd_learning_rate,
00153                   OptionBase::buildoption,
00154                   "The learning rate used during contrastive divergence"
00155                   " learning");
00156 
00157     declareOption(ol, "cd_decrease_ct", &DeepBeliefNet::cd_decrease_ct,
00158                   OptionBase::buildoption,
00159                   "The decrease constant of the learning rate used during"
00160                   " contrastive divergence");
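    // With a non-zero decrease constant, the effective learning rate at stage t
    // of the current greedy phase follows the schedule applied in train() below:
    //
    //     lr(t) = cd_learning_rate / (1 + cd_decrease_ct * t)
    //
    // e.g. cd_learning_rate = 0.1 and cd_decrease_ct = 1e-4 give lr = 0.05
    // after 10000 examples. The same 1/(1 + ct*t) form is used for
    // grad_decrease_ct in the online and fine-tuning loops.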
00161 
00162     declareOption(ol, "up_down_learning_rate",
00163                   &DeepBeliefNet::up_down_learning_rate,
00164                   OptionBase::buildoption,
00165                   "The learning rate used in the up-down algorithm during the\n"
00166                   "unsupervised fine tuning gradient descent.\n");
00167 
00168     declareOption(ol, "up_down_decrease_ct", &DeepBeliefNet::up_down_decrease_ct,
00169                   OptionBase::buildoption,
00170                   "The decrease constant of the learning rate used in the\n"
00171                   "up-down algorithm during the unsupervised fine tuning\n"
00172                   "gradient descent.\n");
00173 
00174     declareOption(ol, "grad_learning_rate", &DeepBeliefNet::grad_learning_rate,
00175                   OptionBase::buildoption,
00176                   "The learning rate used during gradient descent");
00177 
00178     declareOption(ol, "grad_decrease_ct", &DeepBeliefNet::grad_decrease_ct,
00179                   OptionBase::buildoption,
00180                   "The decrease constant of the learning rate used during"
00181                   " gradient descent");
00182 
00183     declareOption(ol, "batch_size", &DeepBeliefNet::batch_size,
00184                   OptionBase::buildoption,
00185         "Training batch size (1=stochastic learning, 0=full batch learning).");
00186 
00187     /* NOT IMPLEMENTED YET
00188     declareOption(ol, "grad_weight_decay", &DeepBeliefNet::grad_weight_decay,
00189                   OptionBase::buildoption,
00190                   "The weight decay used during the gradient descent");
00191     */
00192 
00193     declareOption(ol, "n_classes", &DeepBeliefNet::n_classes,
00194                   OptionBase::buildoption,
00195                   "Number of classes in the training set:\n"
00196                   "  - 0 means we are doing regression,\n"
00197                   "  - 1 means we have two classes, but only one output,\n"
00198                   "  - 2 means we also have two classes, but two outputs"
00199                   " summing to 1,\n"
00200                   "  - >2 is the usual multiclass case.\n"
00201                   );
00202 
00203     declareOption(ol, "training_schedule", &DeepBeliefNet::training_schedule,
00204                   OptionBase::buildoption,
00205                   "Number of examples to use during each phase of learning:\n"
00206                   "first the greedy phases, and then the fine-tuning phase.\n"
00207                   "However, the learning will stop as soon as we reach nstages.\n"
00208                   "For example for 2 hidden layers, with 1000 examples in each\n"
00209                   "greedy phase, and 500 in the fine-tuning phase, this option\n"
00210                   "should be [1000 1000 500], and nstages should be at least 2500.\n"
00211                   "When online = true, this vector is ignored and should be empty.\n");
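    // Worked example of how build_() turns this option into cumulative_schedule:
    //
    //     training_schedule   = [ 1000, 1000, 500 ]
    //     cumulative_schedule = [ 0, 1000, 2000, 2500 ]
    //
    // so stages [0,1000) train connection 0, stages [1000,2000) train
    // connection 1, and stages [2000,2500) do the supervised fine-tuning,
    // provided nstages >= 2500.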
00212 
00213     declareOption(ol, "up_down_nstages", &DeepBeliefNet::up_down_nstages,
00214                   OptionBase::buildoption,
00215                   "Number of samples to use for unsupervised fine-tuning\n"
00216                   "with the up-down algorithm. The unsupervised fine-tuning will\n"
00217                   "be executed between the greedy layer-wise learning and the\n"
00218                   "supervised fine-tuning. The up-down algorithm only works for\n"
00219                   "RBMMatrixConnection connections.\n");
00220 
00221     declareOption(ol, "use_classification_cost",
00222                   &DeepBeliefNet::use_classification_cost,
00223                   OptionBase::buildoption,
00224                   "Put the class target as an extra input of the top-level RBM\n"
00225                   "and compute and maximize conditional class probability in that\n"
00226                   "top layer (probability of the correct class given the other input\n"
00227                   "of the top-level RBM, which is the output of the rest of the network).\n");
00228 
00229     declareOption(ol, "reconstruct_layerwise",
00230                   &DeepBeliefNet::reconstruct_layerwise,
00231                   OptionBase::buildoption,
00232                   "Compute reconstruction error of each layer as an auto-encoder.\n"
00233                   "This is done using cross-entropy between actual and reconstructed.\n"
00234                   "This option automatically adds the following cost names:\n"
00235                   "   layerwise_reconstruction_error (sum over all layers)\n"
00236                   "   layer0.reconstruction_error (only layers[0])\n"
00237                   "   layer1.reconstruction_error (only layers[1])\n"
00238                   "   etc.\n");
00239 
00240     declareOption(ol, "layers", &DeepBeliefNet::layers,
00241                   OptionBase::buildoption,
00242                   "The layers of units in the network (including the input layer).");
00243 
00244     declareOption(ol, "i_output_layer", &DeepBeliefNet::i_output_layer,
00245                   OptionBase::buildoption,
00246                   "The index of the layer from which the output is computed\n"
00247                   "when there is neither a final_module nor a final_cost.\n"
00248                   "If -1, then the outputs (with this setting) will be\n"
00249                   "the expectations of the last layer.");
00250 
00251     declareOption(ol, "connections", &DeepBeliefNet::connections,
00252                   OptionBase::buildoption,
00253                   "The weights of the connections between the layers");
00254 
00255     declareOption(ol, "greedy_target_layers", &DeepBeliefNet::greedy_target_layers,
00256                   OptionBase::buildoption,
00257                   "Optional target layers for greedy layer-wise pretraining");
00258 
00259     declareOption(ol, "greedy_target_connections", &DeepBeliefNet::greedy_target_connections,
00260                   OptionBase::buildoption,
00261                   "Optional target matrix connections for greedy layer-wise pretraining");
00262 
00263     declareOption(ol, "learnerExpdir",
00264                   &DeepBeliefNet::learnerExpdir,
00265                   OptionBase::buildoption,
00266                   "Experiment directory where the learner will be saved\n"
00267                   "if save_learner_before_fine_tuning is true."
00268         );
00269 
00270     declareOption(ol, "save_learner_before_fine_tuning",
00271                   &DeepBeliefNet::save_learner_before_fine_tuning,
00272                   OptionBase::buildoption,
00273                   "Saves the learner before the supervised fine-tuning."
00274         );
00275 
00276     declareOption(ol, "classification_module",
00277                   &DeepBeliefNet::classification_module,
00278                   OptionBase::learntoption,
00279                   "The module computing the class probabilities (if"
00280                   " use_classification_cost)\n"
00281                   );
00282 
00283     declareOption(ol, "classification_cost",
00284                   &DeepBeliefNet::classification_cost,
00285                   OptionBase::nosave,
00286                   "The module computing the classification cost function (NLL)"
00287                   " on top\n"
00288                   "of classification_module.\n"
00289                   );
00290 
00291     declareOption(ol, "joint_layer", &DeepBeliefNet::joint_layer,
00292                   OptionBase::nosave,
00293                   "Concatenation of layers[n_layers-2] and the target layer\n"
00294                   "(that is inside classification_module), if"
00295                   " use_classification_cost.\n"
00296                  );
00297 
00298     declareOption(ol, "final_module", &DeepBeliefNet::final_module,
00299                   OptionBase::buildoption,
00300                   "Optional module that takes as input the output of the last"
00301                   " layer\n"
00302                   "layers[n_layers-1], and its output is fed to final_cost,"
00303                   " and\n"
00304                   "concatenated with the one of classification_cost (if"
00305                   " present)\n"
00306                   "as output of the learner.\n"
00307                   "If it is not provided, then the last layer will directly be"
00308                   " put as\n"
00309                   "input of final_cost.\n"
00310                  );
00311 
00312     declareOption(ol, "final_cost", &DeepBeliefNet::final_cost,
00313                   OptionBase::buildoption,
00314                   "The cost function to be applied on top of the DBN (or of\n"
00315                   "final_module if provided). Its gradients will be"
00316                   " backpropagated\n"
00317                   "to final_module, then combined with the one of"
00318                   " classification_cost and\n"
00319                   "backpropagated to the layers.\n"
00320                   );
00321 
00322     declareOption(ol, "partial_costs", &DeepBeliefNet::partial_costs,
00323                   OptionBase::buildoption,
00324                   "The different cost functions to be applied on top of each"
00325                   " layer\n"
00326                   "(except the first one) of the RBM. These costs are not\n"
00327                   "back-propagated to previous layers.\n");
00328 
00329     declareOption(ol, "use_sample_for_up_layer", &DeepBeliefNet::use_sample_for_up_layer,
00330                   OptionBase::buildoption,
00331                   "Indication that the update of the top layer during CD uses\n"
00332                   "a sample, not the expectation.\n");
00333 
00334     declareOption(ol, "use_corrupted_posDownVal",
00335                   &DeepBeliefNet::use_corrupted_posDownVal,
00336                   OptionBase::buildoption,
00337                   "Indicates whether we will use a corrupted version of the\n"
00338                   "positive down value during the CD step.\n"
00339                   "Choose among:\n"
00340                   " - \"for_cd_fprop\"\n"
00341                   " - \"for_cd_update\"\n"
00342                   " - \"none\"\n");
00343 
00344     declareOption(ol, "noise_type",
00345                   &DeepBeliefNet::noise_type,
00346                   OptionBase::buildoption,
00347                   "Type of noise that corrupts the pos_down_val. "
00348                   "Choose among:\n"
00349                   " - \"masking_noise\"\n"
00350                   " - \"none\"\n");
00351 
00352     declareOption(ol, "fraction_of_masked_inputs",
00353                   &DeepBeliefNet::fraction_of_masked_inputs,
00354                   OptionBase::buildoption,
00355                   "Fraction of the pos_down_val components which\n"
00356                   "will be masked.\n");
00357 
00358     declareOption(ol, "mask_with_pepper_salt",
00359                   &DeepBeliefNet::mask_with_pepper_salt,
00360                   OptionBase::buildoption,
00361                   "Indication that inputs should be masked with "
00362                   "0 or 1 according to prob_salt_noise.\n");
00363 
00364     declareOption(ol, "prob_salt_noise",
00365                   &DeepBeliefNet::prob_salt_noise,
00366                   OptionBase::buildoption,
00367                   "Probability that we mask the input by 1 instead of 0.\n");
00368 
00369     declareOption(ol, "online", &DeepBeliefNet::online,
00370                   OptionBase::buildoption,
00371                   "If true then all unsupervised training stages (as well as\n"
00372                   "the fine-tuning stage) are done simultaneously.\n");
00373 
00374     declareOption(ol, "background_gibbs_update_ratio", &DeepBeliefNet::background_gibbs_update_ratio,
00375                   OptionBase::buildoption,
00376                   "Coefficient between 0 and 1. If non-zero, run a background Gibbs chain and use\n"
00377                   "the visible-hidden statistics to contribute to the negative phase update\n"
00378                   "(in proportion background_gibbs_update_ratio wrt the contrastive divergence\n"
00379                   "negative phase statistics). If = 1, then do not perform any contrastive\n"
00380                   "divergence negative phase (use only the Gibbs chain statistics).\n");
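    // In other words, the negative-phase statistics used for the update are,
    // conceptually (r = background_gibbs_update_ratio; a sketch of the weighting
    // described above, not a literal code excerpt):
    //
    //     neg_stats = (1 - r) * cd_negative_phase_stats
    //                 +   r   * background_gibbs_chain_stats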
00381 
00382     declareOption(ol, "gibbs_chain_reinit_freq",
00383                   &DeepBeliefNet::gibbs_chain_reinit_freq,
00384                   OptionBase::buildoption,
00385                   "After how many training examples to re-initialize the Gibbs chains.\n"
00386                   "If == INT_MAX, the default value of this option, then NEVER\n"
00387                   "re-initialize except at the beginning, when stage==0.\n");
00388 
00389     declareOption(ol, "mean_field_contrastive_divergence_ratio",
00390                   &DeepBeliefNet::mean_field_contrastive_divergence_ratio,
00391                   OptionBase::buildoption,
00392                   "Coefficient between 0 and 1. 0 means CD-1 update only and\n"
00393                   "1 means MF-CD only. Values in between mean a weighted\n"
00394                   "combination of both.\n");
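    // Conceptually (r = mean_field_contrastive_divergence_ratio; a sketch of
    // the weighting described above, not a literal code excerpt):
    //
    //     update = (1 - r) * cd1_update + r * mean_field_cd_update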
00395 
00396     declareOption(ol, "train_stats_window",
00397                   &DeepBeliefNet::train_stats_window,
00398                   OptionBase::buildoption,
00399                   "The number of samples to use to compute training stats.\n"
00400                   "-1 (default) means the number of training samples.\n");
00401 
00402     declareOption(ol, "top_layer_joint_cd", &DeepBeliefNet::top_layer_joint_cd,
00403                   OptionBase::buildoption,
00404                   "Whether we do a step of joint contrastive divergence on"
00405                   " the top layer.\n"
00406                   "Only used in the online setting for the moment.\n");
00407 
00408     declareOption(ol, "n_layers", &DeepBeliefNet::n_layers,
00409                   OptionBase::learntoption,
00410                   "Number of layers");
00411 
00412     declareOption(ol, "minibatch_size", &DeepBeliefNet::minibatch_size,
00413                   OptionBase::learntoption,
00414                   "Actual size of a mini-batch (size of the training set if"
00415                   " batch_size==0).");
00416 
00417     declareOption(ol, "gibbs_down_state", &DeepBeliefNet::gibbs_down_state,
00418                   OptionBase::learntoption,
00419                   "State of visible units of RBMs at each layer in background"
00420                   " Gibbs chain.");
00421 
00422     declareOption(ol, "cumulative_training_time",
00423                   &DeepBeliefNet::cumulative_training_time,
00424                   OptionBase::learntoption | OptionBase::nosave,
00425                   "Cumulative training time since age=0, in seconds.\n");
00426 
00427     declareOption(ol, "cumulative_testing_time",
00428                   &DeepBeliefNet::cumulative_testing_time,
00429                   OptionBase::learntoption | OptionBase::nosave,
00430                   "Cumulative testing time since age=0, in seconds.\n");
00431 
00432     declareOption(ol, "up_down_stage", &DeepBeliefNet::up_down_stage,
00433                   OptionBase::learntoption,
00434                   "Number of samples visited so far during unsupervised\n"
00435                   "fine-tuning.\n");
00436 
00437     declareOption(ol, "generative_connections",
00438                   &DeepBeliefNet::generative_connections,
00439                   OptionBase::learntoption,
00440                   "The untied generative weights of the connections\n"
00441                   "between the layers\n"
00442                   "for the up-down algorithm.\n");
00443 
00444     // Now call the parent class' declareOptions
00445     inherited::declareOptions(ol);
00446 }
00447 
00449 // build_ //
00451 void DeepBeliefNet::build_()
00452 {
00453     PLASSERT( batch_size >= 0 );
00454 
00455     MODULE_LOG << "build_() called" << endl;
00456 
00457     // Initialize some learnt variables
00458     if (layers.isEmpty())
00459         PLERROR("In DeepBeliefNet::build_ - You must provide at least one RBM "
00460                 "layer through the 'layers' option");
00461     else
00462         n_layers = layers.length();
00463 
00464     if( i_output_layer < 0)
00465         i_output_layer = n_layers - 1;
00466 
00467     if( online && up_down_nstages > 0)
00468         PLERROR("In DeepBeliefNet::build_ - up-down algorithm not implemented "
00469             "for online setting.");
00470 
00471     if( batch_size != 1 && up_down_nstages > 0 )
00472         PLERROR("In DeepBeliefNet::build_ - up-down algorithm not implemented "
00473             "for minibatch setting.");
00474 
00475     if( mean_field_contrastive_divergence_ratio > 0 &&
00476         background_gibbs_update_ratio != 0 )
00477         PLERROR("In DeepBeliefNet::build_ - mean-field CD cannot be used "
00478                 "with background_gibbs_update_ratio != 0.");
00479 
00480     if( mean_field_contrastive_divergence_ratio > 0 &&
00481         use_sample_for_up_layer )
00482         PLERROR("In DeepBeliefNet::build_ - mean-field CD cannot be used "
00483                 "with use_sample_for_up_layer.");
00484 
00485     if( mean_field_contrastive_divergence_ratio < 0 ||
00486         mean_field_contrastive_divergence_ratio > 1 )
00487         PLERROR("In DeepBeliefNet::build_ - mean_field_contrastive_divergence_ratio should "
00488             "be in [0,1].");
00489 
00490     if( use_corrupted_posDownVal != "for_cd_fprop" &&
00491         use_corrupted_posDownVal != "for_cd_update" &&
00492         use_corrupted_posDownVal != "none" )
00493         PLERROR("In DeepBeliefNet::build_ - use_corrupted_posDownVal should "
00494             "be chosen among {\"for_cd_fprop\",\"for_cd_update\",\"none\"}.");
00495 
00496     if( !online )
00497     {
00498         if( training_schedule.length() != n_layers )
00499         {
00500             PLWARNING("In DeepBeliefNet::build_ - training_schedule.length() "
00501                     "!= n_layers, resizing and zeroing");
00502             training_schedule.resize( n_layers );
00503             training_schedule.fill( 0 );
00504         }
00505 
00506         cumulative_schedule.resize( n_layers+1 );
00507         cumulative_schedule[0] = 0;
00508         for( int i=0 ; i<n_layers ; i++ )
00509         {
00510             cumulative_schedule[i+1] = cumulative_schedule[i] +
00511                 training_schedule[i];
00512         }
00513     }
00514 
00515     build_layers_and_connections();
00516 
00517     // Activate the profiler
00518     Profiler::activate();
00519 
00520     build_costs();
00521 }
00522 
00524 // build_costs //
00526 void DeepBeliefNet::build_costs()
00527 {
00528     cost_names.resize(0);
00529     int current_index = 0;
00530 
00531     // build the classification module, its cost and the joint layer
00532     if( use_classification_cost )
00533     {
00534         PLASSERT( n_classes >= 2 );
00535         build_classification_cost();
00536 
00537         cost_names.append("NLL");
00538         nll_cost_index = current_index;
00539         current_index++;
00540 
00541         cost_names.append("class_error");
00542         class_cost_index = current_index;
00543         current_index++;
00544     }
00545 
00546     if( final_cost )
00547     {
00548         build_final_cost();
00549 
00550         TVec<string> final_names = final_cost->costNames();
00551         int n_final_costs = final_names.length();
00552 
00553         for( int i=0; i<n_final_costs; i++ )
00554             cost_names.append("final." + final_names[i]);
00555 
00556         final_cost_index = current_index;
00557         current_index += n_final_costs;
00558     }
00559 
00560     if( partial_costs )
00561     {
00562         int n_partial_costs = partial_costs.length();
00563         if( n_partial_costs != n_layers - 1)
00564             PLERROR("DeepBeliefNet::build_costs() - \n"
00565                     "partial_costs.length() (%d) != n_layers-1 (%d).\n",
00566                     n_partial_costs, n_layers-1);
00567         partial_costs_indices.resize(n_partial_costs);
00568 
00569         for( int i=0; i<n_partial_costs; i++ )
00570             if( partial_costs[i] )
00571             {
00572                 TVec<string> names = partial_costs[i]->costNames();
00573                 int n_partial_costs_i = names.length();
00574                 for( int j=0; j<n_partial_costs_i; j++ )
00575                     cost_names.append("partial"+tostring(i)+"."+names[j]);
00576                 partial_costs_indices[i] = current_index;
00577                 current_index += n_partial_costs_i;
00578 
00579                 // Share random_gen with partial_costs[i], unless it already
00580                 // has one
00581                 if( !(partial_costs[i]->random_gen) )
00582                 {
00583                     partial_costs[i]->random_gen = random_gen;
00584                     partial_costs[i]->forget();
00585                 }
00586             }
00587             else
00588                 partial_costs_indices[i] = -1;
00589     }
00590     else
00591         partial_costs_indices.resize(0);
00592 
00593     if( reconstruct_layerwise )
00594     {
00595         reconstruction_costs.resize(n_layers);
00596 
00597         cost_names.append("layerwise_reconstruction_error");
00598         reconstruction_cost_index = current_index;
00599         current_index++;
00600 
00601         for( int i=0; i<n_layers-1; i++ )
00602             cost_names.append("layer"+tostring(i)+".reconstruction_error");
00603         current_index += n_layers-1;
00604     }
00605     else
00606         reconstruction_costs.resize(0);
00607 
00608     if( !greedy_target_layers.isEmpty() )
00609     {
00610         greedy_target_layer_nlls_index = current_index;
00611         target_one_hot.resize(n_classes);
00612         for( int i=0; i<n_layers-1; i++ )
00613         {
00614             cost_names.append("layer"+tostring(i)+".nll");
00615             current_index++;
00616         }
00617     }
00618 
00619 
00620     cost_names.append("cpu_time");
00621     cost_names.append("cumulative_train_time");
00622     cost_names.append("cumulative_test_time");
00623 
00624     training_cpu_time_cost_index = current_index;
00625     current_index++;
00626     cumulative_training_time_cost_index = current_index;
00627     current_index++;
00628     cumulative_testing_time_cost_index = current_index;
00629     current_index++;
00630 
00631     PLASSERT( current_index == cost_names.length() );
00632 }
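// Example of the resulting cost layout for a 3-layer net (n_layers == 3) with
// use_classification_cost == true, a final_cost reporting a single "NLL"
// cost, no partial_costs, and reconstruct_layerwise == true (indices are
// those assigned by the code above; the configuration itself is illustrative):
//
//     0: NLL                              (nll_cost_index)
//     1: class_error                      (class_cost_index)
//     2: final.NLL                        (final_cost_index)
//     3: layerwise_reconstruction_error   (reconstruction_cost_index)
//     4: layer0.reconstruction_error
//     5: layer1.reconstruction_error
//     6: cpu_time                         (training_cpu_time_cost_index)
//     7: cumulative_train_time            (cumulative_training_time_cost_index)
//     8: cumulative_test_time             (cumulative_testing_time_cost_index)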
00633 
00635 // build_layers_and_connections //
00637 void DeepBeliefNet::build_layers_and_connections()
00638 {
00639     MODULE_LOG << "build_layers_and_connections() called" << endl;
00640 
00641     if( connections.length() != n_layers-1 )
00642         PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00643                 "connections.length() (%d) != n_layers-1 (%d).\n",
00644                 connections.length(), n_layers-1);
00645 
00646     if( inputsize_ >= 0 )
00647         PLASSERT( layers[0]->size == inputsize() );
00648 
00649     activation_gradients.resize( n_layers );
00650     activations_gradients.resize( n_layers );
00651     expectation_gradients.resize( n_layers );
00652     expectations_gradients.resize( n_layers );
00653     gibbs_down_state.resize( n_layers-1 );
00654     expectation_indices.resize( n_layers-1 );
00655 
00656     for( int i=0 ; i<n_layers-1 ; i++ )
00657     {
00658         if( layers[i]->size != connections[i]->down_size )
00659             PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00660                     "layers[%i]->size (%d) != connections[%i]->down_size (%d)."
00661                     "\n", i, layers[i]->size, i, connections[i]->down_size);
00662 
00663         if( connections[i]->up_size != layers[i+1]->size )
00664             PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00665                     "connections[%i]->up_size (%d) != layers[%i]->size (%d)."
00666                     "\n", i, connections[i]->up_size, i+1, layers[i+1]->size);
00667 
00668         // Assign random_gen to layers[i] and connections[i], unless they
00669         // already have one
00670         if( !(layers[i]->random_gen) )
00671         {
00672             layers[i]->random_gen = random_gen;
00673             layers[i]->forget();
00674         }
00675         if( !(connections[i]->random_gen) )
00676         {
00677             connections[i]->random_gen = random_gen;
00678             connections[i]->forget();
00679         }
00680 
00681         activation_gradients[i].resize( layers[i]->size );
00682         expectation_gradients[i].resize( layers[i]->size );
00683 
00684 
00685         if( greedy_target_layers.length()>i && greedy_target_layers[i] )
00686         {
00687             if( use_classification_cost )
00688                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00689                         "use_classification_cost not implemented for greedy_target_layers.");
00690 
00691             if( greedy_target_connections.length()>i && !greedy_target_connections[i] )
00692                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00693                         "some greedy_target_connections are missing.");
00694 
00695             if( greedy_target_layers[i]->size != n_classes)
00696                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00697                         "greedy_target_layers[%d] should be of size %d.",i,n_classes);
00698 
00699             if( greedy_target_connections[i]->down_size != n_classes ||
00700                 greedy_target_connections[i]->up_size != layers[i+1]->size )
00701                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00702                         "greedy_target_connections[%d] should be of size (%d,%d).",
00703                         i,layers[i+1]->size,n_classes);
00704                 
00705             if( partial_costs.length() != 0 )
00706                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00707                         "greedy_target_layers can't be used with partial_costs.");
00708                 
00709             greedy_target_expectations.resize(n_layers-1);
00710             greedy_target_activations.resize(n_layers-1);
00711             greedy_target_expectation_gradients.resize(n_layers-1);
00712             greedy_target_activation_gradients.resize(n_layers-1);
00713             greedy_target_probability_gradients.resize(n_layers-1);
00714 
00715             greedy_target_expectations[i].resize(n_classes);
00716             greedy_target_activations[i].resize(n_classes);
00717             greedy_target_expectation_gradients[i].resize(n_classes);
00718             greedy_target_activation_gradients[i].resize(n_classes);
00719             greedy_target_probability_gradients[i].resize(n_classes);
00720             for( int c=0; c<n_classes; c++) 
00721             {
00722                 greedy_target_expectations[i][c].resize(layers[i+1]->size);
00723                 greedy_target_activations[i][c].resize(layers[i+1]->size);
00724                 greedy_target_expectation_gradients[i][c].resize(layers[i+1]->size);
00725                 greedy_target_activation_gradients[i][c].resize(layers[i+1]->size);
00726             }
00727 
00728             greedy_joint_layers.resize(n_layers-1);
00729             PP<RBMMixedLayer> ml = new RBMMixedLayer();
00730             ml->sub_layers.resize(2);
00731             ml->sub_layers[0] = layers[ i ];
00732             ml->sub_layers[1] = greedy_target_layers[ i ];
00733             ml->random_gen = random_gen;
00734             ml->build();
00735             greedy_joint_layers[i] = (RBMMixedLayer *)ml;
00736 
00737             greedy_joint_connections.resize(n_layers-1);
00738             PP<RBMMixedConnection> mc = new RBMMixedConnection();
00739             mc->sub_connections.resize(1,2);
00740             mc->sub_connections(0,0) = connections[i];
00741             mc->sub_connections(0,1) = greedy_target_connections[i];
00742             mc->build();
00743             greedy_joint_connections[i] = (RBMMixedConnection *)mc;
00744 
00745             if( !(greedy_target_connections[i]->random_gen) )
00746             {
00747                 greedy_target_connections[i]->random_gen = random_gen;
00748                 greedy_target_connections[i]->forget();
00749             }
00750             if( !(greedy_target_layers[i]->random_gen) )
00751             {
00752                 greedy_target_layers[i]->random_gen = random_gen;
00753                 greedy_target_layers[i]->forget();
00754             }
00755         }
00756         if( use_corrupted_posDownVal != "none" )
00757         {
00758             if( greedy_target_layers.length() != 0 )
00759                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00760                         "use_corrupted_posDownVal not implemented for greedy_target_layers.");
00761 
00762             if( online )
00763                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00764                         "use_corrupted_posDownVal not implemented for online.");
00765 
00766             if( use_classification_cost )
00767                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00768                         "use_classification_cost not implemented for use_corrupted_posDownVal.");
00769 
00770             if( background_gibbs_update_ratio != 0 )
00771                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00772                         "use_corrupted_posDownVal not implemented with background_gibbs_update_ratio!=0.");
00773 
00774             if( batch_size != 1 || minibatch_hack )
00775                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00776                         "use_corrupted_posDownVal not implemented for batch_size != 1 or minibatch_hack.");
00777     
00778             if( !partial_costs.isEmpty() )
00779                 PLERROR("DeepBeliefNet::build_layers_and_connections() - \n"
00780                         "use_corrupted_posDownVal not implemented for partial_costs.");
00781 
00782             if( noise_type == "masking_noise" && fraction_of_masked_inputs > 0 )
00783             {
00784                 expectation_indices[i].resize( layers[i]->size );
00785                 for( int j=0 ; j < expectation_indices[i].length() ; j++ )
00786                     expectation_indices[i][j] = j;
00787             }
00788         }
00789     }
00790     if( !(layers[n_layers-1]->random_gen) )
00791     {
00792         layers[n_layers-1]->random_gen = random_gen;
00793         layers[n_layers-1]->forget();
00794     }
00795     int last_layer_size = layers[n_layers-1]->size;
00796     PLASSERT_MSG(last_layer_size >= 0,
00797                  "Size of last layer must be non-negative");
00798     activation_gradients[n_layers-1].resize(last_layer_size);
00799     expectation_gradients[n_layers-1].resize(last_layer_size);
00800 }
00801 
00803 // build_classification_cost //
00805 void DeepBeliefNet::build_classification_cost()
00806 {
00807     MODULE_LOG << "build_classification_cost() called" << endl;
00808 
00809     PLASSERT_MSG(batch_size == 1, "DeepBeliefNet::build_classification_cost - "
00810             "This method has not been verified yet for minibatch "
00811             "compatibility");
00812 
00813     PP<RBMMatrixConnection> last_to_target;
00814     if (classification_module)
00815         last_to_target = classification_module->last_to_target;
00816     if (!last_to_target ||
00817          last_to_target->up_size != layers[n_layers-1]->size ||
00818          last_to_target->down_size != n_classes ||
00819          last_to_target->random_gen != random_gen)
00820     {
00821         // We need to (re-)create 'last_to_target', and thus the classification
00822         // module too.
00823         // This is not systematically done so that the learner can be
00824         // saved and loaded without losing learned parameters.
00825         last_to_target = new RBMMatrixConnection();
00826         last_to_target->up_size = layers[n_layers-1]->size;
00827         last_to_target->down_size = n_classes;
00828         last_to_target->random_gen = random_gen;
00829         last_to_target->build();
00830 
00831         PP<RBMMultinomialLayer> target_layer = new RBMMultinomialLayer();
00832         target_layer->size = n_classes;
00833         target_layer->random_gen = random_gen;
00834         target_layer->build();
00835 
00836         PLASSERT_MSG(n_layers >= 2, "You must specify at least two layers (the "
00837                 "input layer and one hidden layer)");
00838 
00839         classification_module = new RBMClassificationModule();
00840         classification_module->previous_to_last = connections[n_layers-2];
00841         classification_module->last_layer =
00842             (RBMBinomialLayer*) (RBMLayer*) layers[n_layers-1];
00843         classification_module->last_to_target = last_to_target;
00844         classification_module->target_layer = target_layer;
00845         classification_module->random_gen = random_gen;
00846         classification_module->build();
00847     }
00848 
00849     classification_cost = new NLLCostModule();
00850     classification_cost->input_size = n_classes;
00851     classification_cost->target_size = 1;
00852     classification_cost->build();
00853 
00854     joint_layer = new RBMMixedLayer();
00855     joint_layer->sub_layers.resize( 2 );
00856     joint_layer->sub_layers[0] = layers[ n_layers-2 ];
00857     joint_layer->sub_layers[1] = classification_module->target_layer;
00858     joint_layer->random_gen = random_gen;
00859     joint_layer->build();
00860 }
00861 
00863 // build_final_cost //
00865 void DeepBeliefNet::build_final_cost()
00866 {
00867     MODULE_LOG << "build_final_cost() called" << endl;
00868 
00869     PLASSERT_MSG(final_cost->input_size >= 0, "The input size of the final "
00870             "cost must be non-negative");
00871 
00872     final_cost_gradient.resize( final_cost->input_size );
00873     final_cost->setLearningRate( grad_learning_rate );
00874 
00875     if( final_module )
00876     {
00877         if( layers[n_layers-1]->size != final_module->input_size )
00878             PLERROR("DeepBeliefNet::build_final_cost() - "
00879                     "layers[%i]->size (%d) != final_module->input_size (%d)."
00880                     "\n", n_layers-1, layers[n_layers-1]->size,
00881                     final_module->input_size);
00882 
00883         if( final_module->output_size != final_cost->input_size )
00884             PLERROR("DeepBeliefNet::build_final_cost() - "
00885                     "final_module->output_size (%d) != final_cost->input_size (%d)."
00886                     "\n", final_module->output_size,
00887                     final_cost->input_size);
00888 
00889         final_module->setLearningRate( grad_learning_rate );
00890 
00891         // Share random_gen with final_module, unless it already has one
00892         if( !(final_module->random_gen) )
00893         {
00894             final_module->random_gen = random_gen;
00895             final_module->forget();
00896         }
00897 
00898         // check target size and final_cost->input_size
00899         if( n_classes == 0 ) // regression
00900         {
00901             if( targetsize_ >= 0 && final_cost->input_size != targetsize() )
00902                 PLERROR("DeepBeliefNet::build_final_cost() - "
00903                     "final_cost->input_size (%d) != targetsize() (%d), "
00904                     "although we are doing regression (n_classes == 0).\n",
00905                     final_cost->input_size, targetsize());
00906         }
00907         else
00908         {
00909             if( final_cost->input_size != n_classes )
00910                 PLERROR("DeepBeliefNet::build_final_cost() - "
00911                     "final_cost->input_size (%d) != n_classes (%d), "
00912                     "although we are doing classification (n_classes != 0).\n",
00913                     final_cost->input_size, n_classes);
00914 
00915             if( targetsize_ >= 0 && targetsize() != 1 )
00916                 PLERROR("DeepBeliefNet::build_final_cost() - "
00917                     "targetsize() (%d) != 1, "
00918                     "although we are doing classification (n_classes != 0).\n",
00919                     targetsize());
00920         }
00921     }
00922     else
00923     {
00924         if( layers[n_layers-1]->size != final_cost->input_size )
00925             PLERROR("DeepBeliefNet::build_final_cost() - "
00926                     "layers[%i]->size (%d) != final_cost->input_size (%d)."
00927                     "\n", n_layers-1, layers[n_layers-1]->size,
00928                     final_cost->input_size);
00929     }
00930 
00931 
00932     // Share random_gen with final_cost, unless it already has one
00933     if( !(final_cost->random_gen) )
00934     {
00935         final_cost->random_gen = random_gen;
00936         final_cost->forget();
00937     }
00938 }
00939 
00941 // build //
00943 void DeepBeliefNet::build()
00944 {
00945     inherited::build();
00946     build_();
00947 }
00948 
00950 // makeDeepCopyFromShallowCopy //
00952 void DeepBeliefNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00953 {
00954     inherited::makeDeepCopyFromShallowCopy(copies);
00955 
00956     deepCopyField(training_schedule,        copies);
00957     deepCopyField(layers,                   copies);
00958     deepCopyField(connections,              copies);
00959     deepCopyField(greedy_target_layers,     copies);
00960     deepCopyField(greedy_target_connections,copies);
00961     deepCopyField(final_module,             copies);
00962     deepCopyField(final_cost,               copies);
00963     deepCopyField(partial_costs,            copies);
00964     deepCopyField(classification_module,    copies);
00965     deepCopyField(cost_names,               copies);
00966     deepCopyField(timer,                    copies);
00967     deepCopyField(classification_cost,      copies);
00968     deepCopyField(joint_layer,              copies);
00969     deepCopyField(activation_gradients,     copies);
00970     deepCopyField(activations_gradients,    copies);
00971     deepCopyField(expectation_gradients,    copies);
00972     deepCopyField(expectations_gradients,   copies);
00973     deepCopyField(greedy_target_expectations,copies);
00974     deepCopyField(greedy_target_activations, copies);
00975     deepCopyField(greedy_target_expectation_gradients,copies);
00976     deepCopyField(greedy_target_activation_gradients,copies);
00977     deepCopyField(greedy_target_probability_gradients,copies);
00978     deepCopyField(greedy_joint_layers   ,   copies);
00979     deepCopyField(greedy_joint_connections, copies);
00980     deepCopyField(final_cost_input,         copies);
00981     deepCopyField(final_cost_inputs,        copies);
00982     deepCopyField(final_cost_value,         copies);
00983     deepCopyField(final_cost_values,        copies);
00984     deepCopyField(final_cost_output,        copies);
00985     deepCopyField(class_output,             copies);
00986     deepCopyField(class_gradient,           copies);
00987     deepCopyField(final_cost_gradient,      copies);
00988     deepCopyField(final_cost_gradients,     copies);
00989     deepCopyField(save_layer_activation,    copies);
00990     deepCopyField(save_layer_expectation,   copies);
00991     deepCopyField(save_layer_activations,   copies);
00992     deepCopyField(save_layer_expectations,  copies);
00993     deepCopyField(pos_down_val,             copies);
00994     deepCopyField(corrupted_pos_down_val,   copies);
00995     deepCopyField(pos_up_val,               copies);
00996     deepCopyField(pos_down_vals,            copies);
00997     deepCopyField(pos_up_vals,              copies);
00998     deepCopyField(cd_neg_down_vals,         copies);
00999     deepCopyField(cd_neg_up_vals,           copies);
01000     deepCopyField(mf_cd_neg_down_vals,      copies);
01001     deepCopyField(mf_cd_neg_up_vals,        copies);
01002     deepCopyField(mf_cd_neg_down_val,       copies);
01003     deepCopyField(mf_cd_neg_up_val,         copies);
01004     deepCopyField(gibbs_down_state,         copies);
01005     deepCopyField(optimized_costs,          copies);
01006     deepCopyField(target_one_hot,           copies);
01007     deepCopyField(reconstruction_costs,     copies);
01008     deepCopyField(partial_costs_indices,    copies);
01009     deepCopyField(cumulative_schedule,      copies);
01010     deepCopyField(layer_input,              copies);
01011     deepCopyField(layer_inputs,             copies);
01012     deepCopyField(generative_connections,   copies);
01013     deepCopyField(up_sample,                copies);
01014     deepCopyField(down_sample,              copies);
01015     deepCopyField(expectation_indices,      copies);
01016 }
01017 
01018 
01020 // outputsize //
01022 int DeepBeliefNet::outputsize() const
01023 {
01024     int out_size = 0;
01025     if( use_classification_cost )
01026         out_size += n_classes;
01027 
01028     if( final_module )
01029         out_size += final_module->output_size;
01030 
01031     if( !use_classification_cost && !final_module )
01032         out_size += layers[i_output_layer]->size;
01033 
01034     return out_size;
01035 }
01036 
01038 // forget //
01040 void DeepBeliefNet::forget()
01041 {
01042     inherited::forget();
01043 
01044     for( int i=0 ; i<n_layers ; i++ )
01045         layers[i]->forget();
01046 
01047     for( int i=0 ; i<n_layers-1 ; i++ )
01048         connections[i]->forget();
01049 
01050     if( use_classification_cost )
01051     {
01052         classification_cost->forget();
01053         classification_module->forget();
01054     }
01055 
01056     if( final_module )
01057         final_module->forget();
01058 
01059     if( final_cost )
01060         final_cost->forget();
01061 
01062     if( !partial_costs.isEmpty() )
01063         for( int i=0 ; i<n_layers-1 ; i++ )
01064             if( partial_costs[i] )
01065                 partial_costs[i]->forget();
01066 
01067     for( int i=0 ; i<generative_connections.length() ; i++ )
01068         generative_connections[i]->forget();
01069 
01070     for( int i=0; i<greedy_target_connections.length(); i++ )
01071         greedy_target_connections[i]->forget();
01072 
01073     for( int i=0; i<greedy_target_layers.length(); i++ )
01074         greedy_target_layers[i]->forget();
01075 
01076     cumulative_training_time = 0;
01077     cumulative_testing_time = 0;
01078     up_down_stage = 0;
01079 }
01080 
01082 // train //
01084 void DeepBeliefNet::train()
01085 {
01086     MODULE_LOG << "train() called " << endl;
01087 
01088     if (!online)
01089     {
01090         // Enforce value of cumulative_schedule because build_() might
01091         // not be called if we change training_schedule inside a HyperLearner
01092         for( int i=0 ; i<n_layers ; i++ )
01093             cumulative_schedule[i+1] = cumulative_schedule[i] +
01094                 training_schedule[i];
01095     }
01096 
01097     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
01098     MODULE_LOG << "  cumulative_schedule = " << cumulative_schedule << endl;
01099     MODULE_LOG << "stage = " << stage
01100         << ", target nstages = " << nstages << endl;
01101 
01102     PLASSERT( train_set );
01103     int n_train_stats_samples = (train_stats_window >= 0)
01104         ? train_stats_window
01105         : train_set->length();
01106 
01107     // Training set-dependent initialization.
01108     minibatch_size = batch_size > 0 ? batch_size : train_set->length();
01109     for (int i = 0 ; i < n_layers; i++)
01110     {
01111         activations_gradients[i].resize(minibatch_size, layers[i]->size);
01112         expectations_gradients[i].resize(minibatch_size, layers[i]->size);
01113 
01114         if (background_gibbs_update_ratio>0 && i<n_layers-1)
01115             gibbs_down_state[i].resize(minibatch_size, layers[i]->size);
01116     }
01117     if (final_cost)
01118         final_cost_gradients.resize(minibatch_size, final_cost->input_size);
01119     optimized_costs.resize(minibatch_size);
01120 
01121     Vec input( inputsize() );
01122     Vec target( targetsize() );
01123     real weight; // unused
01124     Mat inputs(minibatch_size, inputsize());
01125     Mat targets(minibatch_size, targetsize());
01126     Vec weights;
01127 
01128     TVec<string> train_cost_names = getTrainCostNames() ;
01129     Vec train_costs( train_cost_names.length() );
01130     Mat train_costs_m(minibatch_size, train_cost_names.length());
01131     train_costs.fill(MISSING_VALUE) ;
01132     train_costs_m.fill(MISSING_VALUE);
01133 
01134     int nsamples = train_set->length();
01135 
01136     if( !initTrain() )
01137     {
01138         MODULE_LOG << "train() aborted" << endl;
01139         return;
01140     }
01141 
01142     PP<ProgressBar> pb;
01143 
01144     // Start the actual time counting
01145     Profiler::reset("training");
01146     Profiler::start("training");
01147 
01148     // clear stats of previous epoch
01149     train_stats->forget();
01150 
01151     if (online)
01152     {
01153         // Train all layers simultaneously AND fine-tuning as well!
01154         int init_stage = stage;
01155         if( report_progress && stage < nstages )
01156             pb = new ProgressBar( "Training "+classname(),
01157                                   nstages - init_stage );
01158 
01159         setLearningRate( grad_learning_rate );
01160         train_stats->forget();
01161 
01162         for( ; stage < nstages; stage++)
01163         {
01164             initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
01165 
01166             // Do a step every 'minibatch_size' examples.
01167             if (stage % minibatch_size == 0)
01168             {
01169                 int sample_start = stage % nsamples;
01170                 if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01171                     setLearningRate( grad_learning_rate
01172                                      / (1. + grad_decrease_ct * stage ));
01173 
01174                 if (minibatch_size > 1 || minibatch_hack)
01175                 {
01176                     train_set->getExamples(sample_start, minibatch_size,
01177                                            inputs, targets, weights, NULL, true);
01178                     train_costs_m.fill(MISSING_VALUE);
01179 
01180                     if (reconstruct_layerwise)
01181                         train_costs_m.column(reconstruction_cost_index).clear();
01182 
01183                     onlineStep( inputs, targets, train_costs_m );
01184                 }
01185                 else
01186                 {
01187                     train_set->getExample(sample_start, input, target, weight);
01188                     onlineStep( input, target, train_costs );
01189                 }
01190 
01191                 // Update stats if we are in the last n_train_stats_samples
01192                 if (stage >= nstages - n_train_stats_samples){
01193                     if (minibatch_size > 1 || minibatch_hack)
01194                         for (int k = 0; k < minibatch_size; k++)
01195                             train_stats->update(train_costs_m(k));
01196                     else
01197                         train_stats->update(train_costs);
01198                 }
01199             }
01200 
01201             if( pb )
01202                 pb->update( stage - init_stage + 1 );
01203         }
01204     }
01205     else // Greedy learning, one layer at a time.
01206     {
01207         /***** initial greedy training *****/
01208         for( int i=0 ; i<n_layers-1 ; i++ )
01209         {
01210             if( use_classification_cost && i == n_layers-2 )
01211                 break; // we will do a joint supervised learning instead
01212 
01213             int end_stage = min(cumulative_schedule[i+1], nstages);
01214             if( stage >= end_stage )
01215                 continue;
01216 
01217             MODULE_LOG << "Training connection weights between layers " << i
01218                        << " and " << i+1 << endl;
01219             MODULE_LOG << "  stage = " << stage << endl;
01220             MODULE_LOG << "  end_stage = " << end_stage << endl;
01221             MODULE_LOG << "  cd_learning_rate = " << cd_learning_rate << endl;
01222 
01223             if( report_progress )
01224                 pb = new ProgressBar( "Training layer "+tostring(i)
01225                                       +" of "+classname(),
01226                                       end_stage - stage );
01227 
01228             layers[i]->setLearningRate( cd_learning_rate );
01229             connections[i]->setLearningRate( cd_learning_rate );
01230             layers[i+1]->setLearningRate( cd_learning_rate );
01231 
01232             if( greedy_target_layers.length() && greedy_target_layers[i] )
01233                 greedy_target_layers[i]->setLearningRate( cd_learning_rate );
01234             if( greedy_target_connections.length() && greedy_target_connections[i] )
01235                 greedy_target_connections[i]->setLearningRate( cd_learning_rate );
01236             if( greedy_joint_layers.length() && greedy_joint_layers[i] )
01237                 greedy_joint_layers[i]->setLearningRate( cd_learning_rate );
01238             if( greedy_joint_connections.length() && greedy_joint_connections[i] )
01239                 greedy_joint_connections[i]->setLearningRate( cd_learning_rate );
01240 
01241             for( ; stage<end_stage ; stage++ )
01242             {
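                // If cd_decrease_ct is non-zero, decay the contrastive
                // divergence learning rate as
                //     lr = cd_learning_rate / (1 + cd_decrease_ct * t),
                // where t is the number of stages already done within this
                // layer's training phase.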
01243                 if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01244                 {
01245                     real lr = cd_learning_rate
01246                         / (1. + cd_decrease_ct *
01247                            (stage - cumulative_schedule[i]));
01248 
01249                     layers[i]->setLearningRate( lr );
01250                     connections[i]->setLearningRate( lr );
01251                     layers[i+1]->setLearningRate( lr );
01252                     if( greedy_target_layers.length() && greedy_target_layers[i] )
01253                         greedy_target_layers[i]->setLearningRate( lr );
01254                     if( greedy_target_connections.length() && greedy_target_connections[i] )
01255                         greedy_target_connections[i]->setLearningRate( lr );
01256                     if( greedy_joint_layers.length() && greedy_joint_layers[i] )
01257                         greedy_joint_layers[i]->setLearningRate( lr );
01258                     if( greedy_joint_connections.length() && greedy_joint_connections[i] )
01259                         greedy_joint_connections[i]->setLearningRate( lr );
01260                 }
01261 
01262                 initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
01263                 // Do a step every 'minibatch_size' examples.
01264                 if (stage % minibatch_size == 0) {
01265                     int sample_start = stage % nsamples;
01266                     if (minibatch_size > 1 || minibatch_hack) {
01267                         train_set->getExamples(sample_start, minibatch_size,
01268                                 inputs, targets, weights, NULL, true);
01269                         train_costs_m.fill(MISSING_VALUE);
01270                         if (reconstruct_layerwise)
01271                             train_costs_m.column(reconstruction_cost_index).clear();
01272                         greedyStep( inputs, targets, i , train_costs_m);
01273                         for (int k = 0; k < minibatch_size; k++)
01274                             train_stats->update(train_costs_m(k));
01275                     } else {
01276                         train_set->getExample(sample_start, input, target, weight);
01277                         greedyStep( input, target, i );
01278                     }
01279                 }
01280                 if( pb )
01281                     pb->update( stage - cumulative_schedule[i] + 1 );
01282             }
01283         }
01284 
01285         // possible supervised part
01286         int end_stage = min(cumulative_schedule[n_layers-1], nstages);
01287         if( use_classification_cost && (stage < end_stage) )
01288         {
01289             PLASSERT_MSG(batch_size == 1, "'use_classification_cost' code not "
01290                     "verified with mini-batch learning yet");
01291 
01292             MODULE_LOG << "Training the classification module" << endl;
01293             MODULE_LOG << "  stage = " << stage << endl;
01294             MODULE_LOG << "  end_stage = " << end_stage << endl;
01295             MODULE_LOG << "  cd_learning_rate = " << cd_learning_rate << endl;
01296 
01297             if( report_progress )
01298                 pb = new ProgressBar( "Training the classification module",
01299                                       end_stage - stage );
01300 
01301             // set appropriate learning rate
01302             joint_layer->setLearningRate( cd_learning_rate );
01303             classification_module->joint_connection->setLearningRate(
01304                 cd_learning_rate );
01305             layers[ n_layers-1 ]->setLearningRate( cd_learning_rate );
01306 
01307             int previous_stage = cumulative_schedule[n_layers-2];
01308             for( ; stage<end_stage ; stage++ )
01309             {
01310                 if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01311                 {
01312                     real lr = cd_learning_rate /
01313                         (1. + cd_decrease_ct *
01314                          (stage - cumulative_schedule[n_layers-2]));
01315                     joint_layer->setLearningRate( lr );
01316                     classification_module->joint_connection->setLearningRate( lr );
01317                     layers[n_layers-1]->setLearningRate( lr );
01318                 }
01319                 initialize_gibbs_chain=(stage%gibbs_chain_reinit_freq==0);
01320                 int sample = stage % nsamples;
01321                 train_set->getExample( sample, input, target, weight );
01322                 jointGreedyStep( input, target );
01323 
01324                 if( pb )
01325                     pb->update( stage - previous_stage + 1 );
01326             }
01327         }
01328 
01329         if( up_down_stage < up_down_nstages )
01330         {
01331 
01332             if( up_down_stage == 0 )
01333             {
01334                 // Untie weights
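                // Until now the up (recognition) and down (generative)
                // directions shared the same RBM weight matrices. Each
                // connection is deep-copied and wrapped in an
                // RBMMatrixTransposeConnection so that the generative
                // (top-down) weights can be updated separately during the
                // up-down phase.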
01335                 generative_connections.resize(connections.length()-1);
01336                 PP<RBMMatrixConnection> w;
01337                 RBMMatrixTransposeConnection* wt;
01338                 for(int c=0; c<generative_connections.length(); c++)
01339                 {
01340                     CopiesMap map;
01341                     w = dynamic_cast<RBMMatrixConnection*>((RBMConnection*) connections[c]->deepCopy(map));
01342                     wt = new RBMMatrixTransposeConnection();
01343                     wt->rbm_matrix_connection = w;
01344                     wt->build();
01345                     generative_connections[c] = wt;
01346                 }
01347 
01348                 up_sample.resize(n_layers);
01349                 down_sample.resize(n_layers);
01350                 
01351                 for( int i=0 ; i<n_layers ; i++ )
01352                 {
01353                     up_sample[i].resize(layers[i]->size);
01354                     down_sample[i].resize(layers[i]->size);
01355                 }
01356             }
01357             /***** up-down algorithm *****/
01358             MODULE_LOG << "Up-down gradient descent algorithm" << endl;
01359             MODULE_LOG << "  up_down_stage = " << up_down_stage << endl;
01360             MODULE_LOG << "  up_down_nstages = " << up_down_nstages << endl;
01361             MODULE_LOG << "  up_down_learning_rate = " << up_down_learning_rate << endl;
01362 
01363             int init_stage = up_down_stage;
01364             if( report_progress )
01365                 pb = new ProgressBar( "Up-down gradient descent algorithm "
01366                                       + classname(),
01367                                       up_down_nstages - init_stage );
01368 
01369             setLearningRate( up_down_learning_rate );
01370 
01371             train_stats->forget();
01372             int sample_start;
01373             for( ; up_down_stage<up_down_nstages ; up_down_stage++ )
01374             {
01375                 sample_start = up_down_stage % nsamples;
01376                 if( !fast_exact_is_equal( up_down_decrease_ct, 0. ) )
01377                     setLearningRate( up_down_learning_rate
01378                                      / (1. + up_down_decrease_ct *
01379                                         up_down_stage) );
01380 
01381                 train_set->getExample( sample_start, input, target, weight );
01382                 upDownStep( input, target, train_costs );
01383                 train_stats->update( train_costs );
01384 
01385                 if( pb )
01386                     pb->update( up_down_stage - init_stage + 1 );
01387             }
01388         }
01389 
01390         if( save_learner_before_fine_tuning )
01391         {
01392             if( learnerExpdir == "" )
01393                 PLWARNING("DeepBeliefNet::train() - \n"
01394                     "cannot save model before fine-tuning because\n"
01395                     "no experiment directory has been set.");
01396             else
01397                 PLearn::save(learnerExpdir + "/learner_before_finetuning.psave",*this);
01398         }
01399 
01400         /***** fine-tuning by gradient descent *****/
01401         end_stage = min(cumulative_schedule[n_layers], nstages);
01402         if( stage >= end_stage )
01403             return;
01404         MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
01405         MODULE_LOG << "  stage = " << stage << endl;
01406         MODULE_LOG << "  end_stage = " << end_stage << endl;
01407         MODULE_LOG << "  grad_learning_rate = " << grad_learning_rate << endl;
01408 
01409         int init_stage = stage;
01410         if( report_progress )
01411             pb = new ProgressBar( "Fine-tuning parameters of all layers of "
01412                                   + classname(),
01413                                   end_stage - init_stage );
01414 
01415         setLearningRate( grad_learning_rate );
01416         train_stats->forget();
01417 
01418         for( ; stage < end_stage; stage++)
01419         {
01420             if (stage % minibatch_size == 0)
01421             {
01422                 int sample_start = stage % nsamples;
01423 
01424                 if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01425                     setLearningRate( grad_learning_rate
01426                             / (1. + grad_decrease_ct *
01427                                (stage - cumulative_schedule[n_layers-1])) );
01428 
01429                 if (minibatch_size > 1 || minibatch_hack)
01430                 {
01431                     train_set->getExamples(sample_start, minibatch_size, inputs,
01432                             targets, weights, NULL, true);
01433                     train_costs_m.fill(MISSING_VALUE);
01434                     fineTuningStep(inputs, targets, train_costs_m);
01435                 }
01436                 else
01437                 {
01438                     train_set->getExample( sample_start, input, target, weight );
01439                     fineTuningStep( input, target, train_costs );
01440                 }
01441 
01442                 // Update stats if we are in the last n_train_stats_samples samples
01443                 if (stage >= end_stage - n_train_stats_samples){
01444                     if (minibatch_size > 1 || minibatch_hack)
01445                         for (int k = 0; k < minibatch_size; k++)
01446                             train_stats->update(train_costs_m(k));
01447                     else
01448                         train_stats->update(train_costs);
01449                 }
01450             }
01451 
01452             if( pb )
01453                 pb->update( stage - init_stage + 1 );
01454         }
01455     }
01456 
01457     Profiler::end("training");
01458     // The report is pretty informative and therefore quite verbose.
01459     if (verbosity > 1)
01460         Profiler::report(cout);
01461 
01462     const Profiler::Stats& stats = Profiler::getStats("training");
01463     real ticksPerSec = Profiler::ticksPerSecond();
01464     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
01465     cumulative_training_time += cpu_time;
01466 
01467     if (verbosity > 1)
01468         cout << "The cumulative time spent in train() up until now is " << cumulative_training_time << " cpu seconds" << endl;
01469 
01470     train_costs.fill(MISSING_VALUE);
01471     train_costs[training_cpu_time_cost_index] = cpu_time;
01472     train_costs[cumulative_training_time_cost_index] = cumulative_training_time;
01473     train_stats->update( train_costs );
01474     train_stats->finalize();
01475 }
01476 
01478 // onlineStep //
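// Performs one online training step on a single example: a mean-field
// forward pass through all layers, gradient updates from the partial,
// final and (optionally) classification costs, and a contrastive
// divergence update for every RBM on the way back down.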
01480 void DeepBeliefNet::onlineStep(const Vec& input, const Vec& target,
01481                                Vec& train_costs)
01482 {
01483     real lr;
01484     PLASSERT(batch_size == 1);
01485 
01486     if( greedy_target_layers.length() )
01487         PLERROR("In DeepBeliefNet::onlineStep(): greedy_target_layers not implemented\n"
01488                 "for online setting");
01489 
01490     TVec<Vec> cost;
01491     if (!partial_costs.isEmpty())
01492         cost.resize(n_layers-1);
01493 
01494     layers[0]->expectation << input;
01495     // FORWARD PHASE
01496     //Vec layer_input;
01497     for( int i=0 ; i<n_layers-1 ; i++ )
01498     {
01499         // mean-field fprop from layer i to layer i+1
01500         connections[i]->setAsDownInput( layers[i]->expectation );
01501         // this does the actual matrix-vector computation
01502         layers[i+1]->getAllActivations( connections[i] );
01503         layers[i+1]->computeExpectation();
01504 
01505         // propagate into local cost associated to output of layer i+1
01506         if( !partial_costs.isEmpty() && partial_costs[ i ] )
01507         {
01508             partial_costs[ i ]->fprop( layers[ i+1 ]->expectation,
01509                                        target, cost[i] );
01510 
01511             // Backward pass
01512             // first time we set these gradients: do not accumulate
01513             partial_costs[ i ]->bpropUpdate( layers[ i+1 ]->expectation,
01514                                              target, cost[i][0],
01515                                              expectation_gradients[ i+1 ] );
01516 
01517             train_costs.subVec(partial_costs_indices[i], cost[i].length())
01518                 << cost[i];
01519         }
01520         else
01521             expectation_gradients[i+1].clear();
01522     }
01523 
01524     // The top layer may be connected to a final_module followed by a
01525     // final_cost, and/or may be used to predict class probabilities
01526     // through a joint classification_module.
01527 
01528     if ( final_cost )
01529     {
01530         if( final_module )
01531         {
01532                 final_module->fprop( layers[ n_layers-1 ]->expectation,
01533                         final_cost_input );
01534                 final_cost->fprop( final_cost_input, target,
01535                         final_cost_value );
01536                 final_cost->bpropUpdate( final_cost_input, target,
01537                         final_cost_value[0],
01538                         final_cost_gradient );
01539 
01540                 final_module->bpropUpdate(
01541                         layers[ n_layers-1 ]->expectation,
01542                         final_cost_input,
01543                         expectation_gradients[ n_layers-1 ],
01544                         final_cost_gradient, true );
01545         }
01546         else
01547         {
01548                 final_cost->fprop( layers[ n_layers-1 ]->expectation,
01549                         target,
01550                         final_cost_value );
01551                 final_cost->bpropUpdate( layers[ n_layers-1 ]->expectation,
01552                         target, final_cost_value[0],
01553                         expectation_gradients[n_layers-1],
01554                         true);
01555         }
01556 
01557         train_costs.subVec(final_cost_index, final_cost_value.length())
01558             << final_cost_value;
01559     }
01560 
01561     if (final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2]))
01562     {
01563         if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01564             lr = grad_learning_rate / (1. + grad_decrease_ct * stage );
01565         else
01566             lr = grad_learning_rate;
01567 
01568         layers[n_layers-1]->setLearningRate( lr );
01569         connections[n_layers-2]->setLearningRate( lr );
01570 
01571         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
01572                                            layers[ n_layers-1 ]->expectation,
01573                                            activation_gradients[ n_layers-1 ],
01574                                            expectation_gradients[ n_layers-1 ],
01575                                            false);
01576 
01577         connections[ n_layers-2 ]->bpropUpdate(
01578             layers[ n_layers-2 ]->expectation,
01579             layers[ n_layers-1 ]->activation,
01580             expectation_gradients[ n_layers-2 ],
01581             activation_gradients[ n_layers-1 ],
01582             true);
01583         // accumulate into expectation_gradients[n_layers-2]
01584         // because a partial cost may have already put a gradient there
01585     }
01586 
01587     if( use_classification_cost )
01588     {
01589         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01590                                       class_output );
01591         real nll_cost;
01592 
01593         // This doesn't work. gcc bug?
01594         // classification_cost->fprop( class_output, target, cost );
01595         classification_cost->CostModule::fprop( class_output, target,
01596                                                 nll_cost );
01597 
01598         real class_error =
01599             ( argmax(class_output) == (int) round(target[0]) ) ? 0: 1;
01600 
01601         train_costs[nll_cost_index] = nll_cost;
01602         train_costs[class_cost_index] = class_error;
01603 
01604         classification_cost->bpropUpdate( class_output, target, nll_cost,
01605                                           class_gradient );
01606 
01607         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01608                                             class_output,
01609                                             expectation_gradients[n_layers-2],
01610                                             class_gradient,
01611                                             true );
01612         if( top_layer_joint_cd )
01613         {
01614             // set the input of the joint layer
01615             Vec target_exp = classification_module->target_layer->expectation;
01616             fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
01617 
01618             if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01619                 lr = cd_learning_rate / (1. + cd_decrease_ct * stage );
01620             else
01621                 lr = cd_learning_rate;
01622 
01623             joint_layer->setLearningRate( lr );
01624             layers[ n_layers-1 ]->setLearningRate( lr );
01625             classification_module->joint_connection->setLearningRate( lr );
01626 
01627             save_layer_activation.resize(layers[ n_layers-2 ]->size);
01628             save_layer_activation << layers[ n_layers-2 ]->activation;
01629             save_layer_expectation.resize(layers[ n_layers-2 ]->size);
01630             save_layer_expectation << layers[ n_layers-2 ]->expectation;
01631 
01632             contrastiveDivergenceStep(
01633                 get_pointer(joint_layer),
01634                 get_pointer(classification_module->joint_connection),
01635                 layers[ n_layers-1 ], n_layers-2);
01636 
01637             layers[ n_layers-2 ]->activation << save_layer_activation;
01638             layers[ n_layers-2 ]->expectation << save_layer_expectation;
01639         }
01640     }
01641 
01642     // DOWNWARD PHASE (the downward phase for the top layer is already done
01643     // above, except for the contrastive divergence step in the case where
01644     // either 'use_classification_cost' or 'top_layer_joint_cd' is false).
01645     for( int i=n_layers-2 ; i>=0 ; i-- )
01646     {
01647         if (i <= n_layers - 3) {
01648             if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01649                 lr = grad_learning_rate / (1. + grad_decrease_ct * stage );
01650             else
01651                 lr = grad_learning_rate;
01652 
01653             connections[ i ]->setLearningRate( lr );
01654             layers[ i+1 ]->setLearningRate( lr );
01655 
01656 
01657             layers[i+1]->bpropUpdate( layers[i+1]->activation,
01658                                       layers[i+1]->expectation,
01659                                       activation_gradients[i+1],
01660                                       expectation_gradients[i+1] );
01661 
01662             connections[i]->bpropUpdate( layers[i]->expectation,
01663                                          layers[i+1]->activation,
01664                                          expectation_gradients[i],
01665                                          activation_gradients[i+1],
01666                                          true);
01667         }
01668 
01669         if (i <= n_layers - 3 || !use_classification_cost ||
01670             !top_layer_joint_cd) {
01671 
01672             // N.B. the contrastiveDivergenceStep changes the activation and
01673             // expectation fields of the top layer of the RBM, so it must be
01674             // done last.
01675             if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01676                 lr = cd_learning_rate / (1. + cd_decrease_ct * stage );
01677             else
01678                 lr = cd_learning_rate;
01679 
01680             layers[i]->setLearningRate( lr );
01681             layers[i+1]->setLearningRate( lr );
01682             connections[i]->setLearningRate( lr );
01683 
01684             if( i > 0 )
01685             {
01686                 save_layer_activation.resize(layers[i]->size);
01687                 save_layer_activation << layers[i]->activation;
01688                 save_layer_expectation.resize(layers[i]->size);
01689                 save_layer_expectation << layers[i]->expectation;
01690             }
01691             contrastiveDivergenceStep( layers[ i ],
01692                                        connections[ i ],
01693                                        layers[ i+1 ] ,
01694                                        i, true);
01695             if( i > 0 )
01696             {
01697                 layers[i]->activation << save_layer_activation;
01698                 layers[i]->expectation << save_layer_expectation;
01699             }
01700         }
01701     }
01702 }
01703 
01704 void DeepBeliefNet::onlineStep(const Mat& inputs, const Mat& targets,
01705                                Mat& train_costs)
01706 {
01707     real lr;
01708     // TODO Can we avoid this memory allocation?
01709     TVec<Mat> cost;
01710     Vec optimized_cost(inputs.length());
01711     if (partial_costs) {
01712         cost.resize(n_layers-1);
01713     }
01714 
01715     if( greedy_target_layers.length() )
01716         PLERROR("In DeepBeliefNet::onlineStep(): greedy_target_layers not implemented\n"
01717                 "for online setting");
01718 
01719     layers[0]->setExpectations(inputs);
01720     // FORWARD PHASE
01721     //Vec layer_input;
01722     for( int i=0 ; i<n_layers-1 ; i++ )
01723     {
01724         // mean-field fprop from layer i to layer i+1
01725         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
01726         // this does the actual matrix-matrix computation
01727         layers[i+1]->getAllActivations( connections[i], 0, true );
01728         layers[i+1]->computeExpectations();
01729 
01730         // propagate into local cost associated to output of layer i+1
01731         if( partial_costs && partial_costs[ i ] )
01732         {
01733             partial_costs[ i ]->fprop( layers[ i+1 ]->getExpectations(),
01734                                        targets, cost[i] );
01735 
01736             // Backward pass
01737             // first time we set these gradients: do not accumulate
01738             optimized_cost << cost[i].column(0); // TODO Can we optimize?
01739             partial_costs[ i ]->bpropUpdate( layers[ i+1 ]->getExpectations(),
01740                                              targets, optimized_cost,
01741                                              expectations_gradients[ i+1 ] );
01742 
01743             train_costs.subMatColumns(partial_costs_indices[i], cost[i].width())
01744                 << cost[i];
01745         }
01746         else
01747             expectations_gradients[i+1].clear();
01748     }
01749 
01750     // The top layer may be connected to a final_module followed by a
01751     // final_cost, and/or may be used to predict class probabilities
01752     // through a joint classification_module.
01753 
01754     if ( final_cost )
01755     {
01756         if( final_module )
01757         {
01758                 final_module->fprop( layers[ n_layers-1 ]->getExpectations(),
01759                         final_cost_inputs );
01760                 final_cost->fprop( final_cost_inputs, targets,
01761                         final_cost_values );
01762                 optimized_cost << final_cost_values.column(0); // TODO optimize
01763                 final_cost->bpropUpdate( final_cost_inputs, targets,
01764                         optimized_cost,
01765                         final_cost_gradients );
01766 
01767                 final_module->bpropUpdate(
01768                         layers[ n_layers-1 ]->getExpectations(),
01769                         final_cost_inputs,
01770                         expectations_gradients[ n_layers-1 ],
01771                         final_cost_gradients, true );
01772         }
01773         else
01774         {
01775                 final_cost->fprop( layers[ n_layers-1 ]->getExpectations(),
01776                         targets,
01777                         final_cost_values );
01778                 optimized_cost << final_cost_values.column(0); // TODO optimize
01779                 final_cost->bpropUpdate( layers[n_layers-1]->getExpectations(),
01780                         targets, optimized_cost,
01781                         expectations_gradients[n_layers-1],
01782                         true);
01783         }
01784 
01785         train_costs.subMatColumns(final_cost_index, final_cost_values.width())
01786             << final_cost_values;
01787     }
01788 
01789     if (final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2]))
01790     {
01791         if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01792             lr = grad_learning_rate / (1. + grad_decrease_ct * stage );
01793         else
01794             lr = grad_learning_rate;
01795 
01796         layers[n_layers-1]->setLearningRate( lr );
01797         connections[n_layers-2]->setLearningRate( lr );
01798 
01799         layers[ n_layers-1 ]->bpropUpdate(
01800                 layers[ n_layers-1 ]->activations,
01801                 layers[ n_layers-1 ]->getExpectations(),
01802                 activations_gradients[ n_layers-1 ],
01803                 expectations_gradients[ n_layers-1 ],
01804                 false);
01805 
01806         connections[ n_layers-2 ]->bpropUpdate(
01807                 layers[ n_layers-2 ]->getExpectations(),
01808                 layers[ n_layers-1 ]->activations,
01809                 expectations_gradients[ n_layers-2 ],
01810                 activations_gradients[ n_layers-1 ],
01811                 true);
01812         // accumulate into expectations_gradients[n_layers-2]
01813         // because a partial cost may have already put a gradient there
01814     }
01815 
01816     if( use_classification_cost )
01817     {
01818         PLERROR("In DeepBeliefNet::onlineStep - 'use_classification_cost' not "
01819                 "implemented for mini-batches");
01820 
01821         /*
01822         classification_module->fprop( layers[ n_layers-2 ]->expectation,
01823                                       class_output );
01824         real nll_cost;
01825 
01826         // This doesn't work. gcc bug?
01827         // classification_cost->fprop( class_output, target, cost );
01828         classification_cost->CostModule::fprop( class_output, target,
01829                                                 nll_cost );
01830 
01831         real class_error =
01832             ( argmax(class_output) == (int) round(target[0]) ) ? 0: 1;
01833 
01834         train_costs[nll_cost_index] = nll_cost;
01835         train_costs[class_cost_index] = class_error;
01836 
01837         classification_cost->bpropUpdate( class_output, target, nll_cost,
01838                                           class_gradient );
01839 
01840         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
01841                                             class_output,
01842                                             expectation_gradients[n_layers-2],
01843                                             class_gradient,
01844                                             true );
01845         if( top_layer_joint_cd )
01846         {
01847             // set the input of the joint layer
01848             Vec target_exp = classification_module->target_layer->expectation;
01849             fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
01850 
01851             if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01852                lr = cd_learning_rate / (1. + cd_decrease_ct * stage );
01853             else
01854                lr = cd_learning_rate;
01855 
01856             joint_layer->setLearningRate( lr );
01857             layers[ n_layers-1 ]->setLearningRate( lr );
01858             classification_module->joint_connection->setLearningRate( lr );
01859 
01860             save_layer_activation.resize(layers[ n_layers-2 ]->size);
01861             save_layer_activation << layers[ n_layers-2 ]->activation;
01862             save_layer_expectation.resize(layers[ n_layers-2 ]->size);
01863             save_layer_expectation << layers[ n_layers-2 ]->expectation;
01864 
01865             contrastiveDivergenceStep(
01866                 get_pointer(joint_layer),
01867                 get_pointer(classification_module->joint_connection),
01868                 layers[ n_layers-1 ], n_layers-2);
01869 
01870             layers[ n_layers-2 ]->activation << save_layer_activation;
01871             layers[ n_layers-2 ]->expectation << save_layer_expectation;
01872         }
01873         */
01874     }
01875 
01876     Mat rc;
01877     if (reconstruct_layerwise)
01878     {
01879         rc = train_costs.column(reconstruction_cost_index);
01880         rc.clear();
01881     }
01882 
01883     // DOWNWARD PHASE (the downward phase for the top layer is already done
01884     // above, except for the contrastive divergence step in the case where
01885     // either 'use_classification_cost' or 'top_layer_joint_cd' is false).
01886 
01887     for( int i=n_layers-2 ; i>=0 ; i-- )
01888     {
01889         if (i <= n_layers - 3) {
01890             if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
01891                 lr = grad_learning_rate / (1. + grad_decrease_ct * stage );
01892             else
01893                 lr = grad_learning_rate;
01894 
01895             connections[ i ]->setLearningRate( lr );
01896             layers[ i+1 ]->setLearningRate( lr );
01897 
01898             layers[i+1]->bpropUpdate( layers[i+1]->activations,
01899                                       layers[i+1]->getExpectations(),
01900                                       activations_gradients[i+1],
01901                                       expectations_gradients[i+1] );
01902 
01903             connections[i]->bpropUpdate( layers[i]->getExpectations(),
01904                                          layers[i+1]->activations,
01905                                          expectations_gradients[i],
01906                                          activations_gradients[i+1],
01907                                          true);
01908 
01909         }
01910 
01911         if (i <= n_layers - 3 || !use_classification_cost ||
01912                 !top_layer_joint_cd)
01913         {
01914 
01915             // N.B. the contrastiveDivergenceStep changes the activation and
01916             // expectation fields of the top layer of the RBM, so it must be
01917             // done last.
01918             if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
01919                 lr = cd_learning_rate / (1. + cd_decrease_ct * stage );
01920             else
01921                 lr = cd_learning_rate;
01922             layers[i]->setLearningRate( lr );
01923             layers[i+1]->setLearningRate( lr );
01924             connections[i]->setLearningRate( lr );
01925 
01926             if( i > 0 )
01927             {
01928                 const Mat& source_act = layers[i]->activations;
01929                 save_layer_activations.resize(source_act.length(),
01930                                               source_act.width());
01931                 save_layer_activations << source_act;
01932             }
01933             const Mat& source_exp = layers[i]->getExpectations();
01934             save_layer_expectations.resize(source_exp.length(),
01935                                            source_exp.width());
01936             save_layer_expectations << source_exp;
01937 
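            // Layer-wise reconstruction cost: reconstruct layer i from the
            // expectations of layer i+1 and accumulate the negative
            // log-likelihood both in this layer's own cost column and in the
            // total reconstruction_cost_index column.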
01938             if (reconstruct_layerwise)
01939             {
01940                 connections[i]->setAsUpInputs(layers[i+1]->getExpectations());
01941                 layers[i]->getAllActivations(connections[i], 0, true);
01942                 layers[i]->fpropNLL(
01943                         save_layer_expectations,
01944                         train_costs.column(reconstruction_cost_index+i+1));
01945                 rc += train_costs.column(reconstruction_cost_index+i+1);
01946             }
01947 
01948             contrastiveDivergenceStep( layers[ i ],
01949                                        connections[ i ],
01950                                        layers[ i+1 ] ,
01951                                        i, true);
01952             if( i > 0 )
01953             {
01954                 layers[i]->activations << save_layer_activations;
01955             }
01956             layers[i]->getExpectations() << save_layer_expectations;
01957 
01958         }
01959     }
01960 
01961 }
01962 
01964 // greedyStep //
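// Performs one greedy CD training step on a single example for the RBM
// between layers 'index' and 'index+1': mean-field propagation of the input
// up to layer 'index', an optional gradient update from the partial cost at
// that layer, then a contrastive divergence update (using the joint
// target/input layer when greedy_target_layers are used).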
01966 void DeepBeliefNet::greedyStep(const Vec& input, const Vec& target, int index)
01967 {
01968     real lr;
01969     PLASSERT( index < n_layers );
01970 
01971     layers[0]->expectation << input;
01972     for( int i=0 ; i<=index ; i++ )
01973     {
01974         if( greedy_target_layers.length() && greedy_target_layers[i] )
01975         {
01976             connections[i]->setAsDownInput( layers[i]->expectation );
01977             layers[i+1]->getAllActivations( connections[i] );
01978 
01979             if( i != index )
01980             {
01981                 greedy_target_layers[i]->activation.clear();
01982                 greedy_target_layers[i]->activation += greedy_target_layers[i]->bias;
01983                 for( int c=0; c<n_classes; c++ )
01984                 {
01985                     // Compute class free-energy
01986                     layers[i+1]->activation.toMat(layers[i+1]->size,1) += 
01987                         greedy_target_connections[i]->weights.column(c);
01988                     greedy_target_layers[i]->activation[c] -= 
01989                         layers[i+1]->freeEnergyContribution(layers[i+1]->activation);
01990                     
01991                     // Compute class-dependent expectation and store it
01992                     layers[i+1]->expectation_is_not_up_to_date();
01993                     layers[i+1]->computeExpectation();
01994                     greedy_target_expectations[i][c] << layers[i+1]->expectation;
01995                     
01996                     // Remove class-dependent energy for next free-energy computations
01997                     layers[i+1]->activation.toMat(layers[i+1]->size,1) -= greedy_target_connections[i]->weights.column(c);
01998                 }
01999                 greedy_target_layers[i]->expectation_is_not_up_to_date();
02000                 greedy_target_layers[i]->computeExpectation();
02001             
02002                 // Computing next layer representation
02003                 layers[i+1]->expectation.clear();
02004                 Vec expectation = layers[i+1]->expectation;
02005                 for( int c=0; c<n_classes; c++ )
02006                 {
02007                     Vec expectation_c = greedy_target_expectations[i][c];
02008                     real p_c = greedy_target_layers[i]->expectation[c];
02009                     multiplyScaledAdd(expectation_c, real(1.), p_c, expectation);
02010                 }
02011             }
02012             else
02013             {
02014                 fill_one_hot( greedy_target_layers[i]->expectation, 
02015                               (int) round(target[0]), real(0.), real(1.) );
02016             }
02017         }
02018         else
02019         {
02020             if( i == index && use_corrupted_posDownVal == "for_cd_fprop" )
02021             {
02022                 corrupted_pos_down_val.resize( layers[i]->size );
02023                 corrupt_input( layers[i]->expectation, corrupted_pos_down_val, index );
02024                 connections[i]->setAsDownInput( corrupted_pos_down_val );
02025             }
02026             else
02027                 connections[i]->setAsDownInput( layers[i]->expectation );
02028             layers[i+1]->getAllActivations( connections[i] );
02029             layers[i+1]->computeExpectation();
02030         }
02031     }
02032 
02033     if( !partial_costs.isEmpty() && partial_costs[ index ] )
02034     {
02035         // put appropriate learning rate
02036         if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
02037             lr = grad_learning_rate /
02038                 (1. + grad_decrease_ct *
02039                  (stage - cumulative_schedule[index]));
02040         else
02041             lr = grad_learning_rate;
02042 
02043         partial_costs[ index ]->setLearningRate( lr );
02044         connections[ index ]->setLearningRate( lr );
02045         layers[ index+1 ]->setLearningRate( lr );
02046 
02047         // Backward pass
02048         real cost;
02049         partial_costs[ index ]->fprop( layers[ index+1 ]->expectation,
02050                                        target, cost );
02051 
02052         partial_costs[ index ]->bpropUpdate( layers[ index+1 ]->expectation,
02053                                              target, cost,
02054                                              expectation_gradients[ index+1 ]
02055                                              );
02056 
02057         layers[ index+1 ]->bpropUpdate( layers[ index+1 ]->activation,
02058                                         layers[ index+1 ]->expectation,
02059                                         activation_gradients[ index+1 ],
02060                                         expectation_gradients[ index+1 ] );
02061 
02062         connections[ index ]->bpropUpdate( layers[ index ]->expectation,
02063                                            layers[ index+1 ]->activation,
02064                                            expectation_gradients[ index ],
02065                                            activation_gradients[ index+1 ] );
02066 
02067         // put back old learning rate
02068         if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
02069             lr = cd_learning_rate / (1. + cd_decrease_ct *
02070                                      (stage - cumulative_schedule[index]));
02071         else
02072             lr = cd_learning_rate;
02073 
02074         connections[ index ]->setLearningRate( lr );
02075         layers[ index+1 ]->setLearningRate( lr );
02076     }
02077 
02078     if( greedy_target_layers.length() && greedy_target_layers[index] )
02079     {
02080         contrastiveDivergenceStep( greedy_joint_layers[ index ],
02081                                    greedy_joint_connections[ index ],
02082                                    layers[ index+1 ],
02083                                    index, false);
02084     }
02085     else
02086     {
02087         contrastiveDivergenceStep( layers[ index ],
02088                                    connections[ index ],
02089                                    layers[ index+1 ],
02090                                    index, true);
02091     }
02092 }
02093 
02095 // greedyStep (minibatch version) //
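// Minibatch version of the greedy step: propagates a batch of inputs up to
// layer 'index', applies the optional partial-cost update, optionally
// measures the layer-wise reconstruction cost, and performs a contrastive
// divergence update on the RBM between layers 'index' and 'index+1'.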
02097 void DeepBeliefNet::greedyStep(const Mat& inputs, const Mat& targets,
02098                                int index, Mat& train_costs_m)
02099 {
02100     real lr;
02101     PLASSERT( index < n_layers );
02102 
02103     layers[0]->setExpectations(inputs);
02104 
02105     if( greedy_target_layers.length() && greedy_target_layers[0] )
02106         PLERROR("In DeepBeliefNet::greedyStep(): greedy_target_layers not implemented\n"
02107                 "for minibatch setting");
02108 
02109     for( int i=0 ; i<=index ; i++ )
02110     {
02111         
02112         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
02113         layers[i+1]->getAllActivations( connections[i], 0, true );
02114         layers[i+1]->computeExpectations();
02115     }
02116 
02117     if( !partial_costs.isEmpty() && partial_costs[ index ] )
02118     {
02119         // put appropriate learning rate
02120         if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
02121             lr = grad_learning_rate /
02122                 (1. + grad_decrease_ct *
02123                  (stage - cumulative_schedule[index]));
02124         else
02125             lr = grad_learning_rate;
02126 
02127         partial_costs[ index ]->setLearningRate( lr );
02128         connections[ index ]->setLearningRate( lr );
02129         layers[ index+1 ]->setLearningRate( lr );
02130 
02131         // Backward pass
02132         Vec costs;
02133         partial_costs[ index ]->fprop( layers[ index+1 ]->getExpectations(),
02134                                        targets, costs );
02135 
02136         partial_costs[ index ]->bpropUpdate(layers[index+1]->getExpectations(),
02137                 targets, costs,
02138                 expectations_gradients[ index+1 ]
02139                 );
02140 
02141         layers[ index+1 ]->bpropUpdate( layers[ index+1 ]->activations,
02142                                         layers[ index+1 ]->getExpectations(),
02143                                         activations_gradients[ index+1 ],
02144                                         expectations_gradients[ index+1 ] );
02145 
02146         connections[ index ]->bpropUpdate( layers[ index ]->getExpectations(),
02147                                            layers[ index+1 ]->activations,
02148                                            expectations_gradients[ index ],
02149                                            activations_gradients[ index+1 ] );
02150 
02151         // put back old learning rate
02152         if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
02153             lr = cd_learning_rate / (1. + cd_decrease_ct *
02154                                      (stage - cumulative_schedule[index]));
02155         else
02156             lr = cd_learning_rate;
02157         connections[ index ]->setLearningRate( lr );
02158         layers[ index+1 ]->setLearningRate( lr );
02159     }
02160 
02161     if (reconstruct_layerwise)
02162     {
02163         layer_inputs.resize(minibatch_size,layers[index]->size);
02164         layer_inputs << layers[index]->getExpectations(); // we will perturb these, so save them
02165         connections[index]->setAsUpInputs(layers[index+1]->getExpectations());
02166         layers[index]->getAllActivations(connections[index], 0, true);
02167         layers[index]->fpropNLL(layer_inputs, train_costs_m.column(reconstruction_cost_index+index+1));
02168         Mat rc = train_costs_m.column(reconstruction_cost_index);
02169         rc += train_costs_m.column(reconstruction_cost_index+index+1);
02170         layers[index]->setExpectations(layer_inputs); // and restore them here
02171     }
02172 
02173     contrastiveDivergenceStep( layers[ index ],
02174                                connections[ index ],
02175                                layers[ index+1 ],
02176                                index, true);
02177 
02178 }
02179 
02181 // jointGreedyStep //
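// Greedy training step for the top, joint RBM: propagates the input up to
// layer n_layers-2, optionally applies the partial cost at that layer, then
// performs contrastive divergence on the joint layer formed by the
// penultimate layer and the one-hot target layer.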
02183 void DeepBeliefNet::jointGreedyStep( const Vec& input, const Vec& target )
02184 {
02185     real lr;
02186     PLASSERT( joint_layer );
02187     PLASSERT_MSG(batch_size == 1, "Not implemented for mini-batches");
02188 
02189     layers[0]->expectation << input;
02190     for( int i=0 ; i<n_layers-2 ; i++ )
02191     {
02192         connections[i]->setAsDownInput( layers[i]->expectation );
02193         layers[i+1]->getAllActivations( connections[i] );
02194         layers[i+1]->computeExpectation();
02195     }
02196 
02197     if( !partial_costs.isEmpty() && partial_costs[ n_layers-2 ] )
02198     {
02199         // Deterministic forward pass
02200         connections[ n_layers-2 ]->setAsDownInput(
02201             layers[ n_layers-2 ]->expectation );
02202         layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
02203         layers[ n_layers-1 ]->computeExpectation();
02204 
02205         // put appropriate learning rate
02206         if( !fast_exact_is_equal( grad_decrease_ct, 0. ) )
02207             lr = grad_learning_rate
02208                 / (1. + grad_decrease_ct *
02209                    (stage - cumulative_schedule[n_layers-2]));
02210         else
02211             lr = grad_learning_rate;
02212 
02213         partial_costs[ n_layers-2 ]->setLearningRate( lr );
02214         connections[ n_layers-2 ]->setLearningRate( lr );
02215         layers[ n_layers-1 ]->setLearningRate( lr );
02216 
02217 
02218         // Backward pass
02219         real cost;
02220         partial_costs[ n_layers-2 ]->fprop( layers[ n_layers-1 ]->expectation,
02221                                             target, cost );
02222 
02223         partial_costs[ n_layers-2 ]->bpropUpdate(
02224             layers[ n_layers-1 ]->expectation, target, cost,
02225             expectation_gradients[ n_layers-1 ] );
02226 
02227         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
02228                                            layers[ n_layers-1 ]->expectation,
02229                                            activation_gradients[ n_layers-1 ],
02230                                            expectation_gradients[ n_layers-1 ]
02231                                          );
02232 
02233         connections[ n_layers-2 ]->bpropUpdate(
02234             layers[ n_layers-2 ]->expectation,
02235             layers[ n_layers-1 ]->activation,
02236             expectation_gradients[ n_layers-2 ],
02237             activation_gradients[ n_layers-1 ] );
02238 
02239         // put back old learning rate
02240         if( !fast_exact_is_equal( cd_decrease_ct, 0. ) )
02241             lr = cd_learning_rate
02242                 / (1. + cd_decrease_ct *
02243                    (stage - cumulative_schedule[n_layers-2]));
02244         else
02245             lr = cd_learning_rate;
02246 
02247         connections[ n_layers-2 ]->setLearningRate( lr );
02248         layers[ n_layers-1 ]->setLearningRate( lr );
02249     }
02250 
02251     Vec target_exp = classification_module->target_layer->expectation;
02252     fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
02253 
02254     contrastiveDivergenceStep(
02255         get_pointer( joint_layer ),
02256         get_pointer( classification_module->joint_connection ),
02257         layers[ n_layers-1 ], n_layers-2);
02258 }
02259 
02260 void DeepBeliefNet::jointGreedyStep(const Mat& inputs, const Mat& targets)
02261 {
02262     PLCHECK_MSG(false, "Not implemented for mini-batches");
02263 }
02264 
02265 
02267 // upDownStep //
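// One step of the up-down algorithm on a single example: a stochastic up
// (recognition) pass, a contrastive divergence update of the top RBM, a
// stochastic down (generative) pass, and finally separate updates of the
// recognition and generative weights from their respective reconstruction
// errors.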
02269 void DeepBeliefNet::upDownStep( const Vec& input, const Vec& target,
02270                                 Vec& train_costs )
02271 {
02272 
02273     if( greedy_target_layers.length() )
02274         PLERROR("In DeepBeliefNet::upDownStep(): greedy_target_layers not implemented\n"
02275                 "for up-down setting");
02276 
02277     // Up pass
02278     up_sample[0] << input;
02279     for( int i=0 ; i<n_layers-2 ; i++ )
02280     {
02281         connections[i]->setAsDownInput( up_sample[i] );
02282         layers[i+1]->getAllActivations( connections[i] );
02283         layers[i+1]->computeExpectation();
02284         layers[i+1]->generateSample();
02285         up_sample[i+1] << layers[i+1]->sample;
02286     }
02287 
02288     // Top RBM update
02289     if( use_classification_cost )
02290     {
02291         Vec target_exp = classification_module->target_layer->expectation;
02292         fill_one_hot( target_exp, (int) round(target[0]), real(0.), real(1.) );
02293 
02294         contrastiveDivergenceStep(
02295             get_pointer( joint_layer ),
02296             get_pointer( classification_module->joint_connection ),
02297             layers[ n_layers-1 ], n_layers-2,false);
02298     }
02299     else
02300     {
02301         contrastiveDivergenceStep( layers[ n_layers-2 ],
02302                                    connections[ n_layers-2 ],
02303                                    layers[ n_layers-1 ],
02304                                    n_layers-2, false);
02305     }
02306     down_sample[n_layers-2] << layers[n_layers-2]->sample;
02307 
02308     // Down pass
02309     for( int i=n_layers-3 ; i>=0 ; i-- )
02310     {
02311         generative_connections[i]->setAsDownInput( down_sample[i+1] );
02312         layers[i]->getAllActivations( generative_connections[i] );
02313         layers[i]->computeExpectation();
02314         layers[i]->generateSample();
02315         down_sample[i] << layers[i]->sample;
02316     }
02317 
02318     // Updates
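    // Recognition weights are trained to predict the down-pass sample of
    // layer i+1 from the down-pass sample of layer i; generative weights
    // are trained to predict the up-pass sample of layer i from the
    // up-pass sample of layer i+1 (negative log-likelihood gradients).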
02319     real nll = 0.; // Actually unused
02320     for( int i=0 ; i<n_layers-2 ; i++ )
02321     {
02322         // Update recognition weights
02323         connections[i]->setAsDownInput( down_sample[i] );
02324         layers[i+1]->getAllActivations( connections[i] );
02325         layers[i+1]->computeExpectation();
02326         layers[i+1]->bpropNLL(down_sample[i+1], nll, activation_gradients[i+1]);
02327         layers[i+1]->update( activation_gradients[i+1] );
02328         connections[i]->bpropUpdate( down_sample[i],
02329                                   layers[i+1]->activation,
02330                                   activation_gradients[i],
02331                                   activation_gradients[i+1]);
02332 
02333         // Update generative weights
02334         generative_connections[i]->setAsDownInput( up_sample[i+1] );
02335         layers[i]->getAllActivations( generative_connections[i] );
02336         layers[i]->computeExpectation();
02337         layers[i]->bpropNLL(up_sample[i], nll, activation_gradients[i]);
02338         layers[i]->update( activation_gradients[i] );
02339         generative_connections[i]->bpropUpdate( up_sample[i+1],
02340                                              layers[i]->activation,
02341                                              activation_gradients[i+1],
02342                                              activation_gradients[i]);
02343     }
02344 }
02345 
02346 void DeepBeliefNet::upDownStep(const Mat& inputs, const Mat& targets,
02347                                Mat& train_costs)
02348 {
02349     PLCHECK_MSG(false, "Not implemented for mini-batches");
02350 }
02351 
02353 // fineTuningStep //
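// Supervised fine-tuning on a single example: a mean-field forward pass
// through all layers (mixing in the greedy target layers when they are
// present), followed by an fprop/bpropUpdate through final_cost (and
// final_module when present) that back-propagates gradients into the
// network.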
02355 void DeepBeliefNet::fineTuningStep( const Vec& input, const Vec& target,
02356                                     Vec& train_costs )
02357 {
02358     final_cost_value.resize(0);
02359     // fprop
02360     layers[0]->expectation << input;
02361     for( int i=0 ; i<n_layers-2 ; i++ )
02362     {
02363         if( greedy_target_layers.length() && greedy_target_layers[i] )
02364         {
02365             connections[i]->setAsDownInput( layers[i]->expectation );
02366             layers[i+1]->getAllActivations( connections[i] );
02367             
02368             greedy_target_layers[i]->activation.clear();
02369             greedy_target_layers[i]->activation += greedy_target_layers[i]->bias;
02370             for( int c=0; c<n_classes; c++ )
02371             {
02372                 // Compute class free-energy
02373                 layers[i+1]->activation.toMat(layers[i+1]->size,1) += greedy_target_connections[i]->weights.column(c);
02374                 greedy_target_layers[i]->activation[c] -= layers[i+1]->freeEnergyContribution(layers[i+1]->activation);
02375                 
02376                 // Compute class-dependent expectation and store it
02377                 layers[i+1]->expectation_is_not_up_to_date();
02378                 layers[i+1]->computeExpectation();
02379                 greedy_target_expectations[i][c] << layers[i+1]->expectation;
02380                 
02381                 // Remove class-dependent energy for next free-energy computations
02382                 layers[i+1]->activation.toMat(layers[i+1]->size,1) -= greedy_target_connections[i]->weights.column(c);
02383             }
02384             greedy_target_layers[i]->expectation_is_not_up_to_date();
02385             greedy_target_layers[i]->computeExpectation();
02386             
02387             // Computing next layer representation
02388             layers[i+1]->expectation.clear();
02389             Vec expectation = layers[i+1]->expectation;
02390             for( int c=0; c<n_classes; c++ )
02391             {
02392                 Vec expectation_c = greedy_target_expectations[i][c];
02393                 real p_c = greedy_target_layers[i]->expectation[c];
02394                 multiplyScaledAdd(expectation_c, real(1.), p_c, expectation);
02395             }
02396         }
02397         else
02398         {
02399             connections[i]->setAsDownInput( layers[i]->expectation );
02400             layers[i+1]->getAllActivations( connections[i] );
02401             layers[i+1]->computeExpectation();
02402         }
02403     }
02404 
02405     if( final_cost )
02406     {
02407         if( greedy_target_layers.length() && greedy_target_layers[n_layers-2] )
02408         {
02409             connections[n_layers-2]->setAsDownInput( layers[n_layers-2]->expectation );
02410             layers[n_layers-1]->getAllActivations( connections[n_layers-2] );
02411             
02412             greedy_target_layers[n_layers-2]->activation.clear();
02413             greedy_target_layers[n_layers-2]->activation += 
02414                 greedy_target_layers[n_layers-2]->bias;
02415             for( int c=0; c<n_classes; c++ )
02416             {
02417                 // Compute class free-energy
02418                 layers[n_layers-1]->activation.toMat(layers[n_layers-1]->size,1) += 
02419                     greedy_target_connections[n_layers-2]->weights.column(c);
02420                 greedy_target_layers[n_layers-2]->activation[c] -= 
02421                     layers[n_layers-1]->freeEnergyContribution(layers[n_layers-1]->activation);
02422                 
02423                 // Compute class-dependent expectation and store it
02424                 layers[n_layers-1]->expectation_is_not_up_to_date();
02425                 layers[n_layers-1]->computeExpectation();
02426                 greedy_target_expectations[n_layers-2][c] << layers[n_layers-1]->expectation;
02427                 
02428                 // Remove class-dependent energy for next free-energy computations
02429                 layers[n_layers-1]->activation.toMat(layers[n_layers-1]->size,1) -= 
02430                     greedy_target_connections[n_layers-2]->weights.column(c);
02431             }
02432             greedy_target_layers[n_layers-2]->expectation_is_not_up_to_date();
02433             greedy_target_layers[n_layers-2]->computeExpectation();
02434             
02435             // Computing next layer representation
02436             layers[n_layers-1]->expectation.clear();
02437             Vec expectation = layers[n_layers-1]->expectation;
02438             for( int c=0; c<n_classes; c++ )
02439             {
02440                 Vec expectation_c = greedy_target_expectations[n_layers-2][c];
02441                 real p_c = greedy_target_layers[n_layers-2]->expectation[c];
02442                 multiplyScaledAdd(expectation_c, real(1.), p_c, expectation);
02443             }
02444         }
02445         else
02446         {
02447             connections[ n_layers-2 ]->setAsDownInput(
02448                 layers[ n_layers-2 ]->expectation );
02449             layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
02450             layers[ n_layers-1 ]->computeExpectation();
02451         }
02452         
02453         if( final_module )
02454         {
02455             final_module->fprop( layers[ n_layers-1 ]->expectation,
02456                                  final_cost_input );
02457             final_cost->fprop( final_cost_input, target, final_cost_value );
02458 
02459             final_cost->bpropUpdate( final_cost_input, target,
02460                                      final_cost_value[0],
02461                                      final_cost_gradient );
02462             final_module->bpropUpdate( layers[ n_layers-1 ]->expectation,
02463                                        final_cost_input,
02464                                        expectation_gradients[ n_layers-1 ],
02465                                        final_cost_gradient );
02466         }
02467         else
02468         {
02469             final_cost->fprop( layers[ n_layers-1 ]->expectation, target,
02470                                final_cost_value );
02471 
02472             final_cost->bpropUpdate( layers[ n_layers-1 ]->expectation,
02473                                      target, final_cost_value[0],
02474                                      expectation_gradients[ n_layers-1 ] );
02475         }
02476 
02477         train_costs.subVec(final_cost_index, final_cost_value.length())
02478             << final_cost_value;
02479 
02480         if( greedy_target_layers.length() && greedy_target_layers[n_layers-2] )
02481         {
02482             activation_gradients[n_layers-1].clear();
02483             for( int c=0; c<n_classes; c++ )
02484             {
02485                 greedy_target_expectation_gradients[n_layers-2][c] << 
02486                     expectation_gradients[ n_layers-1 ];
02487                 greedy_target_expectation_gradients[n_layers-2][c] *= 
02488                     greedy_target_layers[n_layers-2]->expectation[c];
02489                 layers[ n_layers-1 ]->bpropUpdate( 
02490                     greedy_target_activations[n_layers-2][c],
02491                     greedy_target_expectations[n_layers-2][c],
02492                     greedy_target_activation_gradients[n_layers-2][c],
02493                     greedy_target_expectation_gradients[n_layers-2][c] );
02494 
02495                 activation_gradients[n_layers-1] += 
02496                     greedy_target_activation_gradients[n_layers-2][c];
02497 
02498                 // Update target connections, with gradient from p(h_l | h_l-1, y)
02499                 multiplyScaledAdd( greedy_target_activation_gradients[n_layers-2][c].toMat(layers[n_layers-1]->size,1),
02500                                    real(1.), -greedy_target_connections[n_layers-2]->learning_rate,
02501                                    greedy_target_connections[n_layers-2]->weights.column(c));
02502                 
02503                 greedy_target_probability_gradients[n_layers-2][c] = 
02504                     dot( expectation_gradients[ n_layers-1 ], 
02505                          greedy_target_expectations[ n_layers-2 ][c] );
02506             }
02507 
02508             // Update bias
02509             greedy_target_layers[n_layers-2]->bpropUpdate(
02510                 greedy_target_layers[n_layers-2]->expectation, // Isn't used
02511                 greedy_target_layers[n_layers-2]->expectation,
02512                 greedy_target_probability_gradients[n_layers-2], 
02513                 greedy_target_probability_gradients[n_layers-2] );
02514 
02515             for( int c=0; c<n_classes; c++ )
02516             {
02517                 layers[n_layers-1]->freeEnergyContributionGradient(
02518                     greedy_target_activations[n_layers-2][c],
02519                     greedy_target_activation_gradients[n_layers-2][c], // Overwrite previous activation gradient
02520                     -greedy_target_probability_gradients[n_layers-2][c] );
02521 
02522                 activation_gradients[n_layers-1] += 
02523                     greedy_target_activation_gradients[n_layers-2][c];
02524 
02525                 // Update target connections, with gradient from p(y | h_l-1 )
02526                 multiplyScaledAdd( greedy_target_activation_gradients[n_layers-2][c].toMat(layers[n_layers-1]->size,1),
02527                                    real(1.), -greedy_target_connections[n_layers-2]->learning_rate,
02528                                    greedy_target_connections[n_layers-2]->weights.column(c));
02529             }
02530 
02531             connections[ n_layers-2 ]->bpropUpdate(
02532                 layers[ n_layers-2 ]->expectation,
02533                 layers[ n_layers-1 ]->activation, // Not the true input, but this argument is unused for matrix connections
02534                 expectation_gradients[ n_layers-2 ],
02535                 activation_gradients[ n_layers-1 ] );
02536             
02537         }
02538         else
02539         {
02540             layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activation,
02541                                                layers[ n_layers-1 ]->expectation,
02542                                                activation_gradients[ n_layers-1 ],
02543                                                expectation_gradients[ n_layers-1 ]
02544                 );
02545             
02546             connections[ n_layers-2 ]->bpropUpdate(
02547                 layers[ n_layers-2 ]->expectation,
02548                 layers[ n_layers-1 ]->activation,
02549                 expectation_gradients[ n_layers-2 ],
02550                 activation_gradients[ n_layers-1 ] );
02551         }
02552     }
02553     else  {
02554         expectation_gradients[ n_layers-2 ].clear();
02555     }
02556 
02557     if( use_classification_cost )
02558     {
02559         classification_module->fprop( layers[ n_layers-2 ]->expectation,
02560                                       class_output );
02561         real nll_cost;
02562 
02563         // Calling fprop directly doesn't work here (gcc bug?), so we use the
02564         // base-class version instead of: classification_cost->fprop( class_output, target, cost );
02565         classification_cost->CostModule::fprop( class_output, target,
02566                                                 nll_cost );
02567 
02568         real class_error =
02569             ( argmax(class_output) == (int) round(target[0]) ) ? 0
02570                                                                : 1;
02571 
02572         train_costs[nll_cost_index] = nll_cost;
02573         train_costs[class_cost_index] = class_error;
02574 
02575         classification_cost->bpropUpdate( class_output, target, nll_cost,
02576                                           class_gradient );
02577 
02578         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
02579                                             class_output,
02580                                             expectation_gradients[n_layers-2],
02581                                             class_gradient,
02582                                             true );
02583     }
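    // Note: classification_module is assumed to compute p(y | h_{n_layers-2})
    // from the joint RBM over the top hidden layer and the target, and the NLL
    // gradient is backpropagated into expectation_gradients[n_layers-2].
    // The final `true` argument of bpropUpdate() is assumed to mean
    // "accumulate", so this gradient is added to the one coming from
    // final_cost rather than overwriting it.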
02584 
02585     for( int i=n_layers-2 ; i>0 ; i-- )
02586     {
02587         if( greedy_target_layers.length() && greedy_target_layers[i] )
02588         {
02589             activation_gradients[i-1].clear();
02590             for( int c=0; c<n_classes; c++ )
02591             {
02592                 greedy_target_expectation_gradients[i-1][c] << 
02593                     expectation_gradients[ i ];
02594                 greedy_target_expectation_gradients[i-1][c] *= 
02595                     greedy_target_layers[i-1]->expectation[c];
02596                 layers[ i ]->bpropUpdate( 
02597                     greedy_target_activations[i-1][c],
02598                     greedy_target_expectations[i-1][c],
02599                     greedy_target_activation_gradients[i-1][c],
02600                     greedy_target_expectation_gradients[i-1][c] );
02601 
02602                 activation_gradients[i] +=
02603                     greedy_target_activation_gradients[i-1][c];
02604 
02605                 // Update target connections, with gradient from p(h_l | h_l-1, y)
02606                 multiplyScaledAdd( greedy_target_activation_gradients[i-1][c].toMat(layers[i]->size,1),
02607                                    real(1.), -greedy_target_connections[i-1]->learning_rate,
02608                                    greedy_target_connections[i-1]->weights.column(c));
02609                 
02610                 greedy_target_probability_gradients[i-1][c] = 
02611                     dot( expectation_gradients[ i ], 
02612                          greedy_target_expectations[ i-1 ][c] );
02613             }
02614 
02615             // Update bias
02616             greedy_target_layers[i-1]->bpropUpdate(
02617                 greedy_target_layers[i-1]->expectation, // Isn't used
02618                 greedy_target_layers[i-1]->expectation,
02619                 greedy_target_probability_gradients[i-1], 
02620                 greedy_target_probability_gradients[i-1] );
02621 
02622             for( int c=0; c<n_classes; c++ )
02623             {
02624                 layers[i]->freeEnergyContributionGradient(
02625                     greedy_target_activations[i-1][c],
02626                     greedy_target_activation_gradients[i-1][c], // Overwrite previous activation gradient
02627                     -greedy_target_probability_gradients[i-1][c] );
02628 
02629                 activation_gradients[i] += 
02630                     greedy_target_activation_gradients[i-1][c];
02631 
02632                 // Update target connections, with gradient from p(y | h_l-1 )
02633                 multiplyScaledAdd( greedy_target_activation_gradients[i-1][c].toMat(layers[i]->size,1),
02634                                    real(1.), -greedy_target_connections[i-1]->learning_rate,
02635                                    greedy_target_connections[i-1]->weights.column(c));
02636             }
02637 
02638             connections[ i-1 ]->bpropUpdate(
02639                 layers[ i-1 ]->expectation,
02640                 layers[ i ]->activation, // Not the true input, but this argument is unused for matrix connections
02641                 expectation_gradients[ i-1 ],
02642                 activation_gradients[ i ] );
02643         }
02644         else
02645         {
02646             layers[i]->bpropUpdate( layers[i]->activation,
02647                                     layers[i]->expectation,
02648                                     activation_gradients[i],
02649                                     expectation_gradients[i] );
02650             
02651             connections[i-1]->bpropUpdate( layers[i-1]->expectation,
02652                                            layers[i]->activation,
02653                                            expectation_gradients[i-1],
02654                                            activation_gradients[i] );
02655         }
02656     }
02657 }
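
// Note on the greedy-target branches of fineTuningStep() above: each class c
// contributes two gradient paths to activation_gradients. The first comes
// from the class-conditional expectation p(h_l | h_{l-1}, y=c), weighted by
// p(y=c | h_{l-1}); the second comes from p(y=c | h_{l-1}) itself, obtained by
// differentiating the free energy through freeEnergyContributionGradient().
// Both paths also update column c of the greedy target connection weights via
// multiplyScaledAdd() with the connection's learning rate.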
02658 
02659 void DeepBeliefNet::fineTuningStep(const Mat& inputs, const Mat& targets,
02660                                    Mat& train_costs)
02661 {
02662     if( greedy_target_layers.length() )
02663         PLERROR("In DeepBeliefNet::fineTuningStep(): greedy_target_layers not implemented\n"
02664                 "for minibatch setting");
02665 
02666     final_cost_values.resize(0, 0);
02667     // fprop
02668     layers[0]->getExpectations() << inputs;
02669     for( int i=0 ; i<n_layers-2 ; i++ )
02670     {
02671         connections[i]->setAsDownInputs( layers[i]->getExpectations() );
02672         layers[i+1]->getAllActivations( connections[i], 0, true );
02673         layers[i+1]->computeExpectations();
02674     }
02675 
02676     if( final_cost )
02677     {
02678         connections[ n_layers-2 ]->setAsDownInputs(
02679             layers[ n_layers-2 ]->getExpectations() );
02680         // TODO Also ensure getAllActivations fills everything.
02681         layers[ n_layers-1 ]->getAllActivations(connections[n_layers-2],
02682                                                 0, true);
02683         layers[ n_layers-1 ]->computeExpectations();
02684 
02685         if( final_module )
02686         {
02687             final_cost_inputs.resize(minibatch_size,
02688                                      final_module->output_size);
02689             final_module->fprop( layers[ n_layers-1 ]->getExpectations(),
02690                                  final_cost_inputs );
02691             final_cost->fprop( final_cost_inputs, targets, final_cost_values );
02692 
02693             // TODO This extra memory copy is annoying: how can we avoid it?
02694             optimized_costs << final_cost_values.column(0);
02695             final_cost->bpropUpdate( final_cost_inputs, targets,
02696                                      optimized_costs,
02697                                      final_cost_gradients );
02698             final_module->bpropUpdate( layers[ n_layers-1 ]->getExpectations(),
02699                                        final_cost_inputs,
02700                                        expectations_gradients[ n_layers-1 ],
02701                                        final_cost_gradients );
02702         }
02703         else
02704         {
02705             final_cost->fprop( layers[ n_layers-1 ]->getExpectations(), targets,
02706                                final_cost_values );
02707 
02708             optimized_costs << final_cost_values.column(0);
02709             final_cost->bpropUpdate( layers[ n_layers-1 ]->getExpectations(),
02710                                      targets, optimized_costs,
02711                                      expectations_gradients[ n_layers-1 ] );
02712         }
02713 
02714         train_costs.subMatColumns(final_cost_index, final_cost_values.width())
02715             << final_cost_values;
02716 
02717         layers[ n_layers-1 ]->bpropUpdate( layers[ n_layers-1 ]->activations,
02718                                            layers[ n_layers-1 ]->getExpectations(),
02719                                            activations_gradients[ n_layers-1 ],
02720                                            expectations_gradients[ n_layers-1 ]
02721                                          );
02722 
02723         connections[ n_layers-2 ]->bpropUpdate(
02724             layers[ n_layers-2 ]->getExpectations(),
02725             layers[ n_layers-1 ]->activations,
02726             expectations_gradients[ n_layers-2 ],
02727             activations_gradients[ n_layers-1 ] );
02728     }
02729     else  {
02730         expectations_gradients[ n_layers-2 ].clear();
02731     }
02732 
02733     if( use_classification_cost )
02734     {
02735         PLERROR("DeepBeliefNet::fineTuningStep - Not implemented for "
02736                 "mini-batches");
02737         /*
02738         classification_module->fprop( layers[ n_layers-2 ]->expectation,
02739                                       class_output );
02740         real nll_cost;
02741 
02742         // This doesn't work. gcc bug?
02743         // classification_cost->fprop( class_output, target, cost );
02744         classification_cost->CostModule::fprop( class_output, target,
02745                                                 nll_cost );
02746 
02747         real class_error =
02748             ( argmax(class_output) == (int) round(target[0]) ) ? 0
02749                                                                : 1;
02750 
02751         train_costs[nll_cost_index] = nll_cost;
02752         train_costs[class_cost_index] = class_error;
02753 
02754         classification_cost->bpropUpdate( class_output, target, nll_cost,
02755                                           class_gradient );
02756 
02757         classification_module->bpropUpdate( layers[ n_layers-2 ]->expectation,
02758                                             class_output,
02759                                             expectation_gradients[n_layers-2],
02760                                             class_gradient,
02761                                             true );
02762         */
02763     }
02764 
02765     for( int i=n_layers-2 ; i>0 ; i-- )
02766     {
02767         layers[i]->bpropUpdate( layers[i]->activations,
02768                                 layers[i]->getExpectations(),
02769                                 activations_gradients[i],
02770                                 expectations_gradients[i] );
02771 
02772         connections[i-1]->bpropUpdate( layers[i-1]->getExpectations(),
02773                                        layers[i]->activations,
02774                                        expectations_gradients[i-1],
02775                                        activations_gradients[i] );
02776     }
02777 
02778     // Do it AFTER the bprop to avoid interfering with the activations used in
02779     // the bprop (and do not worry that the weights have changed a bit). Note that
02780     // this is inconsistent with the current implementation of the greedy stage.
02781     if ( reconstruct_layerwise )
02782     {
02783         Mat rc = train_costs.column(reconstruction_cost_index);
02784         rc.clear();
02785         for( int index=0 ; index<n_layers-1 ; index++ )
02786         {
02787             layer_inputs.resize(minibatch_size,layers[index]->size);
02788             layer_inputs << layers[index]->getExpectations();
02789             connections[index]->setAsUpInputs(layers[index+1]->getExpectations());
02790             layers[index]->getAllActivations(connections[index], 0, true);
02791             layers[index]->fpropNLL(layer_inputs, train_costs.column(reconstruction_cost_index+index+1));
02792             rc += train_costs.column(reconstruction_cost_index+index+1);
02793         }
02794     }
02795 
02796 
02797 }
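
// Note on the reconstruct_layerwise block in fineTuningStep() above: for every
// layer `index`, the expectations of layer index+1 are propagated back down
// through connections[index] used as an "up input", and the negative
// log-likelihood of the saved layer_inputs under the resulting activations is
// written into its own cost column and accumulated into the global
// reconstruction cost column (reconstruction_cost_index).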
02798 
02800 // contrastiveDivergenceStep //
02802 void DeepBeliefNet::contrastiveDivergenceStep(
02803     const PP<RBMLayer>& down_layer,
02804     const PP<RBMConnection>& connection,
02805     const PP<RBMLayer>& up_layer,
02806     int layer_index, bool nofprop)
02807 {
02808     bool mbatch = minibatch_size > 1 || minibatch_hack;
02809 
02810     // positive phase
02811     if (!nofprop)
02812     {
02813         if (mbatch) {
02814             connection->setAsDownInputs( down_layer->getExpectations() );
02815             up_layer->getAllActivations( connection, 0, true );
02816             up_layer->computeExpectations();
02817         } else {
02818             if( use_corrupted_posDownVal == "for_cd_fprop" )
02819             {
02820                 corrupted_pos_down_val.resize( down_layer->size );
02821                 corrupt_input( down_layer->expectation, corrupted_pos_down_val, layer_index );
02822                 connection->setAsDownInput( corrupted_pos_down_val );
02823             }
02824             else
02825                 connection->setAsDownInput( down_layer->expectation );
02826             up_layer->getAllActivations( connection );
02827             up_layer->computeExpectation();
02828         }
02829     }
02830 
02831     if (mbatch)
02832     {
02833         // accumulate positive stats using the expectation
02834         // we deep-copy because the value will change during negative phase
02835         pos_down_vals.resize(minibatch_size, down_layer->size);
02836         pos_up_vals.resize(minibatch_size, up_layer->size);
02837 
02838         pos_down_vals << down_layer->getExpectations();
02839         pos_up_vals << up_layer->getExpectations();
02840         up_layer->generateSamples();
02841 
02842         // down propagation, starting from a sample of up_layer
02843         if (background_gibbs_update_ratio<1)
02844             // then do some contrastive divergence, otherwise only background Gibbs
02845         {
02846             Mat neg_down_vals;
02847             Mat neg_up_vals;
02848             if( mean_field_contrastive_divergence_ratio > 0 )
02849             {
02850                 mf_cd_neg_down_vals.resize(minibatch_size, down_layer->size);
02851                 mf_cd_neg_up_vals.resize(minibatch_size, up_layer->size);
02852 
02853                 connection->setAsUpInputs( up_layer->getExpectations() );
02854                 down_layer->getAllActivations( connection, 0, true );
02855                 down_layer->computeExpectations();
02856                 // negative phase
02857                 connection->setAsDownInputs( down_layer->getExpectations() );
02858                 up_layer->getAllActivations( connection, 0, mbatch );
02859                 up_layer->computeExpectations();
02860 
02861                 mf_cd_neg_down_vals << down_layer->getExpectations();
02862                 mf_cd_neg_up_vals << up_layer->getExpectations();
02863             }
02864             
02865             if( mean_field_contrastive_divergence_ratio <  1 )
02866             {
02867                 if( use_sample_for_up_layer )
02868                     pos_up_vals << up_layer->samples;
02869                 connection->setAsUpInputs( up_layer->samples );
02870                 down_layer->getAllActivations( connection, 0, true );
02871                 down_layer->computeExpectations();
02872                 down_layer->generateSamples();
02873                 // negative phase
02874                 connection->setAsDownInputs( down_layer->samples );
02875                 up_layer->getAllActivations( connection, 0, mbatch );
02876                 up_layer->computeExpectations();
02877 
02878                 neg_down_vals = down_layer->samples;
02879                 if( use_sample_for_up_layer)
02880                 {
02881                     up_layer->generateSamples();
02882                     neg_up_vals = up_layer->samples;
02883                 }
02884                 else
02885                     neg_up_vals = up_layer->getExpectations();
02886             }
02887 
02888             if (background_gibbs_update_ratio==0)
02889             // update here only if we use ONLY contrastive divergence (no background Gibbs)
02890             {
02891                 if( mean_field_contrastive_divergence_ratio < 1 )
02892                 {
02893                     real lr_dl = down_layer->learning_rate;
02894                     real lr_ul = up_layer->learning_rate;
02895                     real lr_c = connection->learning_rate;
02896 
02897                     down_layer->setLearningRate(lr_dl * (1-mean_field_contrastive_divergence_ratio));
02898                     up_layer->setLearningRate(lr_ul * (1-mean_field_contrastive_divergence_ratio));
02899                     connection->setLearningRate(lr_c * (1-mean_field_contrastive_divergence_ratio));
02900 
02901                     down_layer->update( pos_down_vals, neg_down_vals );
02902                     connection->update( pos_down_vals, pos_up_vals,
02903                                         neg_down_vals, neg_up_vals );
02904                     up_layer->update( pos_up_vals, neg_up_vals );
02905 
02906                     down_layer->setLearningRate(lr_dl);
02907                     up_layer->setLearningRate(lr_ul);
02908                     connection->setLearningRate(lr_c);
02909                 }
02910 
02911                 if( mean_field_contrastive_divergence_ratio > 0 )
02912                 {
02913                     real lr_dl = down_layer->learning_rate;
02914                     real lr_ul = up_layer->learning_rate;
02915                     real lr_c = connection->learning_rate;
02916 
02917                     down_layer->setLearningRate(lr_dl * mean_field_contrastive_divergence_ratio);
02918                     up_layer->setLearningRate(lr_ul * mean_field_contrastive_divergence_ratio);
02919                     connection->setLearningRate(lr_c * mean_field_contrastive_divergence_ratio);
02920 
02921                     down_layer->update( pos_down_vals, mf_cd_neg_down_vals );
02922                     connection->update( pos_down_vals, pos_up_vals,
02923                                         mf_cd_neg_down_vals, mf_cd_neg_up_vals );
02924                     up_layer->update( pos_up_vals, mf_cd_neg_up_vals );
02925 
02926                     down_layer->setLearningRate(lr_dl);
02927                     up_layer->setLearningRate(lr_ul);
02928                     connection->setLearningRate(lr_c);
02929                 }
02930             }
02931             else
02932             {
02933                 connection->accumulatePosStats(pos_down_vals,pos_up_vals);
02934                 cd_neg_down_vals.resize(minibatch_size, down_layer->size);
02935                 cd_neg_up_vals.resize(minibatch_size, up_layer->size);
02936                 cd_neg_down_vals << neg_down_vals;
02937                 cd_neg_up_vals << neg_up_vals;
02938             }
02939         }
02940         //
02941         if (background_gibbs_update_ratio>0)
02942         {
02943             Mat down_state = gibbs_down_state[layer_index];
02944 
02945             if (initialize_gibbs_chain) // initializing or re-initializing the chain
02946             {
02947                 if (background_gibbs_update_ratio==1) // if <1 just use the CD state
02948                 {
02949                     up_layer->generateSamples();
02950                     connection->setAsUpInputs(up_layer->samples);
02951                     down_layer->getAllActivations(connection, 0, true);
02952                     down_layer->generateSamples();
02953                     down_state << down_layer->samples;
02954                 }
02955                 initialize_gibbs_chain=false;
02956             }
02957             // sample up state given down state
02958             connection->setAsDownInputs(down_state);
02959             up_layer->getAllActivations(connection, 0, true);
02960             up_layer->generateSamples();
02961 
02962             // sample down state given up state, to prepare for next time
02963             connection->setAsUpInputs(up_layer->samples);
02964             down_layer->getAllActivations(connection, 0, true);
02965             down_layer->generateSamples();
02966 
02967             // update using down_state and up_layer->getExpectations() for the
02968             // moving average in the negative phase (and optionally blend with the CD statistics)
02969             if (background_gibbs_update_ratio<1)
02970             {
02971                 down_layer->updateCDandGibbs(pos_down_vals,cd_neg_down_vals,
02972                                              down_state,
02973                                              background_gibbs_update_ratio);
02974                 connection->updateCDandGibbs(pos_down_vals,pos_up_vals,
02975                                              cd_neg_down_vals, cd_neg_up_vals,
02976                                              down_state,
02977                                              up_layer->getExpectations(),
02978                                              background_gibbs_update_ratio);
02979                 up_layer->updateCDandGibbs(pos_up_vals,cd_neg_up_vals,
02980                                            up_layer->getExpectations(),
02981                                            background_gibbs_update_ratio);
02982             }
02983             else
02984             {
02985                 down_layer->updateGibbs(pos_down_vals,down_state);
02986                 connection->updateGibbs(pos_down_vals,pos_up_vals,down_state,
02987                                         up_layer->getExpectations());
02988                 up_layer->updateGibbs(pos_up_vals,up_layer->getExpectations());
02989             }
02990 
02991             // Save Gibbs chain's state.
02992             down_state << down_layer->samples;
02993         }
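        // Note: gibbs_down_state[layer_index] persists across calls, so the
        // negative phase above is driven by a background (persistent) Gibbs
        // chain rather than by a chain restarted at the data; when
        // background_gibbs_update_ratio < 1, updateCDandGibbs() is assumed to
        // blend the CD statistics with the chain statistics according to that
        // ratio.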
02994     } else {
02995         // accumulate positive stats using the expectation
02996         // we deep-copy because the value will change during negative phase
02997         pos_down_val.resize( down_layer->size );
02998         pos_up_val.resize( up_layer->size );
02999 
03000         Vec neg_down_val;
03001         Vec neg_up_val;
03002 
03003         pos_down_val << down_layer->expectation;
03004 
03005         pos_up_val << up_layer->expectation;
03006         up_layer->generateSample();
03007             
03008         // negative phase
03009         // down propagation, starting from a sample of up_layer
03010         if( mean_field_contrastive_divergence_ratio > 0 )
03011         {
03012             connection->setAsUpInput( up_layer->expectation );
03013             down_layer->getAllActivations( connection );
03014             down_layer->computeExpectation();
03015             connection->setAsDownInput( down_layer->expectation );
03016             up_layer->getAllActivations( connection, 0, mbatch );
03017             up_layer->computeExpectation();
03018             mf_cd_neg_down_val.resize( down_layer->size );
03019             mf_cd_neg_up_val.resize( up_layer->size );
03020             mf_cd_neg_down_val << down_layer->expectation;
03021             mf_cd_neg_up_val << up_layer->expectation;
03022         }
03023 
03024         if( mean_field_contrastive_divergence_ratio < 1 )
03025         {
03026             if( use_sample_for_up_layer )
03027                 pos_up_val << up_layer->sample;
03028             connection->setAsUpInput( up_layer->sample );
03029             down_layer->getAllActivations( connection );
03030             down_layer->computeExpectation();
03031             down_layer->generateSample();
03032             connection->setAsDownInput( down_layer->sample );
03033             up_layer->getAllActivations( connection, 0, mbatch );
03034             up_layer->computeExpectation();
03035 
03036             neg_down_val = down_layer->sample;
03037             if( use_sample_for_up_layer )
03038             {
03039                 up_layer->generateSample();
03040                 neg_up_val = up_layer->sample;
03041             }
03042             else
03043                 neg_up_val = up_layer->expectation;
03044         }
03045 
03046         // update
03047         if( mean_field_contrastive_divergence_ratio < 1 )
03048         {
03049             real lr_dl = down_layer->learning_rate;
03050             real lr_ul = up_layer->learning_rate;
03051             real lr_c = connection->learning_rate;
03052             
03053             down_layer->setLearningRate(lr_dl * (1-mean_field_contrastive_divergence_ratio));
03054             up_layer->setLearningRate(lr_ul * (1-mean_field_contrastive_divergence_ratio));
03055             connection->setLearningRate(lr_c * (1-mean_field_contrastive_divergence_ratio));
03056            
03057             if( use_corrupted_posDownVal == "for_cd_update" )
03058             {
03059                 corrupted_pos_down_val.resize( down_layer->size );
03060                 corrupt_input( pos_down_val, corrupted_pos_down_val, layer_index );
03061                 down_layer->update( corrupted_pos_down_val, neg_down_val );
03062                 connection->update( corrupted_pos_down_val, pos_up_val,
03063                                 neg_down_val, neg_up_val );
03064             }
03065             else
03066             {
03067                 down_layer->update( pos_down_val, neg_down_val );
03068                 connection->update( pos_down_val, pos_up_val,
03069                                 neg_down_val, neg_up_val );
03070             }
03071             up_layer->update( pos_up_val, neg_up_val );
03072             
03073             down_layer->setLearningRate(lr_dl);
03074             up_layer->setLearningRate(lr_ul);
03075             connection->setLearningRate(lr_c);
03076         }
03077 
03078         if( mean_field_contrastive_divergence_ratio > 0 )
03079         {
03080             real lr_dl = down_layer->learning_rate;
03081             real lr_ul = up_layer->learning_rate;
03082             real lr_c = connection->learning_rate;
03083             
03084             down_layer->setLearningRate(lr_dl * mean_field_contrastive_divergence_ratio);
03085             up_layer->setLearningRate(lr_ul * mean_field_contrastive_divergence_ratio);
03086             connection->setLearningRate(lr_c * mean_field_contrastive_divergence_ratio);
03087             
03088             if( use_corrupted_posDownVal == "for_cd_update" )
03089             {
03090                 corrupted_pos_down_val.resize( down_layer->size );
03091                 corrupt_input( pos_down_val, corrupted_pos_down_val, layer_index );
03092                 down_layer->update( corrupted_pos_down_val, mf_cd_neg_down_val );
03093                 connection->update( corrupted_pos_down_val, pos_up_val,
03094                                 mf_cd_neg_down_val, mf_cd_neg_up_val );
03095             }
03096             else
03097             {
03098                 down_layer->update( pos_down_val, mf_cd_neg_down_val );
03099                 connection->update( pos_down_val, pos_up_val,
03100                                 mf_cd_neg_down_val, mf_cd_neg_up_val );
03101             }
03102             up_layer->update( pos_up_val, mf_cd_neg_up_val );
03103             
03104             down_layer->setLearningRate(lr_dl);
03105             up_layer->setLearningRate(lr_ul);
03106             connection->setLearningRate(lr_c);
03107         }
03108     }
03109 }
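
// Illustrative sketch: a bare single-example CD-1 update for one RBM, using
// the same public RBMLayer / RBMConnection calls as contrastiveDivergenceStep()
// above, but without the mean-field, corruption and background-Gibbs options.
// The function name is hypothetical and it is not called anywhere.
void sketchSingleExampleCD1( const PP<RBMLayer>& down,
                             const PP<RBMConnection>& connection,
                             const PP<RBMLayer>& up )
{
    // Positive phase: propagate the visible expectation upward and save the
    // positive statistics before they are overwritten.
    connection->setAsDownInput( down->expectation );
    up->getAllActivations( connection );
    up->computeExpectation();

    Vec pos_down, pos_up;
    pos_down.resize( down->size );
    pos_up.resize( up->size );
    pos_down << down->expectation;
    pos_up << up->expectation;

    // Negative phase: one Gibbs half-step down then up, starting from a
    // sample of the hidden layer.
    up->generateSample();
    connection->setAsUpInput( up->sample );
    down->getAllActivations( connection );
    down->computeExpectation();
    down->generateSample();
    connection->setAsDownInput( down->sample );
    up->getAllActivations( connection );
    up->computeExpectation();

    // Updates follow the usual CD rule, roughly
    //     delta W  proportional to  <v h>_positive - <v h>_negative
    // with the learning rates already set on the three modules.
    down->update( pos_down, down->sample );
    connection->update( pos_down, pos_up, down->sample, up->expectation );
    up->update( pos_up, up->expectation );
}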
03110 
03111 
03113 // computeOutput //
03115 void DeepBeliefNet::computeOutput(const Vec& input, Vec& output) const
03116 {
03117 
03118     // Compute the output from the input.
03119     output.resize(0);
03120 
03121     // fprop
03122     layers[0]->expectation << input;
03123 
03124     if(reconstruct_layerwise)
03125         reconstruction_costs[0]=0;
03126 
03127     for( int i=0 ; i<n_layers-2 ; i++ )
03128     {
03129         if( greedy_target_layers.length() && greedy_target_layers[i] )
03130         {
03131             connections[i]->setAsDownInput( layers[i]->expectation );
03132             layers[i+1]->getAllActivations( connections[i] );
03133             
03134             greedy_target_layers[i]->activation.clear();
03135             greedy_target_layers[i]->activation += greedy_target_layers[i]->bias;
03136             for( int c=0; c<n_classes; c++ )
03137             {
03138                 // Compute class free-energy
03139                 layers[i+1]->activation.toMat(layers[i+1]->size,1) += greedy_target_connections[i]->weights.column(c);
03140                 greedy_target_layers[i]->activation[c] -= layers[i+1]->freeEnergyContribution(layers[i+1]->activation);
03141                 
03142                 // Compute class dependent expectation and store it
03143                 layers[i+1]->expectation_is_not_up_to_date();
03144                 layers[i+1]->computeExpectation();
03145                 greedy_target_expectations[i][c] << layers[i+1]->expectation;
03146                 
03147                 // Remove class-dependent energy for next free-energy computations
03148                 layers[i+1]->activation.toMat(layers[i+1]->size,1) -= greedy_target_connections[i]->weights.column(c);
03149             }
03150             greedy_target_layers[i]->expectation_is_not_up_to_date();
03151             greedy_target_layers[i]->computeExpectation();
03152             
03153             // Computing next layer representation
03154             layers[i+1]->expectation.clear();
03155             Vec expectation = layers[i+1]->expectation;
03156             for( int c=0; c<n_classes; c++ )
03157             {
03158                 Vec expectation_c = greedy_target_expectations[i][c];
03159                 real p_c = greedy_target_layers[i]->expectation[c];
03160                 multiplyScaledAdd(expectation_c, real(1.), p_c, expectation);
03161             }
03162         }
03163         else
03164         {
03165             connections[i]->setAsDownInput( layers[i]->expectation );
03166             layers[i+1]->getAllActivations( connections[i] );
03167             layers[i+1]->computeExpectation();
03168         }
03169         if( i_output_layer==i && (!use_classification_cost && !final_module))
03170         {
03171             output.resize(outputsize());
03172             output << layers[ i ]->expectation;
03173         }
03174 
03175         if (reconstruct_layerwise)
03176         {
03177             layer_input.resize(layers[i]->size);
03178             layer_input << layers[i]->expectation;
03179             connections[i]->setAsUpInput(layers[i+1]->expectation);
03180             layers[i]->getAllActivations(connections[i]);
03181             real rc = reconstruction_costs[i+1] = layers[i]->fpropNLL( layer_input );
03182             reconstruction_costs[0] += rc;
03183         }
03184     }
03185     if( i_output_layer>=n_layers-2 && (!use_classification_cost && !final_module))
03186     {
03188         if(i_output_layer==n_layers-1)
03189         {
03190             connections[ n_layers-2 ]->setAsDownInput(layers[ n_layers-2 ]->expectation );
03191             layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
03192             layers[ n_layers-1 ]->computeExpectation();
03193         }
03194         output.resize(outputsize());
03195         output << layers[ i_output_layer ]->expectation;
03196     }
03197 
03198     if( use_classification_cost )
03199         classification_module->fprop( layers[ n_layers-2 ]->expectation,
03200                                       output );
03201 
03202     if( final_cost || (!partial_costs.isEmpty() && partial_costs[n_layers-2] ))
03203     {
03204         if( greedy_target_layers.length() && greedy_target_layers[n_layers-2] )
03205         {
03206             connections[n_layers-2]->setAsDownInput( layers[n_layers-2]->expectation );
03207             layers[n_layers-1]->getAllActivations( connections[n_layers-2] );
03208             
03209             greedy_target_layers[n_layers-2]->activation.clear();
03210             greedy_target_layers[n_layers-2]->activation += 
03211                 greedy_target_layers[n_layers-2]->bias;
03212             for( int c=0; c<n_classes; c++ )
03213             {
03214                 // Compute class free-energy
03215                 layers[n_layers-1]->activation.toMat(layers[n_layers-1]->size,1) += 
03216                     greedy_target_connections[n_layers-2]->weights.column(c);
03217                 greedy_target_layers[n_layers-2]->activation[c] -= 
03218                     layers[n_layers-1]->freeEnergyContribution(layers[n_layers-1]->activation);
03219                 
03220                 // Compute class dependent expectation and store it
03221                 layers[n_layers-1]->expectation_is_not_up_to_date();
03222                 layers[n_layers-1]->computeExpectation();
03223                 greedy_target_expectations[n_layers-2][c] << layers[n_layers-1]->expectation;
03224                 
03225                 // Remove class-dependent energy for next free-energy computations
03226                 layers[n_layers-1]->activation.toMat(layers[n_layers-1]->size,1) -= 
03227                     greedy_target_connections[n_layers-2]->weights.column(c);
03228             }
03229             greedy_target_layers[n_layers-2]->expectation_is_not_up_to_date();
03230             greedy_target_layers[n_layers-2]->computeExpectation();
03231             
03232             // Computing next layer representation
03233             layers[n_layers-1]->expectation.clear();
03234             Vec expectation = layers[n_layers-1]->expectation;
03235             for( int c=0; c<n_classes; c++ )
03236             {
03237                 Vec expectation_c = greedy_target_expectations[n_layers-2][c];
03238                 real p_c = greedy_target_layers[n_layers-2]->expectation[c];
03239                 multiplyScaledAdd(expectation_c,real(1.), p_c, expectation);
03240             }
03241         }
03242         else
03243         {
03244             connections[ n_layers-2 ]->setAsDownInput(
03245                 layers[ n_layers-2 ]->expectation );
03246             layers[ n_layers-1 ]->getAllActivations( connections[ n_layers-2 ] );
03247             layers[ n_layers-1 ]->computeExpectation();
03248         }
03249 
03250         if( final_module )
03251         {
03252             final_module->fprop( layers[ n_layers-1 ]->expectation,
03253                                  final_cost_input );
03254             output.append( final_cost_input );
03255         }
03256         else
03257         {
03258             output.append( layers[ n_layers-1 ]->expectation );
03259         }
03260 
03261         if (reconstruct_layerwise)
03262         {
03263             layer_input.resize(layers[n_layers-2]->size);
03264             layer_input << layers[n_layers-2]->expectation;
03265             connections[n_layers-2]->setAsUpInput(layers[n_layers-1]->expectation);
03266             layers[n_layers-2]->getAllActivations(connections[n_layers-2]);
03267             real rc = reconstruction_costs[n_layers-1] = layers[n_layers-2]->fpropNLL( layer_input );
03268             reconstruction_costs[0] += rc;
03269         }
03270     }
03271 
03272     if(!use_classification_cost && !final_module)
03273     {
03275         if (reconstruct_layerwise)
03276         {
03277             layer_input.resize(layers[n_layers-2]->size);
03278             layer_input << layers[n_layers-2]->expectation;
03279             connections[n_layers-2]->setAsUpInput(layers[n_layers-1]->expectation);
03280             layers[n_layers-2]->getAllActivations(connections[n_layers-2]);
03281             real rc = reconstruction_costs[n_layers-1] = layers[n_layers-2]->fpropNLL( layer_input );
03282             reconstruction_costs[0] += rc;
03283         }
03284     }
03285 }
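
// Note on the layout of `output`: when use_classification_cost is true, the
// first entries (presumably n_classes of them) are the class posteriors
// computed by classification_module; when a final_cost (or a partial cost on
// the top layer) is present, the final_module output, or the expectation of
// the top layer if there is no final_module, is appended after them.
// Otherwise the output is simply the expectation of layer i_output_layer.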
03286 
03287 
03288 void DeepBeliefNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
03289                                            const Vec& target, Vec& costs) const
03290 {
03291 
03292     // Compute the costs from *already* computed output.
03293     costs.resize( cost_names.length() );
03294     costs.fill( MISSING_VALUE );
03295 
03296     // To make the code cleaner and independent of the order in which this method
03297     // and computeOutput are called, this should be moved into a redefinition of computeOutputAndCosts.
03298     if( use_classification_cost )
03299     {
03300         classification_cost->CostModule::fprop( output.subVec(0, n_classes),
03301                 target, costs[nll_cost_index] );
03302 
03303         costs[class_cost_index] =
03304             (argmax(output.subVec(0, n_classes)) == (int) round(target[0]))? 0 : 1;
03305     }
03306 
03307     if( final_cost )
03308     {
03309         int init = use_classification_cost ? n_classes : 0;
03310         final_cost->fprop( output.subVec( init, output.size() - init ),
03311                            target, final_cost_value );
03312 
03313         costs.subVec(final_cost_index, final_cost_value.length())
03314             << final_cost_value;
03315     }
03316 
03317     if( !partial_costs.isEmpty() )
03318     {
03319         Vec pcosts;
03320         for( int i=0 ; i<n_layers-1 ; i++ )
03321             // propagate into local cost associated to output of layer i+1
03322             if( partial_costs[ i ] )
03323             {
03324                 partial_costs[ i ]->fprop( layers[ i+1 ]->expectation,
03325                                            target, pcosts);
03326 
03327                 costs.subVec(partial_costs_indices[i], pcosts.length())
03328                     << pcosts;
03329             }
03330     }
03331 
03332     if( !greedy_target_layers.isEmpty() )
03333     {
03334         target_one_hot.clear();
03335         fill_one_hot( target_one_hot, 
03336                       (int) round(target[0]), real(0.), real(1.) );
03337         for( int i=0 ; i<n_layers-1 ; i++ )
03338             if( greedy_target_layers[i] )
03339                 costs[greedy_target_layer_nlls_index+i] = 
03340                     greedy_target_layers[i]->fpropNLL(target_one_hot);
03341             else
03342                 costs[greedy_target_layer_nlls_index+i] = MISSING_VALUE;
03343     }
03344 
03345     if (reconstruct_layerwise)
03346         costs.subVec(reconstruction_cost_index, reconstruction_costs.length())
03347             << reconstruction_costs;
03348 
03349 }
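
// Illustrative usage sketch: computing the output and the associated test
// costs for a single example through the public PLearner interface. The
// function name and the assumption that `dbn` is already trained are
// hypothetical.
void exampleComputeOutputAndCosts( const DeepBeliefNet& dbn,
                                   const Vec& input, const Vec& target )
{
    Vec output, costs;
    dbn.computeOutput( input, output );                          // fprop through the stack
    dbn.computeCostsFromOutputs( input, output, target, costs ); // NLL, class error, etc.
    // `costs` is indexed by getTestCostNames(); entries not computed for the
    // current configuration are left as MISSING_VALUE.
}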
03350 
03353 void DeepBeliefNet::computeOutputsAndCosts(const Mat& inputs, const Mat& targets,
03354                                       Mat& outputs, Mat& costs) const
03355 {
03356     int nsamples = inputs.length();
03357     PLASSERT( targets.length() == nsamples );
03358     outputs.resize( nsamples, outputsize() );
03359     costs.resize( nsamples, cost_names.length() );
03360     costs.fill( MISSING_VALUE );
03361     for (int isample = 0; isample < nsamples; isample++ )
03362     {
03363         Vec in_i = inputs(isample);
03364         Vec out_i = outputs(isample);
03365         computeOutput(in_i, out_i);
03366         if( !partial_costs.isEmpty() )
03367         {
03368             Vec pcosts;
03369             for( int i=0 ; i<n_layers-1 ; i++ )
03370                 // propagate into local cost associated to output of layer i+1
03371                 if( partial_costs[ i ] )
03372                 {
03373                     partial_costs[ i ]->fprop( layers[ i+1 ]->expectation,
03374                                                targets(isample), pcosts);
03375 
03376                     costs(isample).subVec(partial_costs_indices[i], pcosts.length())
03377                         << pcosts;
03378                 }
03379         }
03380         if (reconstruct_layerwise)
03381            costs(isample).subVec(reconstruction_cost_index, reconstruction_costs.length())
03382                 << reconstruction_costs;
03383     }
03384     computeClassifAndFinalCostsFromOutputs(inputs, outputs, targets, costs);
03385 }
03386 
03387 void DeepBeliefNet::computeClassifAndFinalCostsFromOutputs(const Mat& inputs, const Mat& outputs,
03388                                            const Mat& targets, Mat& costs) const
03389 {
03390     // Compute the costs from *already* computed output.
03391 
03392     int nsamples = inputs.length();
03393     PLASSERT( nsamples > 0 );
03394     PLASSERT( targets.length() == nsamples );
03395     PLASSERT( targets.width() == 1 );
03396     PLASSERT( outputs.length() == nsamples );
03397     PLASSERT( costs.length() == nsamples );
03398 
03399 
03400     if( use_classification_cost )
03401     {
03402         Vec pcosts;
03403         classification_cost->CostModule::fprop( outputs.subMat(0, 0, nsamples, n_classes),
03404                                                 targets, pcosts );
03405         costs.subMat( 0, nll_cost_index, nsamples, 1) << pcosts;
03406 
03407         for (int isample = 0; isample < nsamples; isample++ )
03408             costs(isample,class_cost_index) =
03409                 (argmax(outputs(isample).subVec(0, n_classes)) == (int) round(targets(isample,0))) ? 0 : 1;
03410     }
03411 
03412     if( final_cost )
03413     {
03414         int init = use_classification_cost ? n_classes : 0;
03415         final_cost->fprop( outputs.subMat(0, init, nsamples, outputs(0).size() - init ),
03416                            targets, final_cost_values );
03417 
03418         costs.subMat(0, final_cost_index, nsamples, final_cost_values.width())
03419             << final_cost_values;
03420     }
03421 
03422     if( !partial_costs.isEmpty() )
03423         PLERROR("cannot compute partial costs in DeepBeliefNet::computeCostsFromOutputs(Mat&, Mat&, Mat&, Mat&)"
03424                 "(expectations are not up to date in the batch version)");
03425 }
03426 
03428 //  corrupt_input  //
03430 void DeepBeliefNet::corrupt_input(const Vec& input, Vec& corrupted_input, int layer)
03431 {
03432     corrupted_input.resize(input.length());
03433 
03434     if( noise_type == "masking_noise" )
03435     {
03436         corrupted_input << input;
03437         if( fraction_of_masked_inputs != 0 )
03438         {
03439             random_gen->shuffleElements(expectation_indices[layer]);
03440             if( mask_with_pepper_salt )
03441                 for( int j=0 ; j < round(fraction_of_masked_inputs*input.length()) ; j++)
03442                     corrupted_input[ expectation_indices[layer][j] ] = random_gen->binomial_sample(prob_salt_noise);
03443             else
03444                 for( int j=0 ; j < round(fraction_of_masked_inputs*input.length()) ; j++)
03445                     corrupted_input[ expectation_indices[layer][j] ] = 0;
03446         }
03447     }
03448  /*   else if( noise_type == "binary_sampling" )
03449     {
03450         for( int i=0; i<corrupted_input.length(); i++ )
03451             corrupted_input[i] = random_gen->binomial_sample((input[i]-0.5)*binary_sampling_noise_parameter+0.5);
03452     }
03453     else if( noise_type == "gaussian" )
03454     {
03455         for( int i=0; i<corrupted_input.length(); i++ )
03456             corrupted_input[i] = input[i] +
03457                 random_gen->gaussian_01() * gaussian_std;
03458     }
03459     else
03460             PLERROR("In StackedAutoassociatorsNet::corrupt_input(): "
03461                     "missing_data_method %s not valid with noise_type %s",
03462                      missing_data_method.c_str(), noise_type.c_str());
03463     }*/
03464     else if( noise_type == "none" )
03465         corrupted_input << input;
03466     else
03467         PLERROR("In DeepBeliefNet::corrupt_input(): noise_type %s not valid", noise_type.c_str());
03468 }
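
// For example, with noise_type == "masking_noise", an input of length 8 and
// fraction_of_masked_inputs == 0.25, round(0.25 * 8) = 2 randomly chosen
// components are corrupted: they are set to 0, or, when mask_with_pepper_salt
// is true, to an independent Bernoulli(prob_salt_noise) sample ("salt and
// pepper" noise).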
03469 
03470 
03471 void DeepBeliefNet::test(VMat testset, PP<VecStatsCollector> test_stats, VMat testoutputs, VMat testcosts) const
03472 {
03473 
03474     //  Re-implementing simply because we want to measure the time it takes to
03475     //  do the testing. The reset is there for two purposes:
03476     //  1. to have fine-grained statistics at each call of test()
03477     //  2. to be able to have a more meaningful cumulative_testing_time
03478     //
03479     //  BIG Nota Bene:
03480     //  Retrieve the statistic with E[testN.E[cumulative_test_time]], where N is
03481     //  the index of the last split that you're testing.
03482     //  E[testN-1.E[cumulative_test_time]] is the cumulative test time up to and
03483     //  including the (N-1)th split, so taken alone it is a pretty meaningless
03484     //  number.
03485 
03486     Profiler::reset("testing");
03487     Profiler::start("testing");
03488 
03489     inherited::test(testset, test_stats, testoutputs, testcosts);
03490 
03491     Profiler::end("testing");
03492 
03493     const Profiler::Stats& stats = Profiler::getStats("testing");
03494 
03495     real ticksPerSec = Profiler::ticksPerSecond();
03496     real cpu_time = (stats.user_duration+stats.system_duration)/ticksPerSec;
03497     cumulative_testing_time += cpu_time;
03498 
03499     if (testcosts)
03500         // if it is used (usually not) testcosts is a VMat that is of size
03501         // nexamples x ncosts. The last column will have missing values.
03502         // We just need to put a value in one of the rows of that column.
03503         testcosts->put(0,cumulative_testing_time_cost_index,cumulative_testing_time);
03504 
03505     if( !test_stats )
03506     {
03507         test_stats = new VecStatsCollector();
03508         test_stats->setFieldNames(getTestCostNames());
03509     }
03510     if (test_stats) {
03511         // Here we simply update the corresponding stat index
03512         Vec test_time_stats(test_stats->length(), MISSING_VALUE);
03513         test_time_stats[cumulative_testing_time_cost_index] =
03514             cumulative_testing_time;
03515         test_stats->update(test_time_stats);
03516         test_stats->finalize();
03517     }
03518 }
03519 
03520 
03521 TVec<string> DeepBeliefNet::getTestCostNames() const
03522 {
03523     // Return the names of the costs computed by computeCostsFromOutputs
03524     // (these may or may not be exactly the same as what's returned by
03525     // getTrainCostNames).
03526 
03527     return cost_names;
03528 }
03529 
03530 TVec<string> DeepBeliefNet::getTrainCostNames() const
03531 {
03532     return cost_names;
03533 }
03534 
03535 
03536 //#####  Helper functions  ##################################################
03537 
03538 void DeepBeliefNet::setLearningRate( real the_learning_rate )
03539 {
03540     for( int i=0 ; i<n_layers-1 ; i++ )
03541     {
03542         layers[i]->setLearningRate( the_learning_rate );
03543         connections[i]->setLearningRate( the_learning_rate );
03544         if( partial_costs.length() != 0 && partial_costs[i] )
03545             partial_costs[i]->setLearningRate( the_learning_rate );
03546     }
03547     layers[n_layers-1]->setLearningRate( the_learning_rate );
03548 
03549     if( use_classification_cost )
03550     {
03551         classification_module->joint_connection->setLearningRate(
03552             the_learning_rate );
03553         joint_layer->setLearningRate( the_learning_rate );
03554     }
03555 
03556     if( final_module )
03557         final_module->setLearningRate( the_learning_rate );
03558 
03559     if( final_cost )
03560         final_cost->setLearningRate( the_learning_rate );
03561 
03562     for( int i=0 ; i<generative_connections.length() ; i++ )
03563         generative_connections[i]->setLearningRate( the_learning_rate );
03564 
03565     for( int i=0; i<greedy_target_connections.length(); i++ )
03566         greedy_target_connections[i]->setLearningRate( the_learning_rate );
03567 
03568     for( int i=0; i<greedy_target_layers.length(); i++ )
03569         greedy_target_layers[i]->setLearningRate( the_learning_rate );
03570 }
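
// Note: train() is expected to call this helper with a decreasing schedule,
// presumably of the form
//
//     setLearningRate( grad_learning_rate / (1. + grad_decrease_ct * t) )
//
// during fine-tuning (with t the number of fine-tuning steps so far), and the
// analogous cd_learning_rate / cd_decrease_ct schedule during the greedy
// contrastive-divergence phase.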
03571 
03572 
03573 
03574 
03575 TVec<Vec> DeepBeliefNet::fantasizeKTimeOnMultiSrcImg(const int KTime, const Mat& srcImg, const Vec& sample, bool alwaysFromSrcImg)
03576 {
03577     int n=srcImg.length();
03578     TVec<Vec> output(0);
03579 
03580     for( int i=0; i<n; i++ )
03581     {
03582         const Vec img_i = srcImg(i);
03583         TVec<Vec> outputTmp;
03584         outputTmp = fantasizeKTime(KTime, img_i, sample, alwaysFromSrcImg);
03585         output = concat(output, outputTmp);
03586     }
03587 
03588     return output;
03589 }
03590 
03591 
03592 TVec<Vec> DeepBeliefNet::fantasizeKTime(const int KTime, const Vec& srcImg, const Vec& sample, bool alwaysFromSrcImg)
03593 {
03594     if(sample.size() > n_layers-1)
03595         PLERROR("In DeepBeliefNet::fantasize():"
03596         " Size of sample (%i) should be <= "
03597         "number of hidden layer (%i).",sample.size(), n_layers-1);
03598 
03599     int n_hlayers_used = sample.size();
03600 
03601     TVec<Vec> fantaImagesObtained(KTime+1);
03602     fantaImagesObtained[0].resize(srcImg.size());
03603     fantaImagesObtained[0] << srcImg;
03604     layers[0]->setExpectation(srcImg);
03605 
03606     for( int k=0 ; k<KTime ; k++ )
03607     {
03608         fantaImagesObtained[k+1].resize(srcImg.size());
03609         for( int i=0 ; i<n_hlayers_used; i++ )
03610         {
03611             connections[i]->setAsDownInput( layers[i]->expectation );
03612             layers[i+1]->getAllActivations( connections[i], 0, false );
03613             layers[i+1]->computeExpectation();
03614         }
03615 
03616         for( int i=n_hlayers_used-1 ; i>=0; i-- )
03617         {
03618             if( sample[i] == 1 )
03619             {
03620                 Vec expectDecode(layers[i+1]->size);
03621                 expectDecode << layers[i+1]->expectation;
03622                 for( int j=0; j<expectDecode.size(); j++ )
03623                     expectDecode[j] = random_gen->binomial_sample(expectDecode[j]);
03624                 layers[i+1]->setExpectation(expectDecode);
03625             }
03626             connections[i]->setAsUpInput( layers[i+1]->expectation );
03627             layers[i]->getAllActivations( connections[i], 0, false );
03628             layers[i]->computeExpectation();
03629         }
03630         fantaImagesObtained[k+1] << layers[0]->expectation;
03631         if( alwaysFromSrcImg )
03632             layers[0]->setExpectation(srcImg);
03633     }
03634     return fantaImagesObtained;
03635 }
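
// Illustrative usage sketch (assuming fantasizeKTime() is accessible to the
// caller; `dbn` and `srcImg` are hypothetical): run 5 up-down passes from a
// source image, Gibbs-sampling only at the topmost hidden layer and always
// restarting from the source image.
//
//     Vec sample_flags( dbn.n_layers - 1 );
//     sample_flags.fill( 0 );
//     sample_flags[ dbn.n_layers - 2 ] = 1;   // sample at the top hidden layer
//     TVec<Vec> frames = dbn.fantasizeKTime( 5, srcImg, sample_flags, true );
//
// frames[0] is the source image and frames[k] the reconstruction obtained
// after k up-down passes.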
03636 
03637 } // end of namespace PLearn
03638 
03639 
03640 /*
03641   Local Variables:
03642   mode:c++
03643   c-basic-offset:4
03644   c-file-style:"stroustrup"
03645   c-file-offsets:((innamespace . 0)(inline-open . 0))
03646   indent-tabs-mode:nil
03647   fill-column:79
03648   End:
03649 */
03650 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :