PLearn 0.1
StackedAutoassociatorsNet.cc
// -*- C++ -*-

// StackedAutoassociatorsNet.cc
//
// Copyright (C) 2007 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#define PL_LOG_MODULE_NAME "StackedAutoassociatorsNet"

#include "StackedAutoassociatorsNet.h"
#include <plearn/io/pl_log.h>
#include <plearn/sys/Profiler.h>
#include <plearn/io/load_and_save.h>

#define minibatch_hack 0 // Do we force the minibatch setting? (debug hack)

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    StackedAutoassociatorsNet,
    "Neural net, trained layer-wise in a greedy fashion using autoassociators",
    "It is highly inspired by the DeepBeliefNet class, and can use the\n"
    "same RBMLayer and RBMConnection components.\n"
    );

StackedAutoassociatorsNet::StackedAutoassociatorsNet() :
    greedy_learning_rate( 0. ),
    greedy_decrease_ct( 0. ),
    fine_tuning_learning_rate( 0. ),
    fine_tuning_decrease_ct( 0. ),
    l1_neuron_decay( 0. ),
    l1_neuron_decay_center( 0 ),
    batch_size( 1 ),
    online( false ),
    compute_all_test_costs( false ),
    reconstruct_hidden( false ),
    noise_type( "masking_noise" ),
    missing_data_method( "binomial_complementary"),
    corrupted_data_weight( 1 ),
    data_weight( 1 ),
    fraction_of_masked_inputs( 0. ),
    probability_of_masked_inputs( 0. ),
    probability_of_masked_target( 0. ),
    mask_with_mean( false ),
    mask_with_pepper_salt( false ),
    pep_salt_zero_centered( 0. ),
    renoising( false ),
    noisy( 0 ),
    prob_salt_noise( 0.5 ),
    gaussian_std( 1. ),
    binary_sampling_noise_parameter( 1. ),
    unsupervised_nstages( 0 ),
    unsupervised_fine_tuning_learning_rate( 0. ),
    unsupervised_fine_tuning_decrease_ct( 0. ),
    nb_corrupted_layer( -1 ),
    mask_input_layer_only( false ),
    mask_input_layer_only_in_unsupervised_fine_tuning( false ),
    train_stats_window( -1 ),
    learnerExpdir(""),
    save_learner_before_fine_tuning( false ),
    keep_online_representations( false ),
    n_layers( 0 ),
    unsupervised_stage( 0 ),
    minibatch_size( 0 ),
    currently_trained_layer( 0 )
{
    // random_gen will be initialized in PLearner::build_()
    random_gen = new PRandom();
    nstages = 0;
    // Use a larger test minibatch by default, for faster test time;
    // this does not change the results.
    if(test_minibatch_size==1)
        test_minibatch_size = 128;
}

void StackedAutoassociatorsNet::declareOptions(OptionList& ol)
{
    declareOption(ol, "greedy_learning_rate",
                  &StackedAutoassociatorsNet::greedy_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the autoassociator "
                  "gradient descent training");
    declareOption(ol, "greedy_decrease_ct",
                  &StackedAutoassociatorsNet::greedy_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "the autoassociator\n"
                  "gradient descent training. When a hidden layer has finished "
                  "its training,\n"
                  "the learning rate is reset to its initial value.\n");
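
    // A sketch of the decrease-constant schedule used by this learner
    // (variable names here are illustrative, not actual code from this
    // file):
    //
    //     lr = initial_learning_rate / (1. + decrease_ct * t);
    //
    // where t counts the examples seen so far in the current phase, so
    // the learning rate decays hyperbolically and is reset whenever a
    // new layer starts its greedy training.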

    declareOption(ol, "fine_tuning_learning_rate",
                  &StackedAutoassociatorsNet::fine_tuning_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the fine tuning gradient descent");

    declareOption(ol, "fine_tuning_decrease_ct",
                  &StackedAutoassociatorsNet::fine_tuning_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during "
                  "fine tuning\n"
                  "gradient descent.\n");

    declareOption(ol, "l1_neuron_decay",
                  &StackedAutoassociatorsNet::l1_neuron_decay,
                  OptionBase::buildoption,
                  "L1 penalty weight on the hidden layers, to encourage "
                  "sparsity during\n"
                  "the greedy unsupervised phases.\n"
                  );

    declareOption(ol, "l1_neuron_decay_center",
                  &StackedAutoassociatorsNet::l1_neuron_decay_center,
                  OptionBase::buildoption,
                  "Value around which the L1 penalty should be centered, i.e.\n"
                  "   L1(h) = | h - l1_neuron_decay_center |\n"
                  "where h are the values of the neurons.\n");
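
    // For reference, the (sub)gradient this penalty adds on a hidden
    // unit h during the greedy phases is simply
    //
    //     dL1/dh = l1_neuron_decay * sign(h - l1_neuron_decay_center)
    //
    // which is accumulated into the gradient backpropagated through the
    // hidden layer.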

    declareOption(ol, "training_schedule",
                  &StackedAutoassociatorsNet::training_schedule,
                  OptionBase::buildoption,
                  "Number of examples to use during each phase of greedy pre-training.\n"
                  "The number of fine-tuning steps is defined by nstages.\n");

    declareOption(ol, "layers", &StackedAutoassociatorsNet::layers,
                  OptionBase::buildoption,
                  "The layers of units in the network. The first element\n"
                  "of this vector should be the input layer and the\n"
                  "subsequent elements should be the hidden layers. The\n"
                  "output layer should not be included in layers.\n");

    declareOption(ol, "reconstruction_layers", &StackedAutoassociatorsNet::reconstruction_layers,
                  OptionBase::buildoption,
                  "The reconstruction layers in the network (if different from the encoding layers).\n"
                  "The first element of this vector should be the layer for the input layer reconstruction and the\n"
                  "subsequent elements should be the layers for the reconstruction of the hidden layers.\n");

    declareOption(ol, "connections", &StackedAutoassociatorsNet::connections,
                  OptionBase::buildoption,
                  "The weights of the connections between the layers");

    declareOption(ol, "reconstruction_connections",
                  &StackedAutoassociatorsNet::reconstruction_connections,
                  OptionBase::buildoption,
                  "The weights of the reconstruction connections between the "
                  "layers");

    declareOption(ol, "correlation_connections",
                  &StackedAutoassociatorsNet::correlation_connections,
                  OptionBase::buildoption,
                  "Optional weights to capture correlation and anti-correlation\n"
                  "in the hidden layers. They must have the same input and\n"
                  "output sizes, compatible with their corresponding layers.");

    declareOption(ol, "direct_connections",
                  &StackedAutoassociatorsNet::direct_connections,
                  OptionBase::buildoption,
                  "Optional weights from each input to all other inputs'\n"
                  "reconstruction, which can capture simple (linear or log-linear)\n"
                  "correlations between inputs.");

    declareOption(ol, "final_module", &StackedAutoassociatorsNet::final_module,
                  OptionBase::buildoption,
                  "Module that takes as input the output of the last layer\n"
                  "(layers[n_layers-1]), and feeds its output to final_cost\n"
                  "which defines the fine-tuning criteria.\n"
                 );

    declareOption(ol, "final_cost", &StackedAutoassociatorsNet::final_cost,
                  OptionBase::buildoption,
                  "The cost function to be applied on top of the neural network\n"
                  "(i.e. at the output of final_module). Its gradients will be\n"
                  "backpropagated to final_module and then backpropagated to\n"
                  "the layers.\n"
                  );

    declareOption(ol, "partial_costs", &StackedAutoassociatorsNet::partial_costs,
                  OptionBase::buildoption,
                  "Additional supervised cost functions (one per hidden layer) "
                  "to be applied on\n"
                  "top of each hidden layer during the autoassociator "
                  "training stages.\n"
                  "The gradients for these costs are not backpropagated to "
                  "previous layers.\n"
        );

    declareOption(ol, "batch_size", &StackedAutoassociatorsNet::batch_size,
                  OptionBase::buildoption,
                  "Training batch size (1=stochastic learning, 0=full batch"
                  " learning)");

    declareOption(ol, "online", &StackedAutoassociatorsNet::online,
                  OptionBase::buildoption,
                  "If true then all unsupervised training stages (as well as\n"
                  "the fine-tuning stage) are done simultaneously.\n");

    declareOption(ol, "partial_costs_weights",
                  &StackedAutoassociatorsNet::partial_costs_weights,
                  OptionBase::buildoption,
                  "Relative weights of the partial costs. If not defined,\n"
                  "weights of 1 will be assumed for all partial costs.\n"
        );

    declareOption(ol, "greedy_target_connections",
                  &StackedAutoassociatorsNet::greedy_target_connections,
                  OptionBase::buildoption,
                  "Optional target connections during greedy training.\n"
                  "They connect the target with the hidden layer from which\n"
                  "the autoassociator's cost (including partial cost) is computed\n"
                  "(only during training).\n"
                  "Currently works only if the target is a class index.\n"
        );

    declareOption(ol, "compute_all_test_costs",
                  &StackedAutoassociatorsNet::compute_all_test_costs,
                  OptionBase::buildoption,
                  "Indication that, at test time, all costs for all layers\n"
                  "(up to the currently trained layer) should be computed.\n"
        );

    declareOption(ol, "reconstruct_hidden",
                  &StackedAutoassociatorsNet::reconstruct_hidden,
                  OptionBase::buildoption,
                  "Indication that the autoassociators are also trained to\n"
                  "reconstruct their hidden layers (inspired from CD1 in an RBM).\n"
        );

    declareOption(ol, "noise_type",
                  &StackedAutoassociatorsNet::noise_type,
                  OptionBase::buildoption,
                  "Type of noise that corrupts the autoassociators' input. "
                  "Choose among:\n"
                  " - \"missing_data\"\n"
                  " - \"masking_noise\"\n"
                  " - \"binary_sampling\"\n"
                  " - \"gaussian\"\n"
                  " - \"none\"\n"
        );

    declareOption(ol, "missing_data_method",
                  &StackedAutoassociatorsNet::missing_data_method,
                  OptionBase::buildoption,
                  "Method used to fill the double_input vector for the "
                  "missing_data noise type.\n"
                  "Choose among:\n"
                  " - \"binomial_complementary\"\n"
                  " - \"one_if_missing\"\n"
        );
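
    // Note on missing_data: the autoassociator input is then doubled
    // (build_layers_and_connections() below requires
    // connections[i]->down_size == 2 * layers[i]->size), pairing each
    // input component with a companion unit whose content depends on
    // missing_data_method -- e.g. with "one_if_missing" the companion
    // presumably flags missing values with a 1. The exact filling rule
    // is implemented in the input corruption code.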

    declareOption(ol, "corrupted_data_weight",
                  &StackedAutoassociatorsNet::corrupted_data_weight,
                  OptionBase::buildoption,
                  "Weight given to a corrupted or missing input component when\n"
                  "backpropagating the gradient of the reconstruction cost.\n"
        );

    declareOption(ol, "data_weight",
                  &StackedAutoassociatorsNet::data_weight,
                  OptionBase::buildoption,
                  "Weight given to an uncorrupted input component when\n"
                  "backpropagating the gradient of the reconstruction cost.\n"
        );

    declareOption(ol, "fraction_of_masked_inputs",
                  &StackedAutoassociatorsNet::fraction_of_masked_inputs,
                  OptionBase::buildoption,
                  "Random fraction of the autoassociators' input components that\n"
                  "are masked, i.e. not used to reconstruct the input.\n"
        );

    declareOption(ol, "probability_of_masked_inputs",
                  &StackedAutoassociatorsNet::probability_of_masked_inputs,
                  OptionBase::buildoption,
                  "Probability of masking each input component. Either this "
                  "option\n"
                  "or fraction_of_masked_inputs should be > 0.\n"
        );

    declareOption(ol, "probability_of_masked_target",
                  &StackedAutoassociatorsNet::probability_of_masked_target,
                  OptionBase::buildoption,
                  "Probability of masking the target, when using greedy_target_connections.\n"
        );

    declareOption(ol, "mask_with_mean",
                  &StackedAutoassociatorsNet::mask_with_mean,
                  OptionBase::buildoption,
                  "Indication that inputs should be masked with the "
                  "training set mean of that component.\n"
        );

    declareOption(ol, "mask_with_pepper_salt",
                  &StackedAutoassociatorsNet::mask_with_pepper_salt,
                  OptionBase::buildoption,
                  "Indication that inputs should be masked with "
                  "0 or 1 according to prob_salt_noise.\n"
        );

    declareOption(ol, "pep_salt_zero_centered",
                  &StackedAutoassociatorsNet::pep_salt_zero_centered,
                  OptionBase::buildoption,
                  "Indicates whether the mask is zero-centered (>0) or not (==0).\n"
                  "If equal to 0 (not centered),\n"
                  "then pepVal is 0 and saltVal is 1.\n"
                  "If greater than 0 (centered),\n"
                  "then pepVal is -pep_salt_zero_centered and\n"
                  "saltVal is pep_salt_zero_centered.\n"
        );

    declareOption(ol, "renoising",
                  &StackedAutoassociatorsNet::renoising,
                  OptionBase::buildoption,
                  "Indication that the autoassociator will try to\n"
                  "'reconstruct' _another_ corrupted version of the input\n"
                  "(instead of the input itself),\n"
                  "from an initial encoded corrupted version of the input.\n"
        );

    declareOption(ol, "noisy",
                  &StackedAutoassociatorsNet::noisy,
                  OptionBase::buildoption,
                  "Indication that examples are corrupted before being used for a "
                  "particular training phase.\n"
                  "Note that the original examples are used for any test.\n"
                  "Choose among:\n"
                  "0 : no example is corrupted\n"
                  "1 : noise applied before unsup. pre-training (a basic "
                  "autoassociator, without denoising, will be used)\n"
                  "2 : noise applied before unsup. pre-training and before "
                  "supervised fine-tuning.\n"
        );

    declareOption(ol, "prob_salt_noise",
                  &StackedAutoassociatorsNet::prob_salt_noise,
                  OptionBase::buildoption,
                  "Probability that we mask the input by 1 instead of 0.\n"
        );

    declareOption(ol, "gaussian_std",
                  &StackedAutoassociatorsNet::gaussian_std,
                  OptionBase::buildoption,
                  "Standard deviation of the Gaussian noise.\n"
        );

    declareOption(ol, "binary_sampling_noise_parameter",
                  &StackedAutoassociatorsNet::binary_sampling_noise_parameter,
                  OptionBase::buildoption,
                  "Parameter \\tau for corrupted input sampling:\n"
                  "  \\tilde{x}_k ~ B((x_k - 0.5) \\tau + 0.5)\n"
        );
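
    // A compact sketch of the corruption rules named above, per input
    // component x (hypothetical helper, not an actual PLearn function;
    // rg stands for the learner's PRandom generator):
    //
    //     real corrupt(real x, PP<PRandom> rg) {
    //         if (noise_type == "masking_noise")
    //             return rg->uniform_sample() < probability_of_masked_inputs
    //                 ? (mask_with_pepper_salt
    //                    ? (rg->uniform_sample() < prob_salt_noise
    //                       ? saltVal : pepVal)
    //                    : 0.)  // or the component's training-set mean
    //                           // when mask_with_mean is set
    //                 : x;
    //         if (noise_type == "gaussian")
    //             return x + rg->gaussian_mu_sigma(0., gaussian_std);
    //         if (noise_type == "binary_sampling")  // \tilde{x} ~ B(.)
    //             return rg->uniform_sample()
    //                        < (x - 0.5) * binary_sampling_noise_parameter + 0.5
    //                 ? 1. : 0.;
    //         return x;  // "none" ("missing_data" is handled separately)
    //     }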

    declareOption(ol, "unsupervised_nstages",
                  &StackedAutoassociatorsNet::unsupervised_nstages,
                  OptionBase::buildoption,
                  "Number of samples to use for unsupervised fine-tuning.\n");

    declareOption(ol, "unsupervised_fine_tuning_learning_rate",
                  &StackedAutoassociatorsNet::unsupervised_fine_tuning_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used during the unsupervised "
                  "fine tuning gradient descent");

    declareOption(ol, "unsupervised_fine_tuning_decrease_ct",
                  &StackedAutoassociatorsNet::unsupervised_fine_tuning_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the learning rate used during\n"
                  "unsupervised fine tuning gradient descent.\n");

    declareOption(ol, "nb_corrupted_layer",
                  &StackedAutoassociatorsNet::nb_corrupted_layer,
                  OptionBase::buildoption,
                  "Indication of how many layers should be corrupted,\n"
                  "starting with the input one,\n"
                  "during greedy layer-wise learning.\n");

    declareOption(ol, "mask_input_layer_only",
                  &StackedAutoassociatorsNet::mask_input_layer_only,
                  OptionBase::buildoption,
                  "Indication that only the input layer should be corrupted\n"
                  "during greedy layer-wise learning.\n");

    declareOption(ol, "mask_input_layer_only_in_unsupervised_fine_tuning",
                  &StackedAutoassociatorsNet::mask_input_layer_only_in_unsupervised_fine_tuning,
                  OptionBase::buildoption,
                  "Indication that only the input layer should be masked\n"
                  "during unsupervised fine-tuning.\n");

    declareOption(ol, "train_stats_window",
                  &StackedAutoassociatorsNet::train_stats_window,
                  OptionBase::buildoption,
                  "The number of samples to use to compute training stats.\n"
                  "-1 (default) means the number of training samples.\n");

    declareOption(ol, "learnerExpdir",
                  &StackedAutoassociatorsNet::learnerExpdir,
                  OptionBase::buildoption,
                  "Experiment directory where the learner will be saved\n"
                  "if save_learner_before_fine_tuning is true.\n"
        );

    declareOption(ol, "save_learner_before_fine_tuning",
                  &StackedAutoassociatorsNet::save_learner_before_fine_tuning,
                  OptionBase::buildoption,
                  "Saves the learner before the supervised fine-tuning.\n"
        );

    declareOption(ol, "keep_online_representations",
                  &StackedAutoassociatorsNet::keep_online_representations,
                  OptionBase::buildoption,
                  "Keep track of the representations obtained during an "
                  "unsupervised training phase.\n"
        );

    declareOption(ol, "greedy_stages",
                  &StackedAutoassociatorsNet::greedy_stages,
                  OptionBase::learntoption,
                  "Number of training samples seen in the different greedy "
                  "phases.\n"
        );

    declareOption(ol, "n_layers", &StackedAutoassociatorsNet::n_layers,
                  OptionBase::learntoption,
                  "Number of layers"
        );

    declareOption(ol, "unsupervised_stage",
                  &StackedAutoassociatorsNet::unsupervised_stage,
                  OptionBase::learntoption,
                  "Number of samples visited so far during unsupervised "
                  "fine-tuning.\n");

    declareOption(ol, "correlation_layers",
                  &StackedAutoassociatorsNet::correlation_layers,
                  OptionBase::learntoption,
                  "Hidden layers for the correlation connections"
        );

    declareOption(ol, "expectation_means",
                  &StackedAutoassociatorsNet::expectation_means,
                  OptionBase::learntoption,
                  "Means of the layers on the training set, for each layer"
        );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

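// For illustration only: a hypothetical fragment of a PLearn script
// instantiating this learner with some of the options declared above
// (values are placeholders, not recommendations):
//
//     StackedAutoassociatorsNet(
//         greedy_learning_rate = 0.01;
//         fine_tuning_learning_rate = 0.05;
//         training_schedule = [ 10000 10000 ];
//         noise_type = "masking_noise";
//         probability_of_masked_inputs = 0.25;
//         layers = [ RBMBinomialLayer( size = 784 )
//                    RBMBinomialLayer( size = 500 )
//                    RBMBinomialLayer( size = 500 ) ];
//         connections = [ ... ];                  // e.g. RBMMatrixConnection
//         reconstruction_connections = [ ... ];   // transposed counterparts
//         final_module = ...;   // e.g. a module stack ending in a softmax
//         final_cost = ...;     // e.g. an NLLCostModule
//     )
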
void StackedAutoassociatorsNet::declareMethods(RemoteMethodMap& rmm)
{
    // Insert a backpointer to remote methods; note that this is different from
    // declareOptions().
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(
        rmm, "fantasizeKTime",
        &StackedAutoassociatorsNet::fantasizeKTime,
        (BodyDoc("On a trained learner, computes an encode-decode (fantasize) pass through a specified number of hidden layers, starting from one specified source image."),
         ArgDoc ("kTime", "Number of times to fantasize. \n"
                 "The next input image will again be the source image (if alwaysFromSrcImg is True) \n"
                 "or the last fantasized image (if alwaysFromSrcImg is False), and so on for kTime steps."),
         ArgDoc ("srcImg", "Source image vector (should have the same width as the input layer)"),
         ArgDoc ("sampling", "Vector of bool indicating whether or not sampling will be done for each hidden layer\n"
                "during decoding. Its width indicates how many hidden layers will be used.\n"
                "(should have the same width as maskNoiseFractOrProb)\n"
                "Smaller elements of the vector correspond to lower layers"),
         ArgDoc ("maskNoiseFractOrProb", "Vector of noise fractions or probabilities\n"
                "(according to the one used during the learning stage)\n"
                "for each layer (should have the same width as sampling, or be empty if unused).\n"
                "Smaller elements of the vector correspond to lower layers"),
         ArgDoc ("alwaysFromSrcImg", "Boolean indicating whether each encode-decode \n"
                "step is done from the source image (set to True) or \n"
                "from the previously obtained fantasized image (set to False)."),
         RetDoc ("Fantasized images obtained at each of the kTime steps.")));

    declareMethod(
        rmm, "fantasizeKTimeOnMultiSrcImg",
        &StackedAutoassociatorsNet::fantasizeKTimeOnMultiSrcImg,
        (BodyDoc("Calls the 'fantasizeKTime' function for each source image found in the matrix 'srcImg'."),
         ArgDoc ("kTime", "Number of times to fantasize for each source image. \n"
                 "The next input image will again be the source image (if alwaysFromSrcImg is True) \n"
                 "or the last fantasized image (if alwaysFromSrcImg is False), and so on for kTime steps."),
         ArgDoc ("srcImg", "Source image matrix (rows should have the same width as the input layer)"),
         ArgDoc ("sampling", "Vector of bool indicating whether or not sampling will be done for each hidden layer\n"
                "during decoding. Its width indicates how many hidden layers will be used.\n"
                "(should have the same width as maskNoiseFractOrProb)\n"
                "Smaller elements of the vector correspond to lower layers"),
         ArgDoc ("maskNoiseFractOrProb", "Vector of noise fractions or probabilities\n"
                "(according to the one used during the learning stage)\n"
                "for each layer (should have the same width as sampling, or be empty if unused).\n"
                "Smaller elements of the vector correspond to lower layers"),
         ArgDoc ("alwaysFromSrcImg", "Boolean indicating whether each encode-decode \n"
                "step is done from the source image (set to True) or \n"
                "from the previously obtained fantasized image (set to False)."),
         RetDoc ("For each source image, the fantasized images obtained at each of the kTime steps.")));

    declareMethod(
        rmm, "getTrainRepresentations", &StackedAutoassociatorsNet::getTrainRepresentations,
        (BodyDoc("Returns the representations obtained during the last pre-training of the current layer.\n"),
         RetDoc ("Current train representations")));

    declareMethod(
        rmm, "remote_setCurrentlyTrainedLayer", &StackedAutoassociatorsNet::remote_setCurrentlyTrainedLayer,
        (BodyDoc("Modify currently_trained_layer.\n"),
        ArgDoc ("input", "Matrix of inputs."),
        RetDoc ("Outputs from each hidden layer.")));

}

void StackedAutoassociatorsNet::build_()
{
    MODULE_LOG << "build_() called" << endl;

    if(inputsize_ > 0 && targetsize_ > 0)
    {
        // Initialize some learnt variables
        n_layers = layers.length();

        if(nb_corrupted_layer == -1)
            nb_corrupted_layer = n_layers-1;

        if( nb_corrupted_layer >= n_layers)
            PLERROR("StackedAutoassociatorsNet::build_() - \n"
                    "nb_corrupted_layer should be < %d.\n",n_layers);

        if( weightsize_ > 0 )
            PLERROR("StackedAutoassociatorsNet::build_() - \n"
                    "usage of weighted samples (weight size > 0) is not\n"
                    "implemented yet.\n");

        if( !online && training_schedule.length() != n_layers-1 )
            PLERROR("StackedAutoassociatorsNet::build_() - \n"
                    "training_schedule should have %d elements.\n",
                    n_layers-1);

        if( partial_costs && partial_costs.length() != n_layers-1 )
            PLERROR("StackedAutoassociatorsNet::build_() - \n"
                    "partial_costs should have %d elements.\n",
                    n_layers-1);

        if( partial_costs && partial_costs_weights &&
            partial_costs_weights.length() != n_layers-1 )
            PLERROR("StackedAutoassociatorsNet::build_() - \n"
                    "partial_costs_weights should have %d elements.\n",
                    n_layers-1);

        if( online && reconstruct_hidden )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "cannot use online setting with reconstruct_hidden=true.\n");

//        if( unsupervised_nstages > 0 && correlation_connections.length() != 0 )
//            PLERROR("StackedAutoassociatorsNet::build_()"
//                    " - \n"
//                    "cannot use unsupervised fine-tuning with correlation connections.\n");

        if( fraction_of_masked_inputs < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "fraction_of_masked_inputs should be >= 0.\n");

        if( probability_of_masked_inputs < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "probability_of_masked_inputs should be >= 0.\n");

        if( prob_salt_noise < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "prob_salt_noise should be >= 0.\n");

        if( probability_of_masked_target < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "probability_of_masked_target should be >= 0.\n");

        if( data_weight < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "data_weight should be >= 0.\n");

        if( corrupted_data_weight < 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "corrupted_data_weight should be >= 0.\n");

        if( online && noise_type != "masking_noise" && batch_size != 1)
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "corrupted inputs only work with masking noise in the "
                    "online setting,\n"
                    "in the non-minibatch case.\n");

        if( renoising && noisy > 0 )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "cannot use renoising and noisy at the same time.\n");

        if( renoising && noise_type == "missing_data" )
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "cannot use renoising with missing data.\n");

        if( noisy > 0 && noise_type == "missing_data")
            PLERROR("StackedAutoassociatorsNet::build_()"
                    " - \n"
                    "cannot use noisy with missing data.\n");

        if( !online )
        {
            if( greedy_stages.length() == 0)
            {
                greedy_stages.resize(n_layers-1);
                greedy_stages.clear();
            }

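            // greedy_stages[i] counts the samples already seen in greedy
            // phase i. A positive (fine-tuning) stage means all greedy
            // phases are done; otherwise, resume at the highest layer
            // whose greedy phase has started.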
            if(stage > 0)
                currently_trained_layer = n_layers;
            else
            {
                currently_trained_layer = n_layers-1;
                while(currently_trained_layer>1
                      && greedy_stages[currently_trained_layer-1] <= 0)
                    currently_trained_layer--;
            }
        }
        else
        {
            currently_trained_layer = n_layers;
        }

        build_layers_and_connections();
        build_costs();
    }
}

void StackedAutoassociatorsNet::build_layers_and_connections()
{
    MODULE_LOG << "build_layers_and_connections() called" << endl;

    if( connections.length() != n_layers-1 )
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "there should be %d connections.\n",
                n_layers-1);

    if( reconstruction_connections.length() != n_layers-1 )
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "there should be %d reconstruction connections.\n",
                n_layers-1);

    if( correlation_connections.length() != 0 &&
        correlation_connections.length() != n_layers-1 )
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "there should be either %d correlation connections or none.\n",
                n_layers-1);

    if( direct_connections.length() != 0 &&
        direct_connections.length() != n_layers-1 )
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "there should be either %d direct connections or none.\n",
                n_layers-1);

    if(reconstruct_hidden && compute_all_test_costs )
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "compute_all_test_costs option is not implemented for\n"
                "reconstruct_hidden option.");

    if( noise_type == "missing_data" || renoising || noisy > 0 )
    {
        if( correlation_connections.length() !=0 )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                    "Missing data, renoising and noisy are not implemented with correlation_connections.\n");

        if( direct_connections.length() !=0 )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                    "Missing data, renoising and noisy are not implemented with direct_connections.\n");

        if( reconstruct_hidden )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                    "Missing data, renoising and noisy are not implemented with reconstruct_hidden.\n");

        if( online )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                    "Missing data, renoising and noisy are not implemented in the online setting.\n");
    }

    if(correlation_connections.length() != 0)
    {
        if( compute_all_test_costs )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                    "compute_all_test_costs option is not implemented for\n"
                    "correlation_connections.");
        correlation_layers.resize( layers.length()-1 );
        for( int i=0 ; i<n_layers-1 ; i++ )
        {
            if( greedy_stages[i] == 0 )
            {
                CopiesMap map;
                correlation_layers[i] =
                    layers[i+1]->deepCopy(map);
            }
        }
        correlation_activations.resize( n_layers-1 );
        correlation_activations_m.resize( n_layers-1 );
        correlation_expectations.resize( n_layers-1 );
        correlation_expectations_m.resize( n_layers-1 );
        correlation_activation_gradients.resize( n_layers-1 );
        correlation_activation_gradients_m.resize( n_layers-1 );
        correlation_expectation_gradients.resize( n_layers-1 );
        correlation_expectation_gradients_m.resize( n_layers-1 );
    }

    if(layers[0]->size != inputsize_)
        PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() - \n"
                "layers[0] should have a size of %d.\n",
                inputsize_);

    activations.resize( n_layers );
    activations_m.resize( n_layers );
    expectations.resize( n_layers );
    expectations_m.resize( n_layers );
    activation_gradients.resize( n_layers );
    activation_gradients_m.resize( n_layers );
    expectation_gradients.resize( n_layers );
    expectation_gradients_m.resize( n_layers );

    // If not defined, reconstruction_layers will
    // simply point to the layers vector.
    if( reconstruction_layers.length() == 0 )
        reconstruction_layers = layers;
    else
        if( reconstruction_layers.length() != layers.length()-1 &&
            reconstruction_layers.length() != layers.length() )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                     "- \n"
                    "reconstruction_layers should have a length of layers.length-1 or layers.length, i.e.: %d.\n",
                     layers.length()-1);

    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        if( layers[i]->size != reconstruction_layers[i]->size )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                     "- \n"
                    "layers[%i] should have the same size as reconstruction_layers[%i], i.e.: %d.\n",
                    i, i, layers[i]->size);

        if( noise_type == "missing_data")
        {
            if( layers[i]->size * 2 != connections[i]->down_size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                        "- \n"
                        "When noise_type==%s, connections[%i] should have a down_size "
                        "twice the size of layers[%i], i.e.: 2 * %d.\n",
                        noise_type.c_str(), i, i, layers[i]->size);

            if( reconstruction_connections[i]->up_size != layers[i]->size*2 )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                        "- \n"
                        "When noise_type==%s, reconstruction_connections[%i] should have an up_size "
                        "twice the size of layers[%i], i.e.: 2 * %d.\n",
                        noise_type.c_str(), i, i, layers[i]->size);
        }
        else
        {
            if( layers[i]->size != connections[i]->down_size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                        "- \n"
                        "connections[%i] should have a down_size of %d.\n",
                        i, layers[i]->size);

            if( reconstruction_connections[i]->up_size != layers[i]->size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                        "- \n"
                        "reconstruction_connections[%i] should have an up_size of "
                        "%d.\n",
                        i, layers[i]->size);
        }

        if( connections[i]->up_size != layers[i+1]->size )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                    "- \n"
                    "connections[%i] should have an up_size of %d.\n",
                    i, layers[i+1]->size);

        if( layers[i+1]->size != reconstruction_connections[i]->down_size )
            PLERROR("StackedAutoassociatorsNet::build_layers_and_connections() "
                    "- \n"
                    "reconstruction_connections[%i] should have a down_size of "
                    "%d.\n",
                    i, layers[i+1]->size);

        if(correlation_connections.length() != 0)
        {
            if(reconstruct_hidden)
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "cannot use correlation_connections with reconstruct_hidden=true.\n");

            if( correlation_connections[i]->up_size != layers[i+1]->size ||
                correlation_connections[i]->down_size != layers[i+1]->size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "correlation_connections[%i] should have an up_size and "
                        "down_size of %d.\n",
                        i, layers[i+1]->size);
            correlation_activations[i].resize( layers[i+1]->size );
            correlation_expectations[i].resize( layers[i+1]->size );
            correlation_activation_gradients[i].resize( layers[i+1]->size );
            correlation_expectation_gradients[i].resize( layers[i+1]->size );
            if( !(correlation_connections[i]->random_gen) )
            {
                correlation_connections[i]->random_gen = random_gen;
                correlation_connections[i]->forget();
            }

            if( !(correlation_layers[i]->random_gen) )
            {
                correlation_layers[i]->random_gen = random_gen;
                correlation_layers[i]->forget();
            }
        }

        if(direct_connections.length() != 0)
        {
            if( online )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "cannot use direct_connections in the online setting.\n");

            if(reconstruct_hidden)
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "cannot use direct_connections with reconstruct_hidden=true.\n");

            if( direct_connections[i]->up_size != layers[i]->size ||
                direct_connections[i]->down_size != layers[i]->size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "direct_connections[%i] should have an up_size and "
                        "down_size of %d.\n",
                        i, layers[i]->size);
            if( !(direct_connections[i]->random_gen) )
            {
                direct_connections[i]->random_gen = random_gen;
                direct_connections[i]->forget();
            }
        }

        if(greedy_target_connections.length() != 0)
        {
            if(reconstruct_hidden)
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "greedy_target_connections not implemented with reconstruct_hidden=true.\n");

            if( greedy_target_connections[i]->up_size != layers[i+1]->size )
                PLERROR("StackedAutoassociatorsNet::build_layers_and_connections()"
                        " - \n"
                        "greedy_target_connections[%i] should have an up_size of %d.\n",
                        i, layers[i+1]->size);
            if( !(greedy_target_connections[i]->random_gen) )
            {
                greedy_target_connections[i]->random_gen = random_gen;
                greedy_target_connections[i]->forget();
            }
        }

        if( !(layers[i]->random_gen) )
        {
            layers[i]->random_gen = random_gen;
            layers[i]->forget();
        }

        if( !(reconstruction_layers[i]->random_gen) )
        {
            reconstruction_layers[i]->random_gen = random_gen;
            reconstruction_layers[i]->forget();
        }

        if( !(connections[i]->random_gen) )
        {
            connections[i]->random_gen = random_gen;
            connections[i]->forget();
        }

        if( !(reconstruction_connections[i]->random_gen) )
        {
            reconstruction_connections[i]->random_gen = random_gen;
            reconstruction_connections[i]->forget();
        }

        activations[i].resize( layers[i]->size );
        expectations[i].resize( layers[i]->size );
        activation_gradients[i].resize( layers[i]->size );
        expectation_gradients[i].resize( layers[i]->size );
    }
    if( !(layers[n_layers-1]->random_gen) )
    {
        layers[n_layers-1]->random_gen = random_gen;
        layers[n_layers-1]->forget();
    }
    activations[n_layers-1].resize( layers[n_layers-1]->size );
    expectations[n_layers-1].resize( layers[n_layers-1]->size );
    activation_gradients[n_layers-1].resize( layers[n_layers-1]->size );
    expectation_gradients[n_layers-1].resize( layers[n_layers-1]->size );

    reconstruction_weights.resize( layers[0]->size );
    // Will be correctly resized if keep_online_representations is true
    train_representations.resize( 1 );

    // For denoising autoencoders
    doubled_expectations.resize( n_layers-1 );
    doubled_expectation_gradients.resize( n_layers-1 );
    corrupted_autoassociator_expectations.resize( n_layers-1 );
    binary_masks.resize( n_layers-1 );

    if( (noise_type == "masking_noise" || noise_type == "missing_data") && fraction_of_masked_inputs > 0 )
        autoassociator_expectation_indices.resize( n_layers-1 );

    if( renoising || noisy > 0 )
       second_corrupted_autoassociator_expectations.resize( n_layers-1 );

    for( int i=0 ; i<n_layers-1 ; i++ )
    {
        binary_masks[i].resize( layers[i]->size ); // For online learning
        if( noise_type == "missing_data" )
        {
            corrupted_autoassociator_expectations[i].resize( layers[i]->size * 2 );
            doubled_expectations[i].resize( layers[i]->size * 2 );
            doubled_expectation_gradients[i].resize( layers[i]->size * 2 );
        }
        else
        {
            corrupted_autoassociator_expectations[i].resize( layers[i]->size );
            doubled_expectations[i].resize( layers[i]->size );
            doubled_expectation_gradients[i].resize( layers[i]->size );
        }

        if( (noise_type == "masking_noise" || noise_type == "missing_data") && fraction_of_masked_inputs > 0 )
        {
            autoassociator_expectation_indices[i].resize( layers[i]->size );
            for( int j=0 ; j < autoassociator_expectation_indices[i].length() ; j++ )
                autoassociator_expectation_indices[i][j] = j;
        }

        if( renoising || noisy > 0 )
            second_corrupted_autoassociator_expectations[i].resize( layers[i]->size );
    }

    if(greedy_target_connections.length() != 0)
    {
        target_vec.resize(greedy_target_connections[0]->down_size);
        target_vec_gradient.resize(greedy_target_connections[0]->down_size);
        targets_vec.resize(n_layers-1);
        targets_vec_gradient.resize(n_layers-1);
        for( int i=0; i<n_layers-1; i++ )
        {
            targets_vec[i].resize(greedy_target_connections[0]->down_size);
            targets_vec_gradient[i].resize(greedy_target_connections[0]->down_size);
        }
    }
}
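
// To summarize the size constraints checked above, for each autoassociator i:
//
//     connections[i]                : layers[i]->size   -> layers[i+1]->size
//     reconstruction_connections[i] : layers[i+1]->size -> layers[i]->size
//     correlation_connections[i]    : layers[i+1]->size -> layers[i+1]->size
//     direct_connections[i]         : layers[i]->size   -> layers[i]->size
//
// with the down side of connections[i] (and the up side of
// reconstruction_connections[i]) doubled when noise_type == "missing_data".
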
void StackedAutoassociatorsNet::build_costs()
{
    MODULE_LOG << "build_costs() called" << endl;

    if( !final_cost )
        PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                "final_cost should be provided.\n");

    final_cost_gradient.resize( final_cost->input_size );
    final_cost->setLearningRate( fine_tuning_learning_rate );

    if( !(final_cost->random_gen) )
    {
        final_cost->random_gen = random_gen;
        final_cost->forget();
    }

    if( !final_module )
        PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                "final_module should be provided.\n");

    if( layers[n_layers-1]->size != final_module->input_size )
        PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                "final_module should have an input_size of %d.\n",
                layers[n_layers-1]->size);

    if( final_module->output_size != final_cost->input_size )
        PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                "final_module should have an output_size of %d.\n",
                final_cost->input_size);

    final_module->setLearningRate( fine_tuning_learning_rate );

    if( !(final_module->random_gen) )
    {
        final_module->random_gen = random_gen;
        final_module->forget();
    }

    if(targetsize_ != 1)
        PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                "target size of %d is not supported.\n", targetsize_);

    if(partial_costs)
    {
        if( correlation_connections.length() != 0 )
            PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                    "correlation_connections cannot be used with partial costs.");

        partial_costs_positions.resize(partial_costs.length());
        partial_costs_positions.clear();
        for(int i=0; i<partial_costs.length(); i++)
        {
            if(!partial_costs[i])
                PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                        "partial_costs[%i] should be provided.\n",i);
            if( layers[i+1]->size != partial_costs[i]->input_size )
                PLERROR("StackedAutoassociatorsNet::build_costs() - \n"
                        "partial_costs[%i] should have an input_size of %d.\n",
                        i,layers[i+1]->size);
            if(i==0)
                partial_costs_positions[i] = n_layers-1;
            else
                partial_costs_positions[i] = partial_costs_positions[i-1]
                    + partial_costs[i-1]->costNames().length();
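
            // partial_costs_positions[i] is the offset of partial cost
            // i's cost names in the full cost vector: the first
            // n_layers-1 slots hold the autoassociators' reconstruction
            // errors, and each subsequent block holds the names of one
            // partial cost.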

            if( !(partial_costs[i]->random_gen) )
            {
                partial_costs[i]->random_gen = random_gen;
                partial_costs[i]->forget();
            }
        }
    }
}

void StackedAutoassociatorsNet::build()
{
    inherited::build();
    build_();
}

void StackedAutoassociatorsNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // Public options
    deepCopyField(training_schedule, copies);
    deepCopyField(layers, copies);
    deepCopyField(reconstruction_layers, copies);
    deepCopyField(connections, copies);
    deepCopyField(reconstruction_connections, copies);
    deepCopyField(correlation_connections, copies);
    deepCopyField(direct_connections, copies);
    deepCopyField(final_module, copies);
    deepCopyField(final_cost, copies);
    deepCopyField(partial_costs, copies);
    deepCopyField(partial_costs_weights, copies);
    deepCopyField(greedy_target_connections, copies);

    // Protected options
    deepCopyField(activations, copies);
    deepCopyField(activations_m, copies);
    deepCopyField(expectations, copies);
    deepCopyField(expectations_m, copies);
    deepCopyField(doubled_expectations, copies);
    deepCopyField(activation_gradients, copies);
    deepCopyField(activation_gradients_m, copies);
    deepCopyField(expectation_gradients, copies);
    deepCopyField(doubled_expectation_gradients, copies);
    deepCopyField(expectation_gradients_m, copies);
    deepCopyField(reconstruction_activations, copies);
    deepCopyField(reconstruction_activations_m, copies);
    deepCopyField(reconstruction_activation_gradients, copies);
    deepCopyField(reconstruction_activation_gradients_m, copies);
    deepCopyField(reconstruction_expectation_gradients, copies);
    deepCopyField(reconstruction_expectation_gradients_m, copies);
    deepCopyField(fine_tuning_reconstruction_activations, copies);
    deepCopyField(fine_tuning_reconstruction_expectations, copies);
    deepCopyField(fine_tuning_reconstruction_activation_gradients, copies);
    deepCopyField(fine_tuning_reconstruction_expectation_gradients, copies);
    deepCopyField(reconstruction_activation_gradients_from_hid_rec, copies);
    deepCopyField(reconstruction_expectation_gradients_from_hid_rec, copies);
    deepCopyField(hidden_reconstruction_activations, copies);
    deepCopyField(hidden_reconstruction_activation_gradients, copies);
    deepCopyField(correlation_activations, copies);
    deepCopyField(correlation_activations_m, copies);
    deepCopyField(correlation_expectations, copies);
    deepCopyField(correlation_expectations_m, copies);
    deepCopyField(correlation_activation_gradients, copies);
    deepCopyField(correlation_activation_gradients_m, copies);
    deepCopyField(correlation_expectation_gradients, copies);
    deepCopyField(correlation_expectation_gradients_m, copies);
    deepCopyField(correlation_layers, copies);
    deepCopyField(direct_activations, copies);
    deepCopyField(direct_and_reconstruction_activations, copies);
    deepCopyField(direct_and_reconstruction_activation_gradients, copies);
    deepCopyField(partial_costs_positions, copies);
    deepCopyField(partial_cost_value, copies);
    deepCopyField(partial_cost_values, copies);
    deepCopyField(partial_cost_values_0, copies);
    deepCopyField(final_cost_input, copies);
    deepCopyField(final_cost_inputs, copies);
    deepCopyField(final_cost_value, copies);
    deepCopyField(final_cost_values, copies);
    deepCopyField(final_cost_values_0, copies);
    deepCopyField(final_cost_gradient, copies);
    deepCopyField(final_cost_gradients, copies);
    deepCopyField(corrupted_autoassociator_expectations, copies);
    deepCopyField(second_corrupted_autoassociator_expectations, copies);
    deepCopyField(reconstruction_weights, copies);
    deepCopyField(binary_masks, copies);
    deepCopyField(tmp_mask, copies);
    deepCopyField(autoassociator_expectation_indices, copies);
    deepCopyField(expectation_means, copies);
    deepCopyField(target_vec, copies);
    deepCopyField(target_vec_gradient, copies);
    deepCopyField(targets_vec, copies);
    deepCopyField(targets_vec_gradient, copies);
    deepCopyField(greedy_stages, copies);
}

01161 
01162 int StackedAutoassociatorsNet::outputsize() const
01163 {
01164     if(currently_trained_layer < n_layers)
01165         return layers[currently_trained_layer]->size;
01166     return final_module->output_size;
01167 }
01168 
01169 void StackedAutoassociatorsNet::forget()
01170 {
01174 
01181     inherited::forget();
01182 
01183     for( int i=0 ; i<n_layers ; i++ )
01184         layers[i]->forget();
01185 
01186     for( int i=0 ; i<n_layers-1 ; i++ )
01187     {
01188         reconstruction_layers[i]->forget();
01189         connections[i]->forget();
01190         reconstruction_connections[i]->forget();
01191     }
01192 
01193     final_module->forget();
01194     final_cost->forget();
01195 
01196     for( int i=0 ; i<partial_costs.length() ; i++ )
01197         if( partial_costs[i] )
01198             partial_costs[i]->forget();
01199 
01200     if(correlation_connections.length() != 0)
01201     {
01202         for( int i=0 ; i<n_layers-1 ; i++)
01203         {
01204             correlation_connections[i]->forget();
01205             correlation_layers[i]->forget();
01206         }
01207     }
01208 
01209     if(direct_connections.length() != 0)
01210     {
01211         for( int i=0 ; i<n_layers-1 ; i++)
01212             direct_connections[i]->forget();
01213     }
01214 
01215     for( int i=0; i<greedy_target_connections.length(); i++ )
01216         greedy_target_connections[i]->forget();
01217 
01218     stage = 0;
01219     unsupervised_stage = 0;
01220     greedy_stages.clear();
01221 }
01222 
01223 void StackedAutoassociatorsNet::train()
01224 {
01225     Profiler::pl_profile_start("StackedAutoassociatorsNet::train");
01226     MODULE_LOG << "train() called " << endl;
01227     MODULE_LOG << "  training_schedule = " << training_schedule << endl;
01228 
01229     minibatch_size = batch_size > 0 ? batch_size : train_set->length();
01230     int n_train_stats_samples = (train_stats_window >= 0)
01231         ? train_stats_window
01232         : train_set->length();
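    // In the online case below, training statistics are collected only over
    // the last n_train_stats_samples stages (train_stats_window), or over
    // one training-set length worth of stages when train_stats_window < 0.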
01233 
01234     Vec input(inputsize());
01235     Mat inputs(minibatch_size, inputsize());
01236     Vec target(targetsize());
01237     Mat targets(minibatch_size, targetsize());
01238     real weight; // unused
01239     Vec weights(minibatch_size);
01240 
01241     TVec<string> train_cost_names = getTrainCostNames();
01242     Vec train_costs(train_cost_names.length(), MISSING_VALUE);
01243     Mat train_costs_m(minibatch_size, train_cost_names.length(),
01244                       MISSING_VALUE);
01245 
01246     int nsamples = train_set->length();
01247     int sample;
01248 
01249     PP<ProgressBar> pb;
01250 
01251     if( !train_stats )
01252     {
01253         train_stats = new VecStatsCollector();
01254         train_stats->setFieldNames(train_cost_names);
01255     }
01256 
01257     // clear stats of previous epoch
01258     train_stats->forget();
01259 
01260     real lr = 0;
01261     int init_stage;
01262 
01263     if( !online )
01264     {
01265         Profiler::pl_profile_start("StackedAutoassociatorsNet::train !online");
01266 
01267         /***** initial greedy training *****/
01268         Profiler::pl_profile_start("StackedAutoassociatorsNet::train greedy");
01269         for( int i=0 ; i<n_layers-1 ; i++ )
01270         {
01271             MODULE_LOG << "Training connection weights between layers " << i
01272                        << " and " << i+1 << endl;
01273 
01274             int end_stage = training_schedule[i];
01275             int* this_stage = greedy_stages.subVec(i,1).data();
01276             init_stage = *this_stage;
01277 
01278             MODULE_LOG << "  stage = " << *this_stage << endl;
01279             MODULE_LOG << "  end_stage = " << end_stage << endl;
01280             MODULE_LOG << "  greedy_learning_rate = " << greedy_learning_rate << endl;
01281 
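            // When masking with the per-unit mean, a preliminary pass over
            // the training set (below) computes the average representation
            // at level i; corrupt_input() then substitutes
            // expectation_means[i][j] for each masked coordinate j.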
01282             if( *this_stage == 0 && noise_type == "masking_noise" && mask_with_mean )
01283             {
01284                 Vec in(inputsize());
01285                 Vec tar(train_set->targetsize());
01286                 real w;
01287                 expectation_means.resize(n_layers-1);
01288                 expectation_means[i].resize(expectations[i].length());
01289                 expectation_means[i].clear();
01290                 for( int l = 0; l<train_set->length(); l++ )
01291                 {
01292                     train_set->getExample(l, in, tar, w);
01293                     // Get representation
01294                     expectations[0] << in;
01295                     if(correlation_connections.length() != 0)
01296                     {
01297                         for( int j=0 ; j<i; j++ )
01298                         {
01299                             connections[j]->fprop( expectations[j], correlation_activations[j] );
01300                             layers[j+1]->fprop( correlation_activations[j],
01301                                                 correlation_expectations[j] );
01302                             correlation_connections[j]->fprop( correlation_expectations[j],
01303                                                                activations[j+1] );
01304                             correlation_layers[j]->fprop( activations[j+1],
01305                                                           expectations[j+1] );
01306                         }
01307                     }
01308                     else
01309                     {
01310                         for( int j=0 ; j<i; j++ )
01311                         {
01312                             connections[j]->fprop( expectations[j], activations[j+1] );
01313                             layers[j+1]->fprop(activations[j+1],expectations[j+1]);
01314                         }
01315                     }
01316 
01317                     expectation_means[i] += expectations[i];
01318                 }
01319                 expectation_means[i] /= train_set->length();
01320             }
01321 
01322             if( report_progress && *this_stage < end_stage )
01323                 pb = new ProgressBar( "Training layer "+tostring(i)
01324                                       +" of "+classname(),
01325                                       end_stage - init_stage );
01326 
01327             train_costs.fill(MISSING_VALUE);
01328             lr = greedy_learning_rate;
01329             layers[i]->setLearningRate( lr );
01330             reconstruction_layers[i]->setLearningRate( lr );
01331             connections[i]->setLearningRate( lr );
01332             reconstruction_connections[i]->setLearningRate( lr );
01333             if(correlation_connections.length() != 0)
01334             {
01335                 correlation_connections[i]->setLearningRate( lr );
01336                 correlation_layers[i]->setLearningRate( lr );
01337             }
01338             if(direct_connections.length() != 0)
01339             {
01340                 direct_connections[i]->setLearningRate( lr );
01341             }
01342             if( greedy_target_connections.length() && greedy_target_connections[i] )
01343                 greedy_target_connections[i]->setLearningRate( lr );
01344             layers[i+1]->setLearningRate( lr );
01345             if(partial_costs.length() != 0 && partial_costs[i])
01346                 partial_costs[i]->setLearningRate( lr );
01347 
01348             // Make sure the storage is not null; it will be resized anyway by the bprop calls
01349             reconstruction_activations.resize(layers[i]->size);
01350             reconstruction_activations_m.resize(minibatch_size,
01351                                                 layers[i]->size);
01352             reconstruction_activation_gradients.resize(layers[i]->size);
01353             reconstruction_activation_gradients_m.resize(minibatch_size,
01354                                                          layers[i]->size);
01355             reconstruction_expectation_gradients.resize(layers[i]->size);
01356             reconstruction_expectation_gradients_m.resize(minibatch_size,
01357                                                           layers[i]->size);
01358 
01359             if(reconstruct_hidden)
01360             {
01361                 reconstruction_activation_gradients_from_hid_rec.resize(
01362                     layers[i+1]->size);
01363                 reconstruction_expectation_gradients_from_hid_rec.resize(
01364                     layers[i+1]->size);
01365                 hidden_reconstruction_activations.resize(layers[i+1]->size);
01366                 hidden_reconstruction_activation_gradients.resize(layers[i+1]->size);
01367             }
01368 
01369             if(direct_connections.length() != 0)
01370             {
01371                 direct_activations.resize(layers[i]->size);
01372                 direct_and_reconstruction_activations.resize(layers[i]->size);
01373                 direct_and_reconstruction_activation_gradients.resize(layers[i]->size);
01374             }
01375 
01376             if( keep_online_representations )
01377             {
01378                 train_representations.resize(end_stage-(*this_stage));
01379                 train_representations.clear();
01380             }
01381             int greedyBatchSize = end_stage - (*this_stage);
01382             string old_noise_type = noise_type;
01383             for( ; *this_stage<end_stage ; (*this_stage)++ )
01384             {
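                // The guard below implements a 1/t decay: at greedy stage t,
                //   lr = greedy_learning_rate / (1 + greedy_decrease_ct * t).
                // E.g. (illustrative numbers) with greedy_learning_rate = 0.01
                // and greedy_decrease_ct = 1e-4, lr = 0.01 at t = 0 and
                // lr = 0.005 at t = 10000.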
01385                 if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) )
01386                 {
01387                     lr = greedy_learning_rate/(1 + greedy_decrease_ct
01388                                                * (*this_stage));
01389                     layers[i]->setLearningRate( lr );
01390                     reconstruction_layers[i]->setLearningRate( lr );
01391                     connections[i]->setLearningRate( lr );
01392                     reconstruction_connections[i]->setLearningRate( lr );
01393                     layers[i+1]->setLearningRate( lr );
01394                     if(correlation_connections.length() != 0)
01395                     {
01396                         correlation_connections[i]->setLearningRate( lr );
01397                         correlation_layers[i]->setLearningRate( lr );
01398                     }
01399                     if(direct_connections.length() != 0)
01400                     {
01401                         direct_connections[i]->setLearningRate( lr );
01402                     }
01403                     if(partial_costs.length() != 0 && partial_costs[i])
01404                         partial_costs[i]->setLearningRate( lr );
01405                     if( greedy_target_connections.length() && greedy_target_connections[i] )
01406                         greedy_target_connections[i]->setLearningRate( lr );
01407                 }
01408                 int train_representations_i = 0;
01409                 sample = *this_stage % nsamples;
01410                 train_set->getExample(sample, input, target, weight);
01411                 if( keep_online_representations )
01412                 {
01413                     train_representations_i = greedyBatchSize - (end_stage-(*this_stage));
01414                     train_representations[train_representations_i].resize(layers[i+1]->size);
01415                 }
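                // When noisy >= 1, the example is corrupted once here, and
                // noise_type is temporarily set to "none" so that greedyStep()
                // does not corrupt it a second time via corrupt_input().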
01416                 if( noisy >= 1 )
01417                 {
01418                     corrupt_input( input, second_corrupted_autoassociator_expectations[0], 0 );
01419                     noise_type = "none";
01420                     greedyStep( second_corrupted_autoassociator_expectations[0], target, i, train_costs, train_representations[train_representations_i]);
01421                     noise_type = old_noise_type;
01422                 }
01423                 else
01424                     greedyStep( input, target, i, train_costs, train_representations[train_representations_i]);
01425                 
01426                 train_stats->update( train_costs );
01427 
01428                 if( pb )
01429                     pb->update( *this_stage - init_stage + 1 );
01430             }
01431         }
01432         Profiler::pl_profile_end("StackedAutoassociatorsNet::train greedy");
01433 
01434         /***** unsupervised fine-tuning by gradient descent *****/
01435         if( unsupervised_stage < unsupervised_nstages )
01436         {
01437             Profiler::pl_profile_start("StackedAutoassociatorsNet::train unsupervised");
01438 
01439 //            if( unsupervised_nstages > 0 && correlation_connections.length() != 0 )
01440 //                PLERROR("StackedAutoassociatorsNet::train()"
01441 //                        " - \n"
01442 //                        "cannot use unsupervised fine-tuning with correlation connections.\n");
01443 
01444             MODULE_LOG << "Unsupervised fine-tuning all parameters, ";
01445             MODULE_LOG << "by gradient descent" << endl;
01446             MODULE_LOG << "  unsupervised_stage = " << unsupervised_stage << endl;
01447             MODULE_LOG << "  unsupervised_nstages = " <<
01448                 unsupervised_nstages << endl;
01449             MODULE_LOG << "  unsupervised_fine_tuning_learning_rate = " <<
01450                 unsupervised_fine_tuning_learning_rate << endl;
01451 
01452             init_stage = unsupervised_stage;
01453             if( report_progress && unsupervised_stage < unsupervised_nstages )
01454                 pb = new ProgressBar( "Fine-tuning parameters of all layers of "
01455                                       + classname(),
01456                                       unsupervised_nstages - init_stage );
01457 
01458             fine_tuning_reconstruction_activations.resize( n_layers );
01459             fine_tuning_reconstruction_expectations.resize( n_layers );
01460             fine_tuning_reconstruction_activation_gradients.resize( n_layers );
01461             fine_tuning_reconstruction_expectation_gradients.resize( n_layers );
01462             for( int i=0 ; i<n_layers ; i++ )
01463             {
01464                 fine_tuning_reconstruction_activations[i].resize(
01465                     layers[i]->size );
01466                 fine_tuning_reconstruction_expectations[i].resize(
01467                     layers[i]->size );
01468                 fine_tuning_reconstruction_activation_gradients[i].resize(
01469                     layers[i]->size );
01470                 fine_tuning_reconstruction_expectation_gradients[i].resize(
01471                     layers[i]->size );
01472             }
01473 
01474             setLearningRate( unsupervised_fine_tuning_learning_rate );
01475             train_costs.fill(MISSING_VALUE);
01476             string old_noise_type = noise_type;
01477             for( ; unsupervised_stage<unsupervised_nstages ; unsupervised_stage++ )
01478             {
01479                 sample = unsupervised_stage % nsamples;
01480                 if( !fast_exact_is_equal( unsupervised_fine_tuning_decrease_ct, 0. ) )
01481                     setLearningRate(
01482                         unsupervised_fine_tuning_learning_rate
01483                         / (1. + unsupervised_fine_tuning_decrease_ct
01484                            * unsupervised_stage ) );
01485 
01486                 train_set->getExample( sample, input, target, weight );
01487                 if( noisy >= 1)
01488                 {
01489                     corrupt_input( input, second_corrupted_autoassociator_expectations[0], 0 );
01490                     noise_type = "none";
01491                     unsupervisedFineTuningStep(second_corrupted_autoassociator_expectations[0], target, train_costs );
01492                     noise_type = old_noise_type;
01493                 }
01494                 else
01495                     unsupervisedFineTuningStep( input, target, train_costs );
01496                 train_stats->update( train_costs );
01497 
01498                 if( pb )
01499                     pb->update( unsupervised_stage - init_stage + 1 );
01500             }
01501             Profiler::pl_profile_end("StackedAutoassociatorsNet::train unsupervised");
01502         }
01503 
01504         if( save_learner_before_fine_tuning )
01505         {
01506             if( learnerExpdir == "" )
01507                 PLWARNING("StackedAutoassociatorsNet::train() - \n"
01508                     "cannot save model before fine-tuning because\n"
01509                     "no experiment directory has been set.");
01510             else
01511                 PLearn::save(learnerExpdir + "/learner_before_finetuning.psave",*this);
01512         }
01513 
01514         /***** fine-tuning by gradient descent *****/
01515         if( stage < nstages )
01516         {
01517             Profiler::pl_profile_start("StackedAutoassociatorsNet::train supervised");
01518 
01519             MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
01520             MODULE_LOG << "  stage = " << stage << endl;
01521             MODULE_LOG << "  nstages = " << nstages << endl;
01522             MODULE_LOG << "  fine_tuning_learning_rate = " <<
01523                 fine_tuning_learning_rate << endl;
01524 
01525             init_stage = stage;
01526             if( report_progress && stage < nstages )
01527                 pb = new ProgressBar( "Fine-tuning parameters of all layers of "
01528                                       + classname(),
01529                                       nstages - init_stage );
01530 
01531             setLearningRate( fine_tuning_learning_rate );
01532             train_costs.fill(MISSING_VALUE);
01533             for( ; stage<nstages ; stage++ )
01534             {
01535                 sample = stage % nsamples;
01536                 if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) )
01537                     setLearningRate( fine_tuning_learning_rate
01538                                      / (1. + fine_tuning_decrease_ct * stage ) );
01539 
01540                 train_set->getExample( sample, input, target, weight );
01541                 if( noisy >= 2)
01542                 {
01543                     corrupt_input( input, second_corrupted_autoassociator_expectations[0], 0 );
01544                     fineTuningStep( second_corrupted_autoassociator_expectations[0], target, train_costs );
01545                 }
01546                 else
01547                     fineTuningStep( input, target, train_costs );
01548                 train_stats->update( train_costs );
01549 
01550                 if( pb )
01551                     pb->update( stage - init_stage + 1 );
01552             }
            // Close the "supervised" profiling section inside this guard, so
            // that pl_profile_start/pl_profile_end stay balanced when the
            // learner is already past nstages.
            Profiler::pl_profile_end("StackedAutoassociatorsNet::train supervised");
01553         }
01554 
01555         train_stats->finalize();
01556         MODULE_LOG << "  train costs = " << train_stats->getMean() << endl;
01557 
01558         // Update currently_trained_layer
01559         if(stage > 0)
01560             currently_trained_layer = n_layers;
01561         else
01562         {
01563             currently_trained_layer = n_layers-1;
01564             while(currently_trained_layer>1
01565                   && greedy_stages[currently_trained_layer-1] <= 0)
01566                 currently_trained_layer--;
01567         }
01568         Profiler::pl_profile_end("StackedAutoassociatorsNet::train !online");
01570     }
01571     else // online==true
01572     {
01573         Profiler::pl_profile_start("StackedAutoassociatorsNet::train online");
01574 
01575         if( unsupervised_nstages > 0 )
01576             PLERROR("StackedAutoassociatorsNet::train()"
01577                     " - \n"
01578                     "unsupervised fine-tuning with online=true is not implemented.\n");
01579 
01580         // Train all layers simultaneously AND fine-tuning as well!
01581         if( stage < nstages )
01582         {
01583 
01584             MODULE_LOG << "Training all layers greedy layer-wise AND "
01585                        << "fine-tuning all parameters, by gradient descent"
01586                        << endl;
01587             MODULE_LOG << "  stage = " << stage << endl;
01588             MODULE_LOG << "  nstages = " << nstages << endl;
01589             MODULE_LOG << "  fine_tuning_learning_rate = "
01590                        << fine_tuning_learning_rate << endl;
01591             MODULE_LOG << "  greedy_learning_rate = "
01592                        << greedy_learning_rate << endl;
01593 
01594             init_stage = stage;
01595             if( report_progress && stage < nstages )
01596                 pb = new ProgressBar(
01597                     "Greedy layer-wise training AND fine-tuning parameters of "
01598                                       + classname(),
01599                                       nstages - init_stage );
01600 
01601             setLearningRate( fine_tuning_learning_rate );
01602             train_costs.fill(MISSING_VALUE);
01603             for( ; stage<nstages ; stage++ )
01604             {
01605                 // Do a step every 'minibatch_size' examples
01606                 if (stage % minibatch_size == 0)
01607                 {
01608                     sample = stage % nsamples;
01609                     if( !fast_exact_is_equal(fine_tuning_decrease_ct, 0.) )
01610                         setLearningRate(fine_tuning_learning_rate
01611                                         /(1. + fine_tuning_decrease_ct*stage));
01612 
01613                     if (minibatch_size > 1 || minibatch_hack)
01614                     {
01615                         train_set->getExamples(sample, minibatch_size,
01616                                                inputs, targets, weights,
01617                                                NULL, true );
01618                         onlineStep(inputs, targets, train_costs_m);
01619                     }
01620                     else
01621                     {
01622                         train_set->getExample(sample, input, target, weight);
01623                         onlineStep(input, target, train_costs);
01624                     }
01625 
01626                     // Update stats if we are in the last n_train_stats_samples
01627                     if (stage >= nstages - n_train_stats_samples){
01628                         if (minibatch_size > 1 || minibatch_hack)
01629                             for (int k = 0; k < minibatch_size; k++)
01630                                 train_stats->update(train_costs_m(k));
01631                         else
01632                             train_stats->update(train_costs);
01633                     }
01634                 }
01635 
01636                 if (pb)
01637                     pb->update(stage - init_stage + 1);
01638             }
01639         }
01640         Profiler::pl_profile_end("StackedAutoassociatorsNet::train online");
01641 
01642     }
01643     Profiler::pl_profile_end("StackedAutoassociatorsNet::train");
01644 }
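
// Illustrative usage sketch (not part of this file): driving this learner
// directly from C++ through the generic option mechanism. 'trainset' is an
// assumed VMat, and the layers/connections/training_schedule options, which
// a real setup must also provide, are omitted here.
//
//     PP<StackedAutoassociatorsNet> net = new StackedAutoassociatorsNet();
//     net->setOption("greedy_learning_rate", "0.01");
//     net->setOption("fine_tuning_learning_rate", "0.001");
//     net->setOption("noise_type", "masking_noise");
//     net->setOption("nstages", "10000");
//     net->build();
//     net->setTrainingSet(trainset);
//     net->train();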
01645 
01646 void StackedAutoassociatorsNet::corrupt_input(const Vec& input, Vec& corrupted_input, int layer)
01647 {
01648     tmp_mask.resize(input.length());
01649     corrupt_input(input,corrupted_input,layer,tmp_mask);
01650 }
01651 
01652 void StackedAutoassociatorsNet::corrupt_input(const Vec& input, Vec& corrupted_input, int layer, Vec& binary_mask)
01653 {
01654     binary_mask.fill(1);
01655     corrupted_input.resize(input.length());
01656     reconstruction_weights.resize(input.length());
01657     reconstruction_weights.fill(1);
01658 
01659     if( (mask_input_layer_only && layer != 0) ||
01660          (!mask_input_layer_only && layer > (nb_corrupted_layer-1)) )
01661     {
01662         corrupted_input << input;
01663         return;
01664     }
01665 
01666     if( noise_type == "masking_noise" )
01667     {
01668         if( probability_of_masked_inputs > 0 )
01669         {
01670             if( fraction_of_masked_inputs > 0 )
01671                 PLERROR("In StackedAutoassociatorsNet::corrupt_input():" 
01672                         " fraction_of_masked_inputs and probability_of_masked_inputs can't be both > 0");
01673             if( mask_with_pepper_salt )
01674             {
01675                 real pepVal = 0;
01676                 real saltVal = 1;
01677                 if( pep_salt_zero_centered>0. )
01678                 {
01679                     pepVal = -pep_salt_zero_centered;
01680                     saltVal = pep_salt_zero_centered;
01681                 }
01682                 for( int j=0 ; j <input.length() ; j++)
01683                 {
01684                     if( random_gen->uniform_sample() < probability_of_masked_inputs )
01685                     {
01686                         // Sample saltVal with probability prob_salt_noise,
01687                         // else pepVal
01688                         corrupted_input[ j ] =
01689                             random_gen->binomial_sample(prob_salt_noise) == 1 ?
01690                                 saltVal:
01691                                 pepVal;
01692                         reconstruction_weights[j] = corrupted_data_weight;
01693                     }
01694                     else
01695                     {
01696                         corrupted_input[ j ] = input[ j ];  
01697                         reconstruction_weights[j] = data_weight;
01698                     }
01699                 }
01700             }
01701             else if( mask_with_mean )
01702             {
01703                 for( int j=0 ; j <input.length() ; j++)
01704                 {
01705                     if( random_gen->uniform_sample() < probability_of_masked_inputs )
01706                     {
01707                         corrupted_input[ j ] = expectation_means[layer][ j ];
01708                         reconstruction_weights[j] = corrupted_data_weight;
01709                         binary_mask[ j ] = 0;
01710                     }
01711                     else
01712                     {
01713                         corrupted_input[ j ] = input[ j ];
01714                         reconstruction_weights[j] = data_weight;
01715                     }
01716                 }
01717             }
01718             else
01719             {
01720                 for( int j=0 ; j <input.length() ; j++)
01721                 {
01722                     if( random_gen->uniform_sample() < probability_of_masked_inputs )
01723                     {
01724                         corrupted_input[ j ] = 0;
01725                         reconstruction_weights[j] = corrupted_data_weight;
01726                         binary_mask[ j ] = 0;
01727                     }
01728                     else
01729                     {
01730                         corrupted_input[ j ] = input[ j ];
01731                         reconstruction_weights[j] = data_weight;
01732                     }
01733                 }
01734             }
01735         }
01736         else
01737         {
01738             corrupted_input << input;
01739             reconstruction_weights.fill(data_weight);
01740             if( fraction_of_masked_inputs > 0. ) 
01741             {
01742                 random_gen->shuffleElements(autoassociator_expectation_indices[layer]);
01743                 if( mask_with_pepper_salt )
01744                 {
01745                     real pepVal = 0;
01746                     real saltVal = 1;
01747                     if( pep_salt_zero_centered>0. )
01748                     {
01749                         pepVal = -pep_salt_zero_centered;
01750                         saltVal = pep_salt_zero_centered;
01751                     }
01752                     for( int j=0 ; j < round(fraction_of_masked_inputs*input.length()) ; j++)
01753                     {
01754                         // Sample saltVal with probability prob_salt_noise,
01755                         // else pepVal
01756                         corrupted_input[ autoassociator_expectation_indices[layer][j] ] =
01757                             random_gen->binomial_sample(prob_salt_noise) == 1?
01758                                 saltVal:
01759                                 pepVal;
01760                         reconstruction_weights[autoassociator_expectation_indices[layer][j]] = corrupted_data_weight;
01761                     }
01762                 }   
01763                 else if( mask_with_mean )
01764                 {
01765                     for( int j=0 ; j < round(fraction_of_masked_inputs*input.length()) ; j++)
01766                     {
01767                         corrupted_input[ autoassociator_expectation_indices[layer][j] ] = expectation_means[layer][autoassociator_expectation_indices[layer][j]];
01768                         reconstruction_weights[autoassociator_expectation_indices[layer][j]] = corrupted_data_weight;
01769                         binary_mask[ autoassociator_expectation_indices[layer][j] ] = 0;
01770                     }
01771                 }
01772                 else
01773                 {
01774                     for( int j=0 ; j < round(fraction_of_masked_inputs*input.length()) ; j++)
01775                     {
01776                         corrupted_input[ autoassociator_expectation_indices[layer][j] ] = 0;
01777                         reconstruction_weights[autoassociator_expectation_indices[layer][j]] = corrupted_data_weight;
01778                         binary_mask[ autoassociator_expectation_indices[layer][j] ] = 0;
01779                     }
01780                 }
01781             }
01782         }
01783     }
01784     else if( noise_type == "binary_sampling" )
01785         for( int i=0; i<corrupted_input.length(); i++ )
01786             corrupted_input[i] = random_gen->binomial_sample((input[i]-0.5)*binary_sampling_noise_parameter+0.5);
01787     else if( noise_type == "gaussian" )
01788         for( int i=0; i<corrupted_input.length(); i++ )
01789             corrupted_input[i] = input[i] + random_gen->gaussian_01() * gaussian_std;
01790     else if( noise_type == "missing_data")
01791     {
01792         // The input received here is already the doubled one, as per missing_data_method
01793         int original_input_length = input.length() / 2;
01794         reconstruction_weights.resize(original_input_length);
01795    
01796         if(missing_data_method == "binomial_complementary" || 
01797            missing_data_method == "one_if_missing")
01798         {
01799             int down_missing_value = 0;
01800             int up_missing_value = 0;
01801         
01802             if(missing_data_method == "one_if_missing")
01803                 up_missing_value = 1;
01804 
01805             if( probability_of_masked_inputs > 0 )
01806             {
01807                 if( fraction_of_masked_inputs > 0 )
01808                     PLERROR("In StackedAutoassociatorsNet::corrupt_input():"
01809                             " fraction_of_masked_inputs and probability_of_masked_inputs can't be both > 0");
01810                 for( int j=0 ; j<original_input_length ; j++ )
01811                     if( random_gen->uniform_sample() < probability_of_masked_inputs )
01812                     {
01813                         corrupted_input[ j*2 ] = down_missing_value;
01814                         corrupted_input[ j*2+1 ] = up_missing_value;
01815                         reconstruction_weights[j] = corrupted_data_weight;
01816                     }
01817                     else
01818                     {
01819                         corrupted_input[ j*2 ] = input[ j*2 ];
01820                         corrupted_input[ j*2+1] = input[ j*2+1 ];
01821                         reconstruction_weights[j] = data_weight;
01822                     }
01823             }
01824             else
01825             {
01826                 corrupted_input << input;
01827                 reconstruction_weights.fill(data_weight);
01828                 if( fraction_of_masked_inputs > 0. )
01829                 {
01830                     random_gen->shuffleElements(autoassociator_expectation_indices[layer]);
01831                     for( int j=0 ; j < round(fraction_of_masked_inputs*original_input_length) ; j++)
01832                     {
01833                         corrupted_input[ autoassociator_expectation_indices[layer][j]*2 ] = down_missing_value;
01834                         corrupted_input[ autoassociator_expectation_indices[layer][j]*2 + 1 ] = up_missing_value;
01835                         reconstruction_weights[autoassociator_expectation_indices[layer][j]] = corrupted_data_weight;
01836                     }
01837                 }
01838             }
01839         }
01840         else
01841             PLERROR("In StackedAutoassociatorsNet::corrupt_input(): "
01842                     "missing_data_method %s not valid with noise_type %s",
01843                      missing_data_method.c_str(), noise_type.c_str());
01844     }
01845     else if( noise_type == "none" )
01846         corrupted_input << input;
01847     else
01848         PLERROR("In StackedAutoassociatorsNet::corrupt_input(): noise_type %s not valid", noise_type.c_str());
01849 }
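
// Worked examples of the corruption modes above (illustrative settings):
//  - masking_noise with probability_of_masked_inputs = 0.25: each input
//    coordinate is independently replaced with probability 0.25, either by
//    0, by its training-set mean (mask_with_mean), or by a salt/pepper
//    value (mask_with_pepper_salt); masked coordinates get reconstruction
//    weight corrupted_data_weight, the others data_weight.
//  - masking_noise with fraction_of_masked_inputs = 0.25: exactly
//    round(0.25 * input.length()) coordinates are masked, chosen by
//    shuffling autoassociator_expectation_indices[layer].
//  - binary_sampling: corrupted_input[i] ~ Bernoulli(
//    (input[i]-0.5) * binary_sampling_noise_parameter + 0.5 ), so a
//    parameter of 1 samples at the input value itself.
//  - gaussian: corrupted_input[i] = input[i] + N(0,1) * gaussian_std.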
01850 
01851 
01852 // ***** binomial_complementary ******
01853 // doubled_input[2*i] = input[i] and
01854 // doubled_input[2*i+1] = 1-input[i]
01855 // If the input is a gradient that we have to double for backpropagation
01856 // (double_grad==true), then:
01857 // doubled_input[2*i] = input[i] and
01858 // doubled_input[2*i+1] = -input[i]
01859 // ********** one_if_missing *********
01860 // doubled_input[2*i] = input[i] and
01861 // doubled_input[2*i+1] = 0 (gradient or not)
01862 void StackedAutoassociatorsNet::double_input(const Vec& input, Vec& doubled_input, bool double_grad) const
01863 {
01864     if( noise_type == "missing_data" )
01865     {
01866         doubled_input.resize(input.length()*2);
01867         for( int i=0; i<input.size(); i++ )
01868         {
01869             doubled_input[i*2] = input[i];
01870             if( missing_data_method == "binomial_complementary")
01871             {
01872                 if( double_grad )
01873                     doubled_input[i*2+1] = - input[i];
01874                 else
01875                     doubled_input[i*2+1] = 1 - input[i];
01876             }
01877             else if( missing_data_method == "one_if_missing" )
01878                 doubled_input[i*2+1] = 0;
01879             else
01880                 PLERROR("In StackedAutoassociatorsNet::double_input(): "
01881                 "missing_data_method %s not valid",missing_data_method.c_str());
01882         }
01883     }
01884     else
01885     {
01886         doubled_input.resize(input.length());
01887         doubled_input << input;
01888     }
01889 }
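
// Worked example of double_input() (binomial_complementary; values are
// illustrative):
//   input            = [ 0.2, 1.0 ]
//   doubled          = [ 0.2, 0.8, 1.0, 0.0 ]    (double_grad == false)
//   gradient input   = [ 0.3, -0.1 ]
//   doubled gradient = [ 0.3, -0.3, -0.1, 0.1 ]  (double_grad == true)
// With one_if_missing, every odd entry is 0 instead.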
01890 
01891 // ***** binomial_complementary *****
01892 // divided_input[i] = input[2*i] - input[2*i+1]
01893 // even when the input is a doubled gradient
01894 // ********** one_if_missing *********
01895 // divided_input[i] = input[2*i]
01896 void StackedAutoassociatorsNet::divide_input(const Vec& input, Vec& divided_input) const
01897 {
01898     if( noise_type == "missing_data" )
01899     {
01900         divided_input.resize(input.length()/2);
01901         for( int i=0; i<divided_input.size(); i++ )  
01902         {
01903             if( missing_data_method == "binomial_complementary" )
01904                 divided_input[i] = input[i*2] - input[i*2+1];
01905             else if( missing_data_method == "one_if_missing" )
01906                 divided_input[i] = input[i*2];
01907             else
01908                 PLERROR("In StackedAutoassociatorsNet::divide_input(): "
01909                         "missing_data_method %s not valid", missing_data_method.c_str());
01910         }
01911     }
01912     else
01913     {
01914         divided_input.resize(input.length());
01915         divided_input << input;
01916     }
01917 }
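
// Note that divide_input() is not the exact inverse of double_input():
// for a doubled pair (x, 1-x) it returns x - (1-x) = 2x - 1, i.e. the
// value rescaled from [0,1] to [-1,1], and for a doubled gradient (g, -g)
// it returns 2g. With one_if_missing it simply keeps the even entries.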
01918 
01919 
01920 void StackedAutoassociatorsNet::greedyStep(const Vec& input, const Vec& target,
01921                                            int index, Vec train_costs, Vec& representation)
01922 {
01923     Profiler::pl_profile_start("StackedAutoassociatorsNet::greedyStep");
01924     PLASSERT( index < n_layers );
01925 
01926     expectations[0] << input;
01927 
01928     if(correlation_connections.length() != 0)
01929     {
01930         for( int i=0 ; i<index + 1; i++ )
01931         {
01932             if( i == index )
01933             {
01934                 corrupt_input( expectations[i], corrupted_autoassociator_expectations[i], i );
01935                 connections[i]->fprop( corrupted_autoassociator_expectations[i], 
01936                                        correlation_activations[i] );
01937             }
01938             else
01939                 connections[i]->fprop( expectations[i], correlation_activations[i] );
01940 
01941             if( i == index && greedy_target_connections.length() && greedy_target_connections[i] )
01942             {
01943                 target_vec.clear();
01944                 if( probability_of_masked_target == 0. ||
01945                     random_gen->uniform_sample() >= probability_of_masked_target )
01946                     target_vec[(int)target[0]] = 1;
01947 
01948                 greedy_target_connections[i]->setAsDownInput(target_vec);
01949                 greedy_target_connections[i]->computeProduct(0, correlation_activations[i].length(),
01950                                                              correlation_activations[i], true);
01951             }
01952 
01953             layers[i+1]->fprop( correlation_activations[i],
01954                                 correlation_expectations[i] );
01955             correlation_connections[i]->fprop( correlation_expectations[i],
01956                                                activations[i+1] );
01957 
01958             correlation_layers[i]->fprop( activations[i+1],
01959                                           expectations[i+1] );
01960         }
01961     }
01962     else
01963     {
01964         for( int i=0 ; i<index + 1; i++ )
01965         {
01966             double_input(expectations[i], doubled_expectations[i]);
01967            
01968             if( i == index )
01969             {
01970                 corrupt_input( doubled_expectations[i], corrupted_autoassociator_expectations[i], i );
01971                 connections[i]->fprop( corrupted_autoassociator_expectations[i], activations[i+1] );
01972             }
01973             else
01974                 connections[i]->fprop( doubled_expectations[i], activations[i+1] );
01975             
01976             if( i == index && greedy_target_connections.length() && greedy_target_connections[i] )
01977             {
01978                 target_vec.clear();
01979                 if( probability_of_masked_target == 0. ||
01980                     random_gen->uniform_sample() >= probability_of_masked_target )
01981                     target_vec[(int)target[0]] = 1;
01982 
01983                 greedy_target_connections[i]->setAsDownInput(target_vec);
01984                 greedy_target_connections[i]->computeProduct(0, activations[i+1].length(),
01985                                                              activations[i+1], true);
01986             }
01987 
01988             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
01989             if( keep_online_representations )
01990                 representation << expectations[i+1];
01991         }
01992     }
01993 
01994 
01995     if( partial_costs && partial_costs[ index ] )
01996     {
01997         partial_costs[ index ]->fprop( expectations[ index + 1],
01998                                        target, partial_cost_value );
01999 
02000         // Update partial cost (might contain some weights for example)
02001         partial_costs[ index ]->bpropUpdate( expectations[ index + 1 ],
02002                                              target, partial_cost_value[0],
02003                                              expectation_gradients[ index + 1 ]
02004                                              );
02005 
02006         train_costs.subVec(partial_costs_positions[index]+1,
02007                            partial_cost_value.length()) << partial_cost_value;
02008 
02009         if( partial_costs_weights.length() != 0 )
02010             expectation_gradients[ index + 1 ] *= partial_costs_weights[index];
02011 
02012         // Update hidden layer bias and weights
02013         layers[ index+1 ]->bpropUpdate( activations[ index + 1 ],
02014                                         expectations[ index + 1 ],
02015                                         activation_gradients[ index + 1 ],
02016                                         expectation_gradients[ index + 1 ] );
02017 
02018         Profiler::pl_profile_start("StackedAutoassociatorsNet::greedyStep bprop connection");
02019         connections[ index ]->bpropUpdate( corrupted_autoassociator_expectations[index],
02020                                            activations[ index + 1 ],
02021                                            expectation_gradients[ index ],
02022                                            activation_gradients[ index + 1 ] );
02023         Profiler::pl_profile_end("StackedAutoassociatorsNet::greedyStep bprop connection");
02024     }
02025 
02026     reconstruction_connections[ index ]->fprop( expectations[ index + 1],
02027                                                 reconstruction_activations);
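    // With direct (input-to-reconstruction) connections, the visible
    // reconstruction is computed from the sum of the direct activations and
    // of the activations coming back from the hidden layer; both paths
    // receive the NLL gradient below.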
02028     if(direct_connections.length() != 0)
02029     {
02030         direct_connections[ index ]->fprop( corrupted_autoassociator_expectations[index],
02031                                             direct_activations );
02032         direct_and_reconstruction_activations.clear();
02033         direct_and_reconstruction_activations += direct_activations;
02034         direct_and_reconstruction_activations += reconstruction_activations;
02035 
02036         reconstruction_layers[ index ]->fprop( direct_and_reconstruction_activations,
02037                                 reconstruction_layers[ index ]->expectation);
02038 
02039         reconstruction_layers[ index ]->activation << direct_and_reconstruction_activations;
02040         reconstruction_layers[ index ]->activation += reconstruction_layers[ index ]->bias;
02041         //reconstruction_layers[ index ]->expectation_is_up_to_date = true;  // Won't work for certain RBMLayers
02042         reconstruction_layers[ index ]->setExpectationByRef( reconstruction_layers[ index ]->expectation );
02043         train_costs[index] = reconstruction_layers[ index ]->fpropNLL(expectations[index]);
02044 
02045         reconstruction_layers[ index ]->bpropNLL(expectations[index], train_costs[index],
02046                                   direct_and_reconstruction_activation_gradients);
02047 
02048         reconstruction_layers[ index ]->update(direct_and_reconstruction_activation_gradients);
02049 
02050         direct_connections[ index ]->bpropUpdate(
02051             corrupted_autoassociator_expectations[index],
02052             direct_activations,
02053             reconstruction_expectation_gradients, // Will be overwritten later
02054             direct_and_reconstruction_activation_gradients);
02055 
02056         reconstruction_connections[ index ]->bpropUpdate(
02057             expectations[ index + 1],
02058             reconstruction_activations,
02059             reconstruction_expectation_gradients,
02060             direct_and_reconstruction_activation_gradients);
02061     }
02062     else
02063     {
02064         Vec divided_reconstruction_activations(reconstruction_activations.size());
02065         Vec divided_reconstruction_activation_gradients(reconstruction_layers[ index ]->size);
02066         
02067         divide_input(reconstruction_activations, divided_reconstruction_activations);
02068 
02069         reconstruction_layers[ index ]->fprop( divided_reconstruction_activations,
02070                                reconstruction_layers[ index ]->expectation);
02071         reconstruction_layers[ index ]->activation << divided_reconstruction_activations;
02072         reconstruction_layers[ index ]->activation += reconstruction_layers[ index ]->bias;
02073         //reconstruction_layers[ index ]->expectation_is_up_to_date = true;
02074         reconstruction_layers[ index ]->setExpectationByRef( reconstruction_layers[ index ]->expectation );
02075         real rec_err;
02076 
02077         // If we want to compute reconstruction error according to reconstruction weights.
02078         //   rec_err = reconstruction_layers[ index ]->fpropNLL(expectations[index], reconstruction_weights);
02079        
02080         if( renoising )
02081         {
02082             corrupt_input( expectations[index], second_corrupted_autoassociator_expectations[index], index );
02083             rec_err = reconstruction_layers[ index ]->fpropNLL(second_corrupted_autoassociator_expectations[index]);
02084             reconstruction_layers[ index ]->bpropNLL(second_corrupted_autoassociator_expectations[index], rec_err, divided_reconstruction_activation_gradients);
02085         }
02086         else
02087         {
02088             rec_err = reconstruction_layers[ index ]->fpropNLL(expectations[index]);
02089             reconstruction_layers[ index ]->bpropNLL(expectations[index], rec_err, divided_reconstruction_activation_gradients);
02090         }
02091         train_costs[index] = rec_err;
02092 
02093         // Apply the reconstruction weights, which can differ between
02094         // corrupted (or missing) and non-corrupted data.
02095         multiply(reconstruction_weights, 
02096                 divided_reconstruction_activation_gradients, 
02097                 divided_reconstruction_activation_gradients);
02098 
02099         double_input(divided_reconstruction_activation_gradients, 
02100                     reconstruction_activation_gradients, true);   
02101  
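        // With reconstruct_hidden, the reconstructed input is also mapped
        // back up through connections[index], and the NLL of reconstructing
        // the hidden representation expectations[index+1] is added to this
        // layer's training cost.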
02102         if(reconstruct_hidden)
02103         {
02104             Profiler::pl_profile_start("StackedAutoassociatorsNet::greedyStep reconstruct_hidden");
02105             connections[ index ]->fprop( reconstruction_layers[ index ]->expectation,
02106                                          hidden_reconstruction_activations );
02107             layers[ index+1 ]->fprop( hidden_reconstruction_activations,
02108                 layers[ index+1 ]->expectation );
02109             layers[ index+1 ]->activation << hidden_reconstruction_activations;
02110             layers[ index+1 ]->activation += layers[ index+1 ]->bias;
02111             //layers[ index+1 ]->expectation_is_up_to_date = true;
02112             layers[ index+1 ]->setExpectationByRef( layers[ index+1 ]->expectation );
02113             real hid_rec_err = layers[ index+1 ]->fpropNLL(expectations[index+1]);
02114             train_costs[index] += hid_rec_err;
02115 
02116             layers[ index+1 ]->bpropNLL(expectations[index+1], hid_rec_err,
02117                                         hidden_reconstruction_activation_gradients);
02118             layers[ index+1 ]->update(hidden_reconstruction_activation_gradients);
02119 
02120             Profiler::pl_profile_start("StackedAutoassociatorsNet::greedyStep reconstruct_hidden connection bprop");
02121             connections[ index ]->bpropUpdate(
02122                 reconstruction_layers[ index ]->expectation,
02123                 hidden_reconstruction_activations,
02124                 reconstruction_expectation_gradients_from_hid_rec,
02125                 hidden_reconstruction_activation_gradients);
02126             Profiler::pl_profile_end("StackedAutoassociatorsNet::greedyStep reconstruct_hidden connection bprop");
02127 
02128             reconstruction_layers[ index ]->bpropUpdate(
02129                 reconstruction_activations,
02130                 reconstruction_layers[ index ]->expectation,
02131                 reconstruction_activation_gradients_from_hid_rec,
02132                 reconstruction_expectation_gradients_from_hid_rec);
02133             Profiler::pl_profile_end("StackedAutoassociatorsNet::greedyStep reconstruct_hidden");
02134         }
02135 
02136         reconstruction_layers[ index ]->update(divided_reconstruction_activation_gradients);
02137         
02138         if(reconstruct_hidden)
02139             reconstruction_activation_gradients +=
02140                 reconstruction_activation_gradients_from_hid_rec;
02141 
02142         // // This is a bad update! Propagates gradient through sigmoid again!
02143         // reconstruction_layers[ index ]->bpropUpdate( reconstruction_activations,
02144         //                                   reconstruction_layers[ index ]->expectation,
02145         //                                   reconstruction_activation_gradients,
02146         //                                   reconstruction_expectation_gradients);
02147         reconstruction_connections[ index ]->bpropUpdate(
02148             expectations[ index + 1],
02149             reconstruction_activations,
02150             reconstruction_expectation_gradients,
02151             reconstruction_activation_gradients);
02152     }
02153 
02154 
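    // The loop below adds the (sub)gradient of the L1 penalty
    //   l1_neuron_decay * |h - l1_neuron_decay_center|
    // on each hidden unit h, i.e. +/- l1_neuron_decay depending on the sign
    // of h - l1_neuron_decay_center. The same penalty is applied again
    // during unsupervised fine-tuning below.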
02155     if(!fast_exact_is_equal(l1_neuron_decay,0))
02156     {
02157         // Compute L1 penalty gradient on neurons
02158         real* hid = expectations[ index + 1 ].data();
02159         real* grad = reconstruction_expectation_gradients.data();
02160         int len = expectations[ index + 1 ].length();
02161         for(int l=0; l<len; l++)
02162         {
02163             if(*hid > l1_neuron_decay_center)
02164                 *grad += l1_neuron_decay;
02165             else if(*hid < l1_neuron_decay_center)
02166                 *grad -= l1_neuron_decay;
02167             hid++;
02168             grad++;
02169         }
02170     }
02171 
02172     // Update hidden layer bias and weights
02173 
02174     if(correlation_connections.length() != 0)
02175     {
02176         correlation_layers[ index ]->bpropUpdate(
02177             activations[ index + 1 ],
02178             expectations[ index + 1 ],
02179             reconstruction_activation_gradients,  // reused
02180             reconstruction_expectation_gradients
02181             );
02182 
02183         correlation_connections[ index ]->bpropUpdate(
02184             correlation_expectations[ index ],
02185             activations[ index+1 ],
02186             correlation_expectation_gradients[ index ],
02187             reconstruction_activation_gradients);
02188 
02189         layers[ index+1 ]->bpropUpdate(
02190             correlation_activations[ index ],
02191             correlation_expectations[ index ],
02192             correlation_activation_gradients [ index ],
02193             correlation_expectation_gradients [ index ]);
02194 
02195         connections[ index ]->bpropUpdate(
02196             corrupted_autoassociator_expectations[index],
02197             correlation_activations[ index ],
02198             reconstruction_expectation_gradients, //reused
02199             correlation_activation_gradients [ index ]);
02200 
02201         if( greedy_target_connections.length() && greedy_target_connections[index] )
02202         {
02203             greedy_target_connections[index]->bpropUpdate(
02204                 target_vec, 
02205                 correlation_activations[index],
02206                 target_vec_gradient,
02207                 correlation_activation_gradients [ index ]);
02208         }
02209     }
02210     else
02211     {
02212         layers[ index+1 ]->bpropUpdate( activations[ index + 1 ],
02213                                         expectations[ index + 1 ],
02214                                         // reused
02215                                         reconstruction_activation_gradients,
02216                                         reconstruction_expectation_gradients);
02217 
02218         connections[ index ]->bpropUpdate(
02219             corrupted_autoassociator_expectations[index],
02220             activations[ index + 1 ],
02221             reconstruction_expectation_gradients, //reused
02222             reconstruction_activation_gradients);
02223         if( greedy_target_connections.length() && greedy_target_connections[index] )
02224         {
02225             greedy_target_connections[index]->bpropUpdate(
02226                 target_vec, 
02227                 activations[ index + 1 ],
02228                 target_vec_gradient,
02229                 reconstruction_activation_gradients);
02230         }
02231     }
02232 
02233     Profiler::pl_profile_end("StackedAutoassociatorsNet::greedyStep");
02234 }
02235 
02236 void StackedAutoassociatorsNet::greedyStep(const Mat& inputs,
02237                                            const Mat& targets,
02238                                            int index, Mat& train_costs)
02239 {
02240     PLCHECK_MSG(false, "Mini-batch not implemented yet.");
02241 }
02242 
02243 void StackedAutoassociatorsNet::unsupervisedFineTuningStep(const Vec& input,
02244                                                            const Vec& target,
02245                                                            Vec& train_costs)
02246 {
02247     // fprop
02248     expectations[0] << input;
02249 
02250     bool old_mask_input_layer_only = mask_input_layer_only;
02251     mask_input_layer_only = mask_input_layer_only_in_unsupervised_fine_tuning;
02252 
02253     if(correlation_connections.length() != 0)
02254     {
02255 
02256         for( int i=0 ; i<n_layers-1; i++ )
02257         {
02258             corrupt_input( expectations[i], corrupted_autoassociator_expectations[i], i);
02259             connections[i]->fprop( corrupted_autoassociator_expectations[i],
02260                                    correlation_activations[i] );
02261             layers[i+1]->fprop( correlation_activations[i],
02262                                 correlation_expectations[i] );
02263             correlation_connections[i]->fprop( correlation_expectations[i],
02264                                                activations[i+1] );
02265             correlation_layers[i]->fprop( activations[i+1],
02266                                           expectations[i+1] );
02267         }
02268     }
02269     else
02270     {
02271         for( int i=0 ; i<n_layers-1; i++ )
02272         {
02273             corrupt_input( expectations[i], corrupted_autoassociator_expectations[i], i);
02274             connections[i]->fprop( corrupted_autoassociator_expectations[i],
02275                                    activations[i+1] );
02276             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
02277         }
02278     }
02279     fine_tuning_reconstruction_expectations[ n_layers-1 ] <<
02280         expectations[ n_layers-1 ];
02281 
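    // Decode: starting from the top-level representation, go back down
    // through the reconstruction connections and layers to rebuild the
    // input; its NLL under layers[0] below is the fine-tuning cost.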
02282     for( int i=n_layers-2 ; i>=0; i-- )
02283     {
02284         reconstruction_connections[i]->fprop(
02285             fine_tuning_reconstruction_expectations[i+1],
02286             fine_tuning_reconstruction_activations[i] );
02287         layers[i]->fprop( fine_tuning_reconstruction_activations[i],
02288                           fine_tuning_reconstruction_expectations[i]);
02289     }
02290 
02291     layers[ 0 ]->setExpectation( fine_tuning_reconstruction_expectations[ 0 ] );
02292     layers[ 0 ]->activation << fine_tuning_reconstruction_activations[0];
02293     layers[ 0 ]->activation += layers[ 0 ]->bias;
02294     real rec_err = layers[ 0 ]->fpropNLL( input );
02295     train_costs[n_layers-1] = rec_err;
02296 
02297     layers[ 0 ]->bpropNLL( input, rec_err,
02298                            fine_tuning_reconstruction_activation_gradients[ 0 ] );
02299 
02300     layers[ 0 ]->update( fine_tuning_reconstruction_activation_gradients[ 0 ] );
02301 
02302     for( int i=0 ; i<n_layers-1; i++ )
02303     {
02304         if( i != 0)
02305             layers[i]->bpropUpdate( fine_tuning_reconstruction_activations[i],
02306                                     fine_tuning_reconstruction_expectations[i],
02307                                     fine_tuning_reconstruction_activation_gradients[i],
02308                                     fine_tuning_reconstruction_expectation_gradients[i]);
02309         reconstruction_connections[i]->bpropUpdate(
02310             fine_tuning_reconstruction_expectations[i+1],
02311             fine_tuning_reconstruction_activations[i],
02312             fine_tuning_reconstruction_expectation_gradients[i+1],
02313             fine_tuning_reconstruction_activation_gradients[i]);
02314     }
02315 
02316     expectation_gradients[ n_layers-1 ] <<
02317         fine_tuning_reconstruction_expectation_gradients[ n_layers-1 ];
02318 
02319     for( int i=n_layers-2 ; i>=0; i-- )
02320     {
02321 
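        // The penalty implemented below is lambda * sum_j |h_j - c|, with
        // lambda = l1_neuron_decay and c = l1_neuron_decay_center, whose
        // subgradient w.r.t. each unit h_j is +lambda when h_j > c and
        // -lambda when h_j < c (taken as 0 exactly at the center).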
02322         if(!fast_exact_is_equal(l1_neuron_decay,0))
02323         {
02324             // Compute L1 penalty gradient on neurons
02325             real* hid = expectations[ i + 1 ].data();
02326             real* grad = expectation_gradients[ i + 1 ].data();
02327             int len = expectations[ i + 1 ].length();
02328             for(int l=0; l<len; l++)
02329             {
02330                 if(*hid > l1_neuron_decay_center)
02331                     *grad += l1_neuron_decay;
02332                 else if(*hid < l1_neuron_decay_center)
02333                     *grad -= l1_neuron_decay;
02334                 hid++;
02335                 grad++;
02336             }
02337         }
02338 
02339         if(correlation_connections.length() != 0)
02340         {
02341             correlation_layers[ i ]->bpropUpdate(
02342                 activations[ i + 1 ],
02343                 expectations[ i + 1 ],
02344                 activation_gradients[ i + 1 ],
02345                 expectation_gradients[ i + 1 ]
02346                 );
02347 
02348             correlation_connections[ i ]->bpropUpdate(
02349                 correlation_expectations[ i ],
02350                 activations[ i + 1 ],
02351                 correlation_expectation_gradients[ i ],
02352                 activation_gradients[ i + 1 ] );
02353 
02354             layers[ i + 1 ]->bpropUpdate(
02355                 correlation_activations[ i ],
02356                 correlation_expectations[ i ],
02357                 correlation_activation_gradients [ i ],
02358                 correlation_expectation_gradients [ i ]);
02359 
02360             connections[ i ]->bpropUpdate(
02361                 corrupted_autoassociator_expectations[ i ],
02362                 correlation_activations[ i ],
02363                 expectation_gradients[i],
02364                 correlation_activation_gradients [ i ]);
02365         }
02366         else
02367         {
02368 
02369             layers[i+1]->bpropUpdate(
02370                 activations[i+1],expectations[i+1],
02371                 activation_gradients[i+1],expectation_gradients[i+1]);
02372             connections[i]->bpropUpdate(
02373                 corrupted_autoassociator_expectations[i], activations[i+1],
02374                 expectation_gradients[i], activation_gradients[i+1] );
02375         }
02376     }
02377 
02378     mask_input_layer_only = old_mask_input_layer_only;
02379 }
02380 
02381 void StackedAutoassociatorsNet::unsupervisedFineTuningStep(const Mat& inputs,
02382                                                            const Mat& targets,
02383                                                            Mat& train_costs)
02384 {
02385     PLCHECK_MSG(false, "Mini-batch not implemented yet.");
02386 }
02387 
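// Supervised fine-tuning on a single example: fprop through the whole stack
// (in the no-correlation branch, inputs pass through double_input before
// each connection), then through final_module and final_cost, and
// backpropagate the supervised cost down the stack (dividing the doubled
// gradients back via divide_input). The last final_cost_value.length()
// entries of train_costs receive the final cost values.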
02388 void StackedAutoassociatorsNet::fineTuningStep(const Vec& input,
02389                                                const Vec& target,
02390                                                Vec& train_costs)
02391 {
02392     Profiler::pl_profile_start("StackedAutoassociatorsNet::fineTuningStep");
02393     Profiler::pl_profile_start("StackedAutoassociatorsNet::fineTuningStep fprop");
02394 
02395     // fprop
02396     expectations[0] << input;
02397 
02398     if(correlation_connections.length() != 0)
02399     {
02400         for( int i=0 ; i<n_layers-1; i++ )
02401         {
02402             connections[i]->fprop( expectations[i], correlation_activations[i] );
02403             layers[i+1]->fprop( correlation_activations[i],
02404                                 correlation_expectations[i] );
02405             correlation_connections[i]->fprop( correlation_expectations[i],
02406                                                activations[i+1] );
02407             correlation_layers[i]->fprop( activations[i+1],
02408                                           expectations[i+1] );
02409         }
02410     }
02411     else
02412     {
02413         for( int i=0 ; i<n_layers-1; i++ )
02414         {
02415             double_input(expectations[i], doubled_expectations[i]);
02416             Profiler::pl_profile_start("StackedAutoassociatorsNet::fineTuningStep fprop connection");
02417             connections[i]->fprop( doubled_expectations[i], activations[i+1] );
02418             Profiler::pl_profile_end("StackedAutoassociatorsNet::fineTuningStep fprop connection");
02419             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
02420         }
02421     }
02422     Profiler::pl_profile_end("StackedAutoassociatorsNet::fineTuningStep fprop");
02423     final_module->fprop( expectations[ n_layers-1 ],
02424                          final_cost_input );
02425     final_cost->fprop( final_cost_input, target, final_cost_value );
02426 
02427     train_costs.subVec(train_costs.length()-final_cost_value.length(),
02428                        final_cost_value.length()) <<
02429         final_cost_value;
02430 
02431     final_cost->bpropUpdate( final_cost_input, target,
02432                              final_cost_value[0],
02433                              final_cost_gradient );
02434     final_module->bpropUpdate( expectations[ n_layers-1 ],
02435                                final_cost_input,
02436                                expectation_gradients[ n_layers-1 ],
02437                                final_cost_gradient );
02438 
02439     Profiler::pl_profile_start("StackedAutoassociatorsNet::fineTuningStep bpropUpdate");
02440     if( correlation_connections.length() != 0 )
02441     {
02442         for( int i=n_layers-1 ; i>0 ; i-- )
02443         {
02444             correlation_layers[i-1]->bpropUpdate(
02445                 activations[i],
02446                 expectations[i],
02447                 activation_gradients[i],
02448                 expectation_gradients[i] );
02449 
02450             correlation_connections[i-1]->bpropUpdate(
02451                 correlation_expectations[i-1],
02452                 activations[i],
02453                 correlation_expectation_gradients[i-1],
02454                 activation_gradients[i] );
02455 
02456             layers[i]->bpropUpdate( correlation_activations[i-1],
02457                                     correlation_expectations[i-1],
02458                                     correlation_activation_gradients[i-1],
02459                                     correlation_expectation_gradients[i-1] );
02460 
02461             connections[i-1]->bpropUpdate( expectations[i-1],
02462                                            correlation_activations[i-1],
02463                                            expectation_gradients[i-1],
02464                                            correlation_activation_gradients[i-1] );
02465         }
02466     }
02467     else
02468     {   
02469         for( int i=n_layers-1 ; i>0 ; i-- )
02470         {
02471             layers[i]->bpropUpdate( activations[i],
02472                                     expectations[i],
02473                                     activation_gradients[i],
02474                                     expectation_gradients[i] );
02475 
02476             Profiler::pl_profile_start("StackedAutoassociatorsNet::fineTuningStep bpropUpdate connection");
02477             connections[i-1]->bpropUpdate( doubled_expectations[i-1],
02478                                            activations[i],
02479                                            doubled_expectation_gradients[i-1],
02480                                            activation_gradients[i] );
02481 
02482             Profiler::pl_profile_end("StackedAutoassociatorsNet::fineTuningStep bpropUpdate connection");
02483             divide_input( doubled_expectation_gradients[i-1], expectation_gradients[i-1] );
02484         }
02485     }
02486     Profiler::pl_profile_end("StackedAutoassociatorsNet::fineTuningStep bpropUpdate");
02487     Profiler::pl_profile_end("StackedAutoassociatorsNet::fineTuningStep");
02488 }
02489 
02490 void StackedAutoassociatorsNet::fineTuningStep(const Mat& inputs,
02491                                                const Mat& targets,
02492                                                Mat& train_costs)
02493 {
02494     PLCHECK_MSG(false, "Mini-batch not implemented yet.");
02495 }
02496 
02497 
02498 
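// Online training step on a single example: performs, in one pass, the
// greedy layer-wise denoising updates (with the greedy learning rate) and a
// supervised fine-tuning update (with the fine-tuning learning rate). When
// greedy_target_connections are present, a one-hot encoding of the target
// (masked with probability probability_of_masked_target) is added to each
// layer's activation.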
02499 void StackedAutoassociatorsNet::onlineStep(const Vec& input,
02500                                            const Vec& target,
02501                                            Vec& train_costs)
02502 {
02503     real lr;
02504     // fprop
02505     expectations[0] << input;
02506 
02507     if(correlation_connections.length() != 0)
02508     {
02509         for( int i=0 ; i<n_layers-1; i++ )
02510         {
02511             corrupt_input( expectations[i], corrupted_autoassociator_expectations[i], 
02512                            i, binary_masks[i] );
02513             connections[i]->fprop( corrupted_autoassociator_expectations[i], 
02514                                    correlation_activations[i] );
02515 
02516             if( greedy_target_connections.length() && greedy_target_connections[i] )
02517             {
02518                 targets_vec[i].clear();
02519                 if( probability_of_masked_target == 0. ||
02520                     random_gen->uniform_sample() >= probability_of_masked_target )
02521                     targets_vec[i][(int)target[0]] = 1;
02522 
02523                 greedy_target_connections[i]->setAsDownInput(targets_vec[i]);
02524                 greedy_target_connections[i]->computeProduct(0, correlation_activations[i].length(),
02525                                                              correlation_activations[i], true);
02526             }
02527 
02528             layers[i+1]->fprop( correlation_activations[i],
02529                                 correlation_expectations[i] );
02530             correlation_connections[i]->fprop( correlation_expectations[i],
02531                                                activations[i+1] );
02532             correlation_layers[i]->fprop( activations[i+1],
02533                                           expectations[i+1] );
02534 
02535         }
02536     }
02537     else
02538     {
02539         for( int i=0 ; i<n_layers-1; i++ )
02540         {
02541             corrupt_input( expectations[i], corrupted_autoassociator_expectations[i], 
02542                            i, binary_masks[i] );
02543             connections[i]->fprop( corrupted_autoassociator_expectations[i], 
02544                                    activations[i+1] );
02545             
02546             if( greedy_target_connections.length() && greedy_target_connections[i] )
02547             {
02548                 targets_vec[i].clear();
02549                 if( probability_of_masked_target == 0. ||
02550                     random_gen->uniform_sample() >= probability_of_masked_target )
02551                     targets_vec[i][(int)target[0]] = 1;
02552 
02553                 greedy_target_connections[i]->setAsDownInput(targets_vec[i]);
02554                 greedy_target_connections[i]->computeProduct(0, activations[i+1].length(),
02555                                                              activations[i+1], true);
02556             }
02557 
02558             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
02559         }
02560     }
02561 
02562     // Unsupervised greedy layer-wise cost
02563 
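    // Both learning rates used in this function follow the same
    // inverse-time decay,
    //     lr(stage) = base_rate / (1 + decrease_ct * stage),
    // with the greedy pair (greedy_learning_rate, greedy_decrease_ct) here
    // and the fine-tuning pair further down.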
02564     // Set learning rates
02565     if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) )
02566         lr = greedy_learning_rate / (1 + greedy_decrease_ct * stage) ;
02567     else
02568         lr = greedy_learning_rate;
02569 
02570     for( int i=0 ; i<n_layers-1 ; i++ )
02571     {
02572         layers[i]->setLearningRate( lr );
02573         reconstruction_layers[i]->setLearningRate( lr );
02574         connections[i]->setLearningRate( lr );
02575         reconstruction_connections[i]->setLearningRate( lr );
02576         if(correlation_layers.length() != 0)
02577         {
02578             correlation_layers[i]->setLearningRate( lr );
02579             correlation_connections[i]->setLearningRate( lr );
02580         }
02581         if( partial_costs.length() != 0 && partial_costs[ i ] )
02582         {
02583             partial_costs[ i ]->setLearningRate( lr );
02584         }
02585         if( greedy_target_connections.length() && greedy_target_connections[i] )
02586             greedy_target_connections[i]->setLearningRate( lr );
02587     }
02588     layers[n_layers-1]->setLearningRate( lr );
02589 
02590     // Backpropagate unsupervised gradient, layer-wise
02591     for( int i=n_layers-1 ; i>0 ; i-- )
02592     {
02593         reconstruction_connections[ i-1 ]->fprop(
02594             expectations[ i ],
02595             reconstruction_activations);
02596 
02597         reconstruction_layers[ i-1 ]->fprop( reconstruction_activations,
02598                                              reconstruction_layers[ i-1 ]->expectation);
02599 
02600         reconstruction_layers[ i-1 ]->activation << reconstruction_activations;
02601         reconstruction_layers[ i-1 ]->activation += reconstruction_layers[ i-1 ]->bias;
02602         //reconstruction_layers[ i-1 ]->expectation_is_up_to_date = true;
02603         reconstruction_layers[ i-1 ]->setExpectationByRef( reconstruction_layers[ i-1 ]->expectation );
02604         real rec_err = reconstruction_layers[ i-1 ]->fpropNLL( expectations[i-1] );
02605         train_costs[i-1] = rec_err;
02606 
02607         reconstruction_layers[ i-1 ]->bpropNLL(expectations[i-1], rec_err,
02608                                                reconstruction_activation_gradients);
02609 
02610         reconstruction_layers[ i-1 ]->update(reconstruction_activation_gradients);
02611 
02612         reconstruction_connections[ i-1 ]->bpropUpdate(
02613             expectations[ i ],
02614             reconstruction_activations,
02615             reconstruction_expectation_gradients,
02616             reconstruction_activation_gradients);
02617 
02618         if( partial_costs.length() != 0 && partial_costs[ i-1 ] )
02619         {
02620             
02621             partial_costs[ i-1 ]->fprop( expectations[ i ],
02622                                          target, partial_cost_value );
02623             
02624             // Update partial cost (might contain some weights for example)
02625             partial_costs[ i-1 ]->bpropUpdate(
02626                 expectations[ i ],
02627                 target, partial_cost_value[0],
02628                 expectation_gradients[ i ]
02629                 );
02630 
02631             train_costs.subVec(partial_costs_positions[i-1]+1,
02632                                partial_cost_value.length())
02633                 << partial_cost_value;
02634             
02635             if( partial_costs_weights.length() != 0 )
02636                 expectation_gradients[ i ] *= partial_costs_weights[i-1];
02637             reconstruction_expectation_gradients += expectation_gradients[ i ];
02638         }
02639 
02640         if(!fast_exact_is_equal(l1_neuron_decay,0))
02641         {
02642             // Compute L1 penalty gradient on neurons
02643             real* hid = expectations[ i ].data();
02644             real* grad = reconstruction_expectation_gradients.data();
02645             int len = expectations[ i ].length();
02646             for(int j=0; j<len; j++)
02647             {
02648                 if(*hid > l1_neuron_decay_center)
02649                     *grad += l1_neuron_decay;
02650                 else if(*hid < l1_neuron_decay_center)
02651                     *grad -= l1_neuron_decay;
02652                 hid++;
02653                 grad++;
02654             }
02655         }
02656 
02657         if( correlation_connections.length() != 0 )
02658         {
02659             correlation_layers[i-1]->bpropUpdate(
02660                 activations[i],
02661                 expectations[i],
02662                 reconstruction_activation_gradients,
02663                 reconstruction_expectation_gradients );
02664 
02665             correlation_connections[i-1]->bpropUpdate(
02666                 correlation_expectations[i-1],
02667                 activations[i],
02668                 correlation_expectation_gradients[i-1],
02669                 reconstruction_activation_gradients);
02670 
02671             layers[i]->bpropUpdate( correlation_activations[i-1],
02672                                     correlation_expectations[i-1],
02673                                     correlation_activation_gradients[i-1],
02674                                     correlation_expectation_gradients[i-1] );
02675 
02676             connections[i-1]->bpropUpdate( corrupted_autoassociator_expectations[i-1],
02677                                            correlation_activations[i-1],
02678                                            reconstruction_expectation_gradients,
02679                                            correlation_activation_gradients[i-1] );
02680 
02681             if( greedy_target_connections.length() && greedy_target_connections[i-1] )
02682             {
02683                 greedy_target_connections[i-1]->bpropUpdate(
02684                     targets_vec[i-1], 
02685                     correlation_activations[i-1],
02686                     targets_vec_gradient[i-1],
02687                     correlation_activation_gradients [ i-1 ]);
02688             }
02689         }
02690         else
02691         {
02692             layers[i]->bpropUpdate(
02693                 activations[i],
02694                 expectations[i],
02695                 reconstruction_activation_gradients,
02696                 reconstruction_expectation_gradients );
02697 
02698             connections[i-1]->bpropUpdate(
02699                 corrupted_autoassociator_expectations[i-1],
02700                 activations[i],
02701                 reconstruction_expectation_gradients,
02702                 reconstruction_activation_gradients);
02703 
02704             if( greedy_target_connections.length() && greedy_target_connections[i-1] )
02705             {
02706                 greedy_target_connections[i-1]->bpropUpdate(
02707                     targets_vec[i-1], 
02708                     activations[ i ],
02709                     targets_vec_gradient[i-1],
02710                     reconstruction_activation_gradients);
02711             }
02712         }
02713     }
02714 
02715     // Compute the fine-tuning learning rate
02716     // (same inverse-time decay schedule as for the greedy phase)
02717     if( !fast_exact_is_equal( fine_tuning_decrease_ct , 0 ) )
02718         lr = fine_tuning_learning_rate
02719             / (1 + fine_tuning_decrease_ct * stage) ;
02720     else
02721         lr = fine_tuning_learning_rate ;
02722 
02723     // Set learning rate back for fine-tuning
02724     for( int i=0 ; i<n_layers-1 ; i++ )
02725     {
02726         layers[i]->setLearningRate( lr );
02727         connections[i]->setLearningRate( lr );
02728         //reconstruction_connections[i]->setLearningRate( lr );
02729         if(correlation_layers.length() != 0)
02730         {
02731             correlation_layers[i]->setLearningRate( lr );
02732             correlation_connections[i]->setLearningRate( lr );
02733         }
02734         if( greedy_target_connections.length() && greedy_target_connections[i] )
02735             greedy_target_connections[i]->setLearningRate( lr );
02736     }
02737     layers[n_layers-1]->setLearningRate( lr );
02738 
02739 
02740     final_module->fprop( expectations[ n_layers-1 ],
02741                          final_cost_input );
02742     final_cost->fprop( final_cost_input, target, final_cost_value );
02743 
02744     train_costs.subVec(train_costs.length()-final_cost_value.length(),
02745                        final_cost_value.length()) <<
02746         final_cost_value;
02747 
02748     final_cost->bpropUpdate( final_cost_input, target,
02749                              final_cost_value[0],
02750                              final_cost_gradient );
02751     final_module->bpropUpdate( expectations[ n_layers-1 ],
02752                                final_cost_input,
02753                                expectation_gradients[ n_layers-1 ],
02754                                final_cost_gradient );
02755 
02756     // Fine-tuning backpropagation
02757     if( correlation_connections.length() != 0 )
02758     {
02759         for( int i=n_layers-1 ; i>0 ; i-- )
02760         {
02761             correlation_layers[i-1]->bpropUpdate(
02762                 activations[i],
02763                 expectations[i],
02764                 activation_gradients[i],
02765                 expectation_gradients[i] );
02766 
02767             correlation_connections[i-1]->bpropUpdate(
02768                 correlation_expectations[i-1],
02769                 activations[i],
02770                 correlation_expectation_gradients[i-1],
02771                 activation_gradients[i] );
02772 
02773             layers[i]->bpropUpdate( correlation_activations[i-1],
02774                                     correlation_expectations[i-1],
02775                                     correlation_activation_gradients[i-1],
02776                                     correlation_expectation_gradients[i-1] );
02777 
02778             connections[i-1]->bpropUpdate(
02779                 corrupted_autoassociator_expectations[i-1],
02780                 correlation_activations[i-1],
02781                 expectation_gradients[i-1],
02782                 correlation_activation_gradients[i-1] );
02783             expectation_gradients[i-1] *= binary_masks[ i-1 ];
02784         }
02785     }
02786     else
02787     {
02788         for( int i=n_layers-1 ; i>0 ; i-- )
02789         {
02790             layers[i]->bpropUpdate( activations[i],
02791                                     expectations[i],
02792                                     activation_gradients[i],
02793                                     expectation_gradients[i] );
02794 
02795             connections[i-1]->bpropUpdate( corrupted_autoassociator_expectations[i-1],
02796                                            activations[i],
02797                                            expectation_gradients[i-1],
02798                                            activation_gradients[i] );
02799             expectation_gradients[i-1] *= binary_masks[ i-1 ];
02800         }
02801     }
02802 }
02803 
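// Mini-batch version of the online step. Unlike the single-example version,
// it does not corrupt the inputs, and neither greedy_target_connections nor
// partial costs combined with correlation_connections are supported (both
// raise PLERROR below).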
02804 void StackedAutoassociatorsNet::onlineStep(const Mat& inputs,
02805                                            const Mat& targets,
02806                                            Mat& train_costs)
02807 {
02808     real lr;
02809     int mbatch_size = inputs.length();
02810     PLASSERT( targets.length() == mbatch_size );
02811     train_costs.resize(mbatch_size, train_costs.width());
02812 
02813     // fprop
02814     expectations_m[0].resize(mbatch_size, inputsize());
02815     expectations_m[0] << inputs;
02816 
02817     if( greedy_target_connections.length() != 0 )
02818         PLERROR("In StackedAutoassociatorsNet::onlineStep(): greedy_target_connections not "
02819                 "implemented yet in mini-batch online setting.\n");
02820     
02821     if(correlation_connections.length() != 0)
02822     {
02823         for( int i=0 ; i<n_layers-1; i++ )
02824         {
02825             if( partial_costs.length() != 0 && partial_costs[ i ] )
02826                 PLERROR("In StackedAutoassociatorsNet::onlineStep(): partial costs not "
02827                         "implemented yet for correlation_connections, in mini-batch online "
02828                         "setting.\n");
02829 
02830             connections[i]->fprop(expectations_m[i],
02831                                   correlation_activations_m[i]);
02832             layers[i+1]->fprop(correlation_activations_m[i],
02833                                correlation_expectations_m[i]);
02834             correlation_connections[i]->fprop(correlation_expectations_m[i],
02835                                               activations_m[i+1] );
02836             correlation_layers[i]->fprop(activations_m[i+1],
02837                                          expectations_m[i+1]);
02838         }
02839     }
02840     else
02841     {
02842         for( int i=0 ; i<n_layers-1; i++ )
02843         {
02844             connections[i]->fprop( expectations_m[i], activations_m[i+1] );
02845             layers[i+1]->fprop(activations_m[i+1], expectations_m[i+1]);
02846 
02847             if( partial_costs.length() != 0 && partial_costs[ i ] )
02848             {
02849                 // Set learning rates
02850                 if( !fast_exact_is_equal(fine_tuning_decrease_ct, 0 ) )
02851                     lr = fine_tuning_learning_rate /
02852                         (1 + fine_tuning_decrease_ct * stage);
02853                 else
02854                     lr = fine_tuning_learning_rate;
02855 
02856                 partial_costs[ i ]->setLearningRate( lr );
02857                 partial_costs[ i ]->fprop( expectations_m[i + 1],
02858                                            targets, partial_cost_values );
02859                 // Update partial cost (might contain some weights for example)
02860                 partial_cost_values_0.resize(mbatch_size);
02861                 partial_cost_values_0 << partial_cost_values.column(0);
02862                 partial_costs[ i ]->bpropUpdate(
02863                     expectations_m[ i + 1 ],
02864                     targets,
02865                     partial_cost_values_0,
02866                     expectation_gradients_m[ i + 1 ]
02867                     );
02868 
02869                 train_costs.subMatColumns(partial_costs_positions[i]+1,
02870                                           partial_cost_values.width())
02871                     << partial_cost_values;
02872 
02873                 if( partial_costs_weights.length() != 0 )
02874                     expectation_gradients_m[i + 1] *= partial_costs_weights[i];
02875 
02876                 // Update hidden layer bias and weights
02877                 layers[ i+1 ]->bpropUpdate( activations_m[ i + 1 ],
02878                                             expectations_m[ i + 1 ],
02879                                             activation_gradients_m[ i + 1 ],
02880                                             expectation_gradients_m[ i + 1 ] );
02881 
02882                 connections[ i ]->bpropUpdate( expectations_m[ i ],
02883                                                activations_m[ i + 1 ],
02884                                                expectation_gradients_m[ i ],
02885                                                activation_gradients_m[ i + 1 ]
02886                                              );
02887             }
02888         }
02889     }
02890 
02891     final_module->fprop( expectations_m[ n_layers-1 ],
02892                          final_cost_inputs );
02893 
02894     final_cost->fprop( final_cost_inputs, targets, final_cost_values );
02895 
02896     train_costs.subMatColumns(train_costs.width() - final_cost_values.width(),
02897                               final_cost_values.width())
02898         << final_cost_values;
02899 
02900     final_cost_values_0.resize(mbatch_size);
02901     final_cost_values_0 << final_cost_values.column(0);
02902     final_cost->bpropUpdate( final_cost_inputs, targets,
02903                              final_cost_values_0,
02904                              final_cost_gradients );
02905     final_module->bpropUpdate( expectations_m[ n_layers-1 ],
02906                                final_cost_inputs,
02907                                expectation_gradients_m[ n_layers-1 ],
02908                                final_cost_gradients );
02909 
02910     // Unsupervised greedy layer-wise cost
02911 
02912     // Set learning rates
02913     if( !fast_exact_is_equal( greedy_decrease_ct, 0 ) )
02914         lr = greedy_learning_rate / (1 + greedy_decrease_ct * stage) ;
02915     else
02916         lr = greedy_learning_rate;
02917 
02918     for( int i=0 ; i<n_layers-1 ; i++ )
02919     {
02920         layers[i]->setLearningRate( lr );
02921         reconstruction_layers[i]->setLearningRate( lr );
02922         connections[i]->setLearningRate( lr );
02923         reconstruction_connections[i]->setLearningRate( lr );
02924         if(correlation_layers.length() != 0)
02925         {
02926             correlation_layers[i]->setLearningRate( lr );
02927             correlation_connections[i]->setLearningRate( lr );
02928         }
02929     }
02930     layers[n_layers-1]->setLearningRate( lr );
02931 
02932     // Backpropagate unsupervised gradient, layer-wise
02933     for( int i=n_layers-1 ; i>0 ; i-- )
02934     {
02935         reconstruction_connections[ i-1 ]->fprop(
02936             expectations_m[ i ],
02937             reconstruction_activations_m);
02938 
02939         reconstruction_layers[ i-1 ]->activations.resize(mbatch_size, reconstruction_layers[i-1]->size);
02940         reconstruction_layers[ i-1 ]->activations << reconstruction_activations_m;
02941         reconstruction_layers[ i-1 ]->activations += reconstruction_layers[ i-1 ]->bias;
02942 
02943         Mat layer_exp = reconstruction_layers[i-1]->getExpectations();
02944         reconstruction_layers[ i-1 ]->fprop(reconstruction_activations_m,
02945                                             layer_exp);
02946         reconstruction_layers[ i-1 ]->setExpectationsByRef(layer_exp);
02947 
02948         reconstruction_layers[ i-1 ]->fpropNLL(expectations_m[i-1],
02949                                                train_costs.column(i-1));
02950 
02951         reconstruction_layers[ i-1 ]->bpropNLL(expectations_m[i-1], train_costs.column(i-1),
02952                                                reconstruction_activation_gradients_m);
02953 
02954         reconstruction_layers[ i-1 ]->update(reconstruction_activation_gradients_m);
02955 
02956         reconstruction_connections[ i-1 ]->bpropUpdate(
02957             expectations_m[ i ],
02958             reconstruction_activations_m,
02959             reconstruction_expectation_gradients_m,
02960             reconstruction_activation_gradients_m);
02961 
02962         if(!fast_exact_is_equal(l1_neuron_decay,0))
02963         {
02964             // Compute L1 penalty gradient on neurons
02965             for (int k = 0; k < mbatch_size; k++)
02966             {
02967                 real* hid = expectations_m[i](k).data();
02968                 real* grad = reconstruction_expectation_gradients_m(k).data();
02969                 int width = expectations_m[i].width();
02970                 for(int j = 0; j < width; j++)
02971                 {
02972                     if(*hid > l1_neuron_decay_center)
02973                         *grad += l1_neuron_decay;
02974                     else if(*hid < l1_neuron_decay_center)
02975                         *grad -= l1_neuron_decay;
02976                     hid++;
02977                     grad++;
02978                 }
02979             }
02980         }
02981 
02982         if( correlation_connections.length() != 0 )
02983         {
02984             correlation_layers[i-1]->bpropUpdate(
02985                 activations_m[i],
02986                 expectations_m[i],
02987                 reconstruction_activation_gradients_m,
02988                 reconstruction_expectation_gradients_m);
02989 
02990             correlation_connections[i-1]->bpropUpdate(
02991                 correlation_expectations_m[i-1],
02992                 activations_m[i],
02993                 correlation_expectation_gradients_m[i-1],
02994                 reconstruction_activation_gradients_m);
02995 
02996             layers[i]->bpropUpdate(
02997                 correlation_activations_m[i-1],
02998                 correlation_expectations_m[i-1],
02999                 correlation_activation_gradients_m[i-1],
03000                 correlation_expectation_gradients_m[i-1]);
03001 
03002             connections[i-1]->bpropUpdate(
03003                 expectations_m[i-1],
03004                 correlation_activations_m[i-1],
03005                 reconstruction_expectation_gradients_m,
03006                 correlation_activation_gradients_m[i-1]);
03007         }
03008         else
03009         {
03010             layers[i]->bpropUpdate(
03011                 activations_m[i],
03012                 expectations_m[i],
03013                 reconstruction_activation_gradients_m,
03014                 reconstruction_expectation_gradients_m);
03015 
03016             connections[i-1]->bpropUpdate(
03017                 expectations_m[i-1],
03018                 activations_m[i],
03019                 reconstruction_expectation_gradients_m,
03020                 reconstruction_activation_gradients_m);
03021         }
03022     }
03023 
03024     // Compute the fine-tuning learning rate
03025     // (same inverse-time decay schedule as for the greedy phase)
03026     if( !fast_exact_is_equal(fine_tuning_decrease_ct, 0) )
03027         lr = fine_tuning_learning_rate
03028             / (1 + fine_tuning_decrease_ct * stage) ;
03029     else
03030         lr = fine_tuning_learning_rate ;
03031 
03032     // Set learning rate back for fine-tuning
03033     for( int i=0 ; i<n_layers-1 ; i++ )
03034     {
03035         layers[i]->setLearningRate( lr );
03036         connections[i]->setLearningRate( lr );
03037         //reconstruction_connections[i]->setLearningRate( lr );
03038         if(correlation_layers.length() != 0)
03039         {
03040             correlation_layers[i]->setLearningRate( lr );
03041             correlation_connections[i]->setLearningRate( lr );
03042         }
03043     }
03044     layers[n_layers-1]->setLearningRate( lr );
03045 
03046     // Fine-tuning backpropagation
03047     if( correlation_connections.length() != 0 )
03048     {
03049         for( int i=n_layers-1 ; i>0 ; i-- )
03050         {
03051             correlation_layers[i-1]->bpropUpdate(
03052                 activations_m[i],
03053                 expectations_m[i],
03054                 activation_gradients_m[i],
03055                 expectation_gradients_m[i] );
03056 
03057             correlation_connections[i-1]->bpropUpdate(
03058                 correlation_expectations_m[i-1],
03059                 activations_m[i],
03060                 correlation_expectation_gradients_m[i-1],
03061                 activation_gradients_m[i] );
03062 
03063             layers[i]->bpropUpdate( correlation_activations_m[i-1],
03064                                     correlation_expectations_m[i-1],
03065                                     correlation_activation_gradients_m[i-1],
03066                                     correlation_expectation_gradients_m[i-1] );
03067 
03068             connections[i-1]->bpropUpdate(
03069                 expectations_m[i-1],
03070                 correlation_activations_m[i-1],
03071                 expectation_gradients_m[i-1],
03072                 correlation_activation_gradients_m[i-1] );
03073         }
03074     }
03075     else
03076     {
03077         for( int i=n_layers-1 ; i>0 ; i-- )
03078         {
03079             layers[i]->bpropUpdate( activations_m[i],
03080                                     expectations_m[i],
03081                                     activation_gradients_m[i],
03082                                     expectation_gradients_m[i] );
03083 
03084             connections[i-1]->bpropUpdate( expectations_m[i-1],
03085                                            activations_m[i],
03086                                            expectation_gradients_m[i-1],
03087                                            activation_gradients_m[i] );
03088         }
03089     }
03090 }
03091 
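// Computes the output for a single input. While greedy training is still in
// progress (currently_trained_layer < n_layers), the output is the hidden
// representation of the layer currently being trained; afterwards it is the
// output of final_module applied to the top-level representation.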
03092 void StackedAutoassociatorsNet::computeOutput(const Vec& input, Vec& output) const
03093 {
03094     Profiler::pl_profile_start("StackedAutoassociatorsNet::computeOutput");
03095     // fprop
03096 
03097     expectations[0] << input;
03098 
03099     if(correlation_connections.length() != 0)
03100     {
03101         for( int i=0 ; i<currently_trained_layer-1; i++ )
03102         {
03103             connections[i]->fprop( expectations[i], correlation_activations[i] );
03104             layers[i+1]->fprop( correlation_activations[i],
03105                                 correlation_expectations[i] );
03106             correlation_connections[i]->fprop( correlation_expectations[i],
03107                                                activations[i+1] );
03108             correlation_layers[i]->fprop( activations[i+1],
03109                                           expectations[i+1] );
03110         }
03111     }
03112     else
03113     {
03114         for(int i=0 ; i<currently_trained_layer-1 ; i++ )
03115         {
03116             double_input(expectations[i], doubled_expectations[i]);
03117             connections[i]->fprop( doubled_expectations[i], activations[i+1] );
03118             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
03119         }
03120     }
03121 
03122     if( currently_trained_layer<n_layers )
03123     {
03124         if(correlation_connections.length() != 0)
03125         {
03126             connections[currently_trained_layer-1]->fprop(
03127                 expectations[currently_trained_layer-1],
03128                 correlation_activations[currently_trained_layer-1] );
03129 
03130             layers[currently_trained_layer]->fprop(
03131                 correlation_activations[currently_trained_layer-1],
03132                 correlation_expectations[currently_trained_layer-1] );
03133 
03134             correlation_connections[currently_trained_layer-1]->fprop(
03135                 correlation_expectations[currently_trained_layer-1],
03136                 activations[currently_trained_layer] );
03137 
03138             correlation_layers[currently_trained_layer-1]->fprop(
03139                 activations[currently_trained_layer],
03140                 output );
03141         }
03142         else
03143         {
03144             double_input(expectations[currently_trained_layer-1], 
03145                 doubled_expectations[currently_trained_layer-1]);
03146             connections[currently_trained_layer-1]->fprop(
03147                 doubled_expectations[currently_trained_layer-1],
03148                 activations[currently_trained_layer] );
03149             layers[currently_trained_layer]->fprop(
03150                 activations[currently_trained_layer],
03151                 output);
03152         }
03153     }
03154     else
03155         final_module->fprop( expectations[ currently_trained_layer - 1],
03156                              output );
03157     Profiler::pl_profile_end("StackedAutoassociatorsNet::computeOutput");
03158 }
03159 
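// Mini-batch output computation. Falls back to the generic row-by-row
// implementation when correlation connections, compute_all_test_costs or
// missing-data noise are in use; otherwise whole matrices are fprop'd
// through the stack, which is the fast path.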
03160 void StackedAutoassociatorsNet::computeOutputs(const Mat& input, Mat& output) const
03161 {
03162     if(correlation_connections.length() != 0
03163        || compute_all_test_costs
03164        || noise_type == "missing_data"){
03165         inherited::computeOutputs(input, output);
03166     }else{
03167         Profiler::pl_profile_start("StackedAutoassociatorsNet::computeOutputs");
03168 
03169         expectations_m[0].resize(input.length(), inputsize());
03170         Mat m = expectations_m[0];
03171         m << input;
03172 
03173         for(int i=0 ; i<currently_trained_layer-1 ; i++ )
03174         {
03175             connections[i]->fprop( expectations_m[i], activations_m[i+1] );
03176             layers[i+1]->fprop(activations_m[i+1],expectations_m[i+1]);
03177         }
03178         if(currently_trained_layer < n_layers)
03179         {
03180             connections[currently_trained_layer-1]->fprop( expectations_m[currently_trained_layer-1],
03181                  activations_m[currently_trained_layer] );
03182             layers[currently_trained_layer]->fprop(activations_m[currently_trained_layer],
03183                  output);
03184         }
03185         else
03186         {
03187             final_module->fprop( expectations_m[ currently_trained_layer - 1],
03188                              output );
03189         }
03190         Profiler::pl_profile_end("StackedAutoassociatorsNet::computeOutputs");
03191     }
03192 }
03193 
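// Mini-batch outputs-and-costs computation, with the same fallback condition
// as computeOutputs: on the fast path, outputs are computed for the whole
// batch at once and the costs are then derived row by row.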
03194 void StackedAutoassociatorsNet::computeOutputsAndCosts(const Mat& input, const Mat& target,
03195                                                        Mat& output, Mat& costs) const
03196 {
03197     if(correlation_connections.length() != 0 
03198        || compute_all_test_costs
03199        || noise_type == "missing_data"){
03200         inherited::computeOutputsAndCosts(input, target, output, costs);
03201     }else{
03202         Profiler::pl_profile_start("StackedAutoassociatorsNet::computeOutputsAndCosts");
03203 
03204         int n=input.length();
03205         PLASSERT(target.length()==n);
03206         output.resize(n,outputsize());
03207         costs.resize(n,nTestCosts());
03208         computeOutputs(input, output);
03209         for (int i=0;i<n;i++)
03210         {
03211             Vec in_i = input(i);
03212             Vec out_i = output(i); 
03213             Vec target_i = target(i);
03214             Vec c_i = costs(i);
03215             computeCostsFromOutputs(in_i, out_i, target_i, c_i);
03216         }
03217         Profiler::pl_profile_end("StackedAutoassociatorsNet::computeOutputsAndCosts");
03218     }
03219 }
03220 
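// Fills the test cost vector for one example (all other entries are left as
// MISSING_VALUE): reconstruction NLLs for the layer being greedily trained
// (for all layers up to it when compute_all_test_costs is true), partial
// costs at their recorded positions, and, once fine-tuning has started, the
// final cost in the last entries.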
03221 void StackedAutoassociatorsNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
03222                                            const Vec& target, Vec& costs) const
03223 {
03224     //Assumes that computeOutput has been called
03225 
03226     Profiler::pl_profile_start("StackedAutoassociatorsNet::computeCostsFromOutputs");
03227     costs.resize( nTestCosts() );
03228     costs.fill( MISSING_VALUE );
03229 
03230     if(compute_all_test_costs)
03231     {
03232         for(int i=0; i<currently_trained_layer-1; i++)
03233         {
03234             reconstruction_connections[ i ]->fprop( expectations[ i+1 ],
03235                                                     reconstruction_activations);
03236             if( direct_connections.length() != 0 )
03237             {
03238                 direct_connections[ i ]->fprop(
03239                     expectations[ i ],
03240                     direct_activations );
03241                 reconstruction_activations += direct_activations;
03242             }
03243 
03244             reconstruction_layers[ i ]->fprop( reconstruction_activations,
03245                                                reconstruction_layers[ i ]->expectation);
03246 
03247             reconstruction_layers[ i ]->activation << reconstruction_activations;
03248             reconstruction_layers[ i ]->activation += reconstruction_layers[ i ]->bias;
03249             //reconstruction_layers[ i ]->expectation_is_up_to_date = true;
03250             reconstruction_layers[ i ]->setExpectationByRef( reconstruction_layers[ i ]->expectation );
03251 
03252             costs[i] = reconstruction_layers[ i ]->fpropNLL(expectations[ i ]);
03253 
03254             if( partial_costs && partial_costs[i])
03255             {
03256                 partial_costs[ i ]->fprop( expectations[ i + 1],
03257                                            target, partial_cost_value );
03258                 costs.subVec(partial_costs_positions[i],
03259                              partial_cost_value.length()) <<
03260                     partial_cost_value;
03261             }
03262         }
03263     }
03264 
03265     if( currently_trained_layer<n_layers )
03266     {
03267         reconstruction_connections[ currently_trained_layer-1 ]->fprop(
03268             output,
03269             reconstruction_activations);
03270         if( direct_connections.length() != 0 )
03271         {
03272             direct_connections[ currently_trained_layer-1 ]->fprop(
03273                 expectations[ currently_trained_layer-1 ],
03274                 direct_activations );
03275             reconstruction_activations += direct_activations;
03276         }
03277                 
03278         Vec divided_reconstruction_activations(reconstruction_activations.size());
03279         divide_input(reconstruction_activations, divided_reconstruction_activations);
03280 
03281         reconstruction_layers[ currently_trained_layer-1 ]->fprop(
03282           divided_reconstruction_activations,
03283           reconstruction_layers[ currently_trained_layer-1 ]->expectation);
03284         
03285         reconstruction_layers[ currently_trained_layer-1 ]->activation <<
03286             divided_reconstruction_activations;
03287         reconstruction_layers[ currently_trained_layer-1 ]->activation += 
03288             reconstruction_layers[ currently_trained_layer-1 ]->bias;
03289         //reconstruction_layers[ currently_trained_layer-1 ]->expectation_is_up_to_date = true;
03290         reconstruction_layers[ currently_trained_layer-1 ]->setExpectationByRef(
03291             reconstruction_layers[ currently_trained_layer-1 ]->expectation );
03292 
03293         costs[ currently_trained_layer-1 ] =
03294             reconstruction_layers[ currently_trained_layer-1 ]->fpropNLL(
03295                 expectations[ currently_trained_layer-1 ]);
03296 
03297         if(reconstruct_hidden)
03298         {
03299             connections[ currently_trained_layer-1 ]->fprop(
03300                 reconstruction_layers[ currently_trained_layer-1 ]->expectation,
03301                 hidden_reconstruction_activations );
03302             layers[ currently_trained_layer ]->fprop(
03303                 hidden_reconstruction_activations,
03304                 layers[ currently_trained_layer ]->expectation );
03305             layers[ currently_trained_layer ]->activation <<
03306                 hidden_reconstruction_activations;
03307             layers[ currently_trained_layer ]->activation += 
03308                 layers[ currently_trained_layer ]->bias;
03309             //layers[ currently_trained_layer ]->expectation_is_up_to_date = true;
03310             layers[ currently_trained_layer ]->setExpectationByRef(
03311                 layers[ currently_trained_layer ]->expectation );
03312             costs[ currently_trained_layer-1 ] +=
03313                 layers[ currently_trained_layer ]->fpropNLL(
03314                     output);
03315         }
03316 
03317         if( partial_costs && partial_costs[ currently_trained_layer-1 ] )
03318         {
03319             partial_costs[ currently_trained_layer-1 ]->fprop(
03320                 output,
03321                 target, partial_cost_value );
03322             costs.subVec(partial_costs_positions[currently_trained_layer-1],
03323                          partial_cost_value.length()) << partial_cost_value;
03324         }
03325     }
03326     else
03327     {
03328         final_cost->fprop( output, target, final_cost_value );
03329         costs.subVec(costs.length()-final_cost_value.length(),
03330                      final_cost_value.length()) <<
03331             final_cost_value;
03332     }
03333     Profiler::pl_profile_end("StackedAutoassociatorsNet::computeCostsFromOutputs");
03334 }
03335 
03336 TVec<string> StackedAutoassociatorsNet::getTestCostNames() const
03337 {
03338     // Return the names of the costs computed by computeCostsFromOutputs
03339     // (these may or may not be exactly the same as what's returned by
03340     // getTrainCostNames).
03341 
03342     TVec<string> cost_names(0);
03343 
03344     for( int i=0; i<layers.size()-1; i++)
03345         cost_names.push_back("reconstruction_error_" + tostring(i+1));
03346 
03347     for( int i=0 ; i<partial_costs.size() ; i++ )
03348     {
03349         TVec<string> names = partial_costs[i]->costNames();
03350         for(int j=0; j<names.length(); j++)
03351             cost_names.push_back("partial" + tostring(i) + "." +
03352                 names[j]);
03353     }
03354 
03355     cost_names.append( final_cost->costNames() );
03356 
03357     return cost_names;
03358 }
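
// As a hypothetical illustration: a net with two hidden layers, no partial
// costs and a final cost reporting a single "NLL" cost would yield
// [ "reconstruction_error_1", "reconstruction_error_2", "NLL" ] here, with
// getTrainCostNames() additionally appending "global_reconstruction_error"
// right after the per-layer reconstruction errors.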
03359 
03360 TVec<string> StackedAutoassociatorsNet::getTrainCostNames() const
03361 {
03362     TVec<string> cost_names(0);
03363 
03364     for( int i=0; i<layers.size()-1; i++)
03365         cost_names.push_back("reconstruction_error_" + tostring(i+1));
03366 
03367     cost_names.push_back("global_reconstruction_error");
03368 
03369     for( int i=0 ; i<partial_costs.size() ; i++ )
03370     {
03371         TVec<string> names = partial_costs[i]->costNames();
03372         for(int j=0; j<names.length(); j++)
03373             cost_names.push_back("partial" + tostring(i) + "." +
03374                 names[j]);
03375     }
03376 
03377     cost_names.append( final_cost->costNames() );
03378 
03379     return cost_names;
03380 }
03381 
03382 
03383 //#####  Helper functions  ##################################################
03384 
03385 void StackedAutoassociatorsNet::setLearningRate( real the_learning_rate )
03386 {
03387     for( int i=0 ; i<n_layers-1 ; i++ )
03388     {
03389         layers[i]->setLearningRate( the_learning_rate );
03390         reconstruction_layers[i]->setLearningRate( the_learning_rate );
03391         connections[i]->setLearningRate( the_learning_rate );
03392         if(correlation_layers.length() != 0)
03393         {
03394             correlation_layers[i]->setLearningRate( the_learning_rate );
03395             correlation_connections[i]->setLearningRate( the_learning_rate );
03396         }
03397         if(direct_connections.length() != 0)
03398         {
03399             direct_connections[i]->setLearningRate( the_learning_rate );
03400         }
03401         reconstruction_connections[i]->setLearningRate( the_learning_rate );
03402     }
03403 
03404     for( int i=0; i<greedy_target_connections.length(); i++ )
03405         greedy_target_connections[i]->setLearningRate( the_learning_rate );
03406 
03407     layers[n_layers-1]->setLearningRate( the_learning_rate );
03408 
03409     final_cost->setLearningRate( the_learning_rate );
03410     final_module->setLearningRate( the_learning_rate );
03411 }
03412 
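// Applies fantasizeKTime to each row of srcImg and concatenates the
// resulting chains into a single TVec<Vec>.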
03413 TVec<Vec> StackedAutoassociatorsNet::fantasizeKTimeOnMultiSrcImg(const int KTime, const Mat& srcImg, const Vec& sample, const Vec& maskNoiseFractOrProb, bool alwaysFromSrcImg)
03414 {
03415     int n=srcImg.length();
03416     TVec<Vec> output(0);
03417 
03418     for( int i=0; i<n; i++ )
03419     {
03420         const Vec img_i = srcImg(i);
03421         TVec<Vec> outputTmp;  
03422         outputTmp = fantasizeKTime(KTime, img_i, sample, maskNoiseFractOrProb, alwaysFromSrcImg);
03423         output = concat(output, outputTmp);        
03424     }
03425 
03426     return output;
03427 }
03428 
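// Generates a chain of KTime "fantasized" samples starting from srcImg: at
// each step, the first sample.size() hidden layers are reached through
// corrupted (doubled) inputs, optionally binomially sampled where
// sample[i] == 1, and the image is then reconstructed top-down. Usage
// sketch (hypothetical caller code, assuming a trained net whose training
// used masking noise, and an input vector img of size inputsize()):
//
//     Vec sample(1);  sample[0] = 1;    // binomially sample the hidden layer
//     Vec fract(1);   fract[0]  = 0.25; // mask 25% of that layer's inputs
//     TVec<Vec> chain = net.fantasizeKTime(10, img, sample, fract, false);
//     // chain[0] is srcImg itself; chain[k] is the k-th fantasy.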
03429 TVec<Vec> StackedAutoassociatorsNet::fantasizeKTime(const int KTime, const Vec& srcImg, const Vec& sample, const Vec& maskNoiseFractOrProb, bool alwaysFromSrcImg)
03430 {
03431     bool bFractOrProbUseful=false;
03432 
03433     // Noise types that need fraction_of_masked_inputs or probability_of_masked_inputs
03434     if(noise_type == "masking_noise" || noise_type == "missing_data")
03435         bFractOrProbUseful=true;
03436 
03437     if(bFractOrProbUseful && maskNoiseFractOrProb.size() == 0)
03438         PLERROR("In StackedAutoassociatorsNet::fantasize(): "
03439         "maskNoiseFractOrProb should be defined because fraction_of_masked_inputs"
03440         " or probability_of_masked_inputs has been used during the learning stage.");
03441 
03442     if(bFractOrProbUseful && maskNoiseFractOrProb.size() != sample.size())
03443         PLERROR("In StackedAutoassociatorsNet::fantasize(): "
03444         "Size of maskNoiseFractOrProb should be equal to sample's size.");
03445 
03446     if(sample.size() > n_layers-1)
03447         PLERROR("In StackedAutoassociatorsNet::fantasize(): "
03448         "Size of sample (%i) should be <= "
03449         "number of hidden layers (%i).", sample.size(), n_layers-1);
03450 
03451     bool bFraction_masked_input = true;
03452     bool autoassociator_expectation_indices_temp_initialized = false;
03453 
03454     // Number of hidden layers to be 'covered'
03455     int n_hlayers_used = sample.size();
03456 
03457     // Save current values, to be restored on exit
03458     real old_fraction_masked_inputs = fraction_of_masked_inputs;
03459     real old_prob_masked_inputs = probability_of_masked_inputs;
03460     bool old_mask_input_layer_only = mask_input_layer_only;
03461     int  old_nb_corrupted_layer = nb_corrupted_layer;
03462 
03463     // New values for fantasize
03464     mask_input_layer_only = false;
03465     nb_corrupted_layer = n_hlayers_used;
03466 
03467     if(bFractOrProbUseful)
03468     {
03469         if(old_prob_masked_inputs > 0.)
03470             bFraction_masked_input = false;
03471         else
03472             if(autoassociator_expectation_indices.size() == 0)
03473             {
03474                 autoassociator_expectation_indices.resize( n_hlayers_used );
03475                 autoassociator_expectation_indices_temp_initialized = true;
03476             }
03477     }
03478     
03479     TVec<Vec> fantaImagesObtained(KTime+1);
03480 
03481     fantaImagesObtained[0].resize(srcImg.size());
03482     fantaImagesObtained[0] << srcImg;
03483     expectations[0] << srcImg;
03484     
03485     // Fantasize KTime times.
03486     for( int k=0 ; k<KTime ; k++ )
03487     {
03488         fantaImagesObtained[k+1].resize(srcImg.size());
03489         for( int i=0 ; i<n_hlayers_used; i++ )
03490         {
03491             // Initialization done only on the first iteration.
03492             if(k == 0)
03493             {
03494                 // Initialize autoassociator_expectation_indices if not already
03495                 // done, since the fraction_of_masked_inputs used here may be
03496                 // different (non-zero) from the one used during training.
03497                 if(autoassociator_expectation_indices_temp_initialized)
03498                 {
03499                     autoassociator_expectation_indices[i].resize( layers[i]->size );
03500                     for( int j=0 ; j < autoassociator_expectation_indices[i].length() ; j++ )
03501                          autoassociator_expectation_indices[i][j] = j;
03502                 }
03503             }
03504 
03505             if(bFractOrProbUseful)
03506             {
03507                 if(bFraction_masked_input)
03508                     fraction_of_masked_inputs = maskNoiseFractOrProb[i];
03509                 else
03510                     probability_of_masked_inputs = maskNoiseFractOrProb[i];
03511             }
03512             double_input(expectations[i], doubled_expectations[i]);
03513             corrupt_input(
03514                 doubled_expectations[i],
03515                 corrupted_autoassociator_expectations[i], i);
03516             connections[i]->fprop(
03517                 corrupted_autoassociator_expectations[i],
03518                 activations[i+1] );
03519             layers[i+1]->fprop(activations[i+1],expectations[i+1]);
03520         }
03521 
03522         for( int i=n_hlayers_used-1 ; i>=0; i-- )
03523         {
03524             // Binomial sample
03525             if( sample[i] == 1 )
03526                 for( int j=0; j<expectations[i+1].size(); j++ )
03527                     expectations[i+1][j] = random_gen->binomial_sample(expectations[i+1][j]);
03528     
03529             reconstruction_connections[i]->fprop(
03530                 expectations[i+1],
03531                 reconstruction_activations );
03532   
03533             Vec divided_reconstruction_activations(reconstruction_activations.size());
03534             divide_input(reconstruction_activations, divided_reconstruction_activations);
03535 
03536             reconstruction_layers[i]->fprop(divided_reconstruction_activations, expectations[i]);
03537         }
03538         fantaImagesObtained[k+1] << expectations[0];
03539         if( alwaysFromSrcImg )
03540             expectations[0] << srcImg;
03541     }
03542 
03543     if(bFractOrProbUseful)
03544     {
03545         fraction_of_masked_inputs = old_fraction_masked_inputs;
03546         probability_of_masked_inputs = old_prob_masked_inputs;
03547     }
03548 
03549     mask_input_layer_only = old_mask_input_layer_only;
03550     nb_corrupted_layer = old_nb_corrupted_layer;
03551 
03552     return fantaImagesObtained;
03553 }
03554 
03555 } // end of namespace PLearn
03556 
03557 
03558 /*
03559   Local Variables:
03560   mode:c++
03561   c-basic-offset:4
03562   c-file-style:"stroustrup"
03563   c-file-offsets:((innamespace . 0)(inline-open . 0))
03564   indent-tabs-mode:nil
03565   fill-column:79
03566   End:
03567 */
03568 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :