// -*- C++ -*-

// RBMModule.cc
//
// Copyright (C) 2007 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Olivier Delalleau, Yoshua Bengio

#include "RBMModule.h"
#include <plearn/vmat/VMat.h>
#include <plearn_learners/online/RBMMatrixConnection.h>

#define PL_LOG_MODULE_NAME "RBMModule"
#include <plearn/io/pl_log.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMModule,
    "A Restricted Boltzmann Machine.",
    "An RBM contains a 'visible_layer', a 'hidden_layer' (both instances of a subclass\n"
    "of RBMLayer) and a 'connection' (an instance of a subclass of RBMConnection).\n"
    "It always has the following ports:\n"
    "  - 'visible' : expectations of the visible (normally input) layer\n"
    "  - 'hidden.state' : expectations of the hidden (normally output) layer\n"
    "  - 'hidden_activations.state' : activations of hidden units (given visible)\n"
    "  - 'visible_sample' : random sample obtained on visible units (input or output port)\n"
    "  - 'visible_expectation' : expectation of visible units (output port ONLY)\n"
    "  - 'visible_activations.state' : activations of visible units (output port ONLY)\n"
    "  - 'hidden_sample' : random sample obtained on hidden units\n"
    "  - 'energy' : energy of the joint (visible,hidden) pair or free-energy\n"
    "               of the visible (if given) or of the hidden (if given).\n"
    "  - 'hidden_bias' : externally controlled bias on the hidden units,\n"
    "                    used to implement conditional RBMs\n"
    "  - 'neg_log_likelihood' : USE WITH CARE, this is the exact negative log-likelihood\n"
    "    of the RBM. Computing it requires re-computing the partition function (which must\n"
    "    be recomputed if the parameters have changed) and takes O(2^{min(n_hidden,n_visible)})\n"
    "    computations of the free-energy.\n"
    "  - 'neg_log_phidden' : use as an optional input port when asking for an output on\n"
    "    the 'neg_log_pvisible_given_phidden' port. It is a column matrix with one element\n"
    "    -log w_h for each row h of the input 'hidden.state'. The w_h could be interpreted as\n"
    "    probabilities, e.g. w_h = P(h) according to some prior probability P, and sum_h w_h=1\n"
    "    over the set of h's provided in the 'hidden.state' port.\n"
    "  - 'neg_log_pvisible_given_phidden' : this output port is used to ask the module to compute\n"
    "    a column matrix with entries = -log( sum_h P(x|h) w_h ) for each row x in the input\n"
    "    'visible' port. This quantity would be a valid - log P(x) if sum_h w_h = 1, under the\n"
    "    joint model P(x,h) = P(x|h) P(h), with P(h)=w_h.\n"
    "\n"
    "An RBM also has other ports that exist only if some options are set.\n"
    "If reconstruction_connection is given, then it has\n"
    "  - 'visible_reconstruction_activations.state' : the deterministic reconstruction of the\n"
    "     visible activations through the conditional expectations of the hidden given the visible.\n"
    "  - 'visible_reconstruction.state' : the deterministic reconstruction of the visible\n"
    "     values (expectations) through the conditional expectations of hidden | visible.\n"
    "  - 'reconstruction_error.state' : the auto-associator reconstruction error (NLL)\n"
    "    obtained by matching the visible_reconstruction with the given visible.\n"
    "Note that the above deterministic reconstruction may be made stochastic\n"
    "by using the advanced option 'stochastic_reconstruction'.\n"
    "If compute_contrastive_divergence is true, then the RBM also has these ports\n"
    "  - 'contrastive_divergence' : the quantity minimized by contrastive-divergence training.\n"
    "  - 'negative_phase_visible_samples.state' : the negative phase stochastic reconstruction\n"
    "    of the visible units, only provided to avoid recomputing them in bpropUpdate.\n"
    "  - 'negative_phase_hidden_expectations.state' : the negative phase hidden units'\n"
    "    expected values, only provided to avoid recomputing them in bpropUpdate.\n"
    "The following ports are filled only in test mode when the option\n"
    "'compare_true_gradient_with_cd' is true:\n"
    "   - 'median_reldiff_cd_nll': median relative difference between the CD\n"
    "     update and the true NLL gradient. Here, the CD update is not\n"
    "     stochastic, but is computed exactly as the truncation of the log-\n"
    "     likelihood expansion. This port has size 'n_steps_compare': there\n"
    "     is one value for each step of the CD.\n"
    "   - 'mean_diff_cd_nll': mean of the absolute difference between the CD\n"
    "     and NLL gradient updates.\n"
    "   - 'agreement_cd_nll': fraction of weights for which the CD and NLL\n"
    "     gradient updates agree on the sign, followed by the fraction of\n"
    "     weights for which the CD update has the same sign as the difference\n"
    "     between the NLL gradient and the CD update.\n"
    "   - 'agreement_stoch': same as the first half of the above, except that\n"
    "     it is for the stochastic CD update rather than its expected value.\n"
    "   - 'bound_cd_nll': bound on the difference between the CD and NLL\n"
    "     gradient updates, as computed in (Bengio & Delalleau, 2008).\n"
    "   - 'weights_stats': first element is the median of the absolute value\n"
    "     of all weights and biases, second element is the mean, third\n"
    "     element is the maximum sum of weights and biases (in absolute\n"
    "     values) over columns of the weight matrix, and fourth element is\n"
    "     the same over rows.\n"
    "   - 'ratio_cd_leftout': median ratio between the absolute value of the\n"
    "     CD update and the absolute value of the term left out in CD (i.e.\n"
    "     the difference between the NLL gradient and the CD update).\n"
    "   - 'abs_cd': average absolute value of the CD update. First for the\n"
    "     expected CD update, then its stochastic (sampled) version.\n"
    "   - 'nll_grad': NLL gradient.\n"
    "\n"
    "The RBM can be trained by gradient descent (w.r.t. gradients provided on\n"
    "the 'hidden.state' port or on the 'reconstruction_error.state' port)\n"
    "if grad_learning_rate>0, or by contrastive divergence if cd_learning_rate>0.\n"
);
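
// A minimal fprop usage sketch (an illustration, not code taken from the
// PLearn test suite; only the port mechanics below come from this file --
// passing an empty Mat in 'ports_value' marks that port as a requested
// output):
//
//     PP<RBMModule> rbm = ...;   // module with layers and connection set
//     TVec<Mat*> ports_value(rbm->nPorts(), (Mat*) NULL);
//     Mat visible_data = ...;    // one example per row
//     Mat hidden_out;            // empty => requested as an output
//     ports_value[rbm->getPortIndex("visible")] = &visible_data;
//     ports_value[rbm->getPortIndex("hidden.state")] = &hidden_out;
//     rbm->fprop(ports_value);   // fills hidden_out with E[h|v]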

///////////////
// RBMModule //
///////////////
RBMModule::RBMModule():
    cd_learning_rate(0),
    grad_learning_rate(0),
    tied_connection_weights(false),
    compute_contrastive_divergence(false),
    compare_true_gradient_with_cd(false),
    n_steps_compare(1),
    n_Gibbs_steps_CD(1),
    min_n_Gibbs_steps(1),
    n_Gibbs_steps_per_generated_sample(-1),
    compute_log_likelihood(false),
    minimize_log_likelihood(false),
    Gibbs_step(0),
    log_partition_function(0),
    partition_function_is_stale(true),
    deterministic_reconstruction_in_cd(false),
    stochastic_reconstruction(false),
    standard_cd_grad(true),
    standard_cd_bias_grad(true),
    standard_cd_weights_grad(true),
    hidden_bias(NULL),
    weights(NULL),
    hidden_act(NULL),
    hidden_activations_are_computed(false)
{
}

////////////////////
// declareOptions //
////////////////////
void RBMModule::declareOptions(OptionList& ol)
{
    // Build options.

    declareOption(ol, "visible_layer", &RBMModule::visible_layer,
                  OptionBase::buildoption,
        "Visible layer of the RBM.");

    declareOption(ol, "hidden_layer", &RBMModule::hidden_layer,
                  OptionBase::buildoption,
        "Hidden layer of the RBM.");

    declareOption(ol, "connection", &RBMModule::connection,
                  OptionBase::buildoption,
        "Connection between the visible and hidden layers.");

    declareOption(ol, "reconstruction_connection",
                  &RBMModule::reconstruction_connection,
                  OptionBase::buildoption,
        "Reconstruction connection between the hidden and visible layers.");

    declareOption(ol, "stochastic_reconstruction",
                  &RBMModule::stochastic_reconstruction,
                  OptionBase::buildoption,
        "If set to true, then reconstruction is not deterministic. Instead,\n"
        "we sample a hidden vector given the visible input, then use the\n"
        "visible layer's expectation given this sample as reconstruction.",
                  OptionBase::advanced_level);

    declareOption(ol, "grad_learning_rate", &RBMModule::grad_learning_rate,
                  OptionBase::buildoption,
        "Learning rate for the gradient descent step.");

    declareOption(ol, "cd_learning_rate", &RBMModule::cd_learning_rate,
                  OptionBase::buildoption,
        "Learning rate for the contrastive divergence step. Note that when\n"
        "set to 0, the gradient of the contrastive divergence will not be\n"
        "computed at all.");

    declareOption(ol, "tied_connection_weights", &RBMModule::tied_connection_weights,
                  OptionBase::buildoption,
        "Whether to keep the connection weights fixed during learning.");

    declareOption(ol, "compute_contrastive_divergence", &RBMModule::compute_contrastive_divergence,
                  OptionBase::buildoption,
        "Compute the contrastive divergence in an output port.");

    declareOption(ol, "deterministic_reconstruction_in_cd",
                  &RBMModule::deterministic_reconstruction_in_cd,
                  OptionBase::buildoption,
        "Whether to use the expectation of the visible (given a hidden sample)\n"
        "or a sample of the visible in the contrastive divergence learning.\n"
        "In other words, instead of the classical Gibbs sampling\n"
        "   v_0 --> h_0 ~ p(h|v_0) --> v_1 ~ p(v|h_0) -->  p(h|v_1)\n"
        "we will have, by setting 'deterministic_reconstruction_in_cd=1',\n"
        "   v_0 --> h_0 ~ p(h|v_0) --> v_1 = E(v|h_0) -->  p(h|E(v|h_0)).");

    declareOption(ol, "standard_cd_grad",
                  &RBMModule::standard_cd_grad,
                  OptionBase::buildoption,
        "Whether to use the standard contrastive divergence gradient for\n"
        "updates, or the true gradient of the contrastive divergence. This\n"
        "affects only the gradient w.r.t. internal parameters of the layers\n"
        "and connections. Currently, this option works only with layers of\n"
        "the type 'RBMBinomialLayer', connected by a 'RBMMatrixConnection'.");

    declareOption(ol, "standard_cd_bias_grad",
                  &RBMModule::standard_cd_bias_grad,
                  OptionBase::buildoption,
        "This option is only used when biases of the hidden layer are given\n"
        "through the 'hidden_bias' port. When this is the case, the gradient\n"
        "of contrastive divergence w.r.t. these biases is computed either:\n"
        "- by the usual formula if 'standard_cd_bias_grad' is true\n"
        "- by the true gradient if 'standard_cd_bias_grad' is false.");

    declareOption(ol, "standard_cd_weights_grad",
                  &RBMModule::standard_cd_weights_grad,
                  OptionBase::buildoption,
        "This option is only used when weights of the connection are given\n"
        "through the 'weights' port. When this is the case, the gradient of\n"
        "contrastive divergence w.r.t. weights is computed either:\n"
        "- by the usual formula if 'standard_cd_weights_grad' is true\n"
        "- by the true gradient if 'standard_cd_weights_grad' is false.");

    declareOption(ol, "n_Gibbs_steps_CD",
                  &RBMModule::n_Gibbs_steps_CD,
                  OptionBase::buildoption,
                  "Number of Gibbs sampling steps in negative phase of "
                  "contrastive divergence.");

    declareOption(ol, "min_n_Gibbs_steps", &RBMModule::min_n_Gibbs_steps,
                  OptionBase::buildoption,
                  "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
                  "when one has to sample from the joint or a marginal of visible and hidden,\n"
                  "and thus a Gibbs chain has to be run. This option gives the minimum number\n"
                  "of Gibbs steps to perform in the chain before outputting a sample.\n");

    declareOption(ol, "n_Gibbs_steps_per_generated_sample",
                  &RBMModule::n_Gibbs_steps_per_generated_sample,
                  OptionBase::buildoption,
                  "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
                  "when one has to sample from the joint or a marginal of visible and hidden.\n"
                  "This option gives the number of steps to run in the Gibbs chain between\n"
                  "consecutive generated samples that are produced as output of the fprop method.\n"
                  "By default this is equal to min_n_Gibbs_steps.\n");

    declareOption(ol, "compute_log_likelihood",
                  &RBMModule::compute_log_likelihood,
                  OptionBase::buildoption,
                  "Whether to compute the exact RBM generative model's log-likelihood\n"
                  "(on the neg_log_likelihood port). If false then the neg_log_likelihood\n"
                  "port just computes the input visible's free energy.\n");

    declareOption(ol, "minimize_log_likelihood",
                  &RBMModule::minimize_log_likelihood,
                  OptionBase::buildoption,
                  "Whether to minimize the exact RBM generative model's negative log-likelihood,\n"
                  "i.e. take stochastic gradient steps w.r.t. the log-likelihood instead\n"
                  "of w.r.t. the contrastive divergence.\n");

    declareOption(ol, "compare_true_gradient_with_cd",
                  &RBMModule::compare_true_gradient_with_cd,
                  OptionBase::buildoption,
        "If true, the module will compute the true gradient (of the NLL) as\n"
        "well as the exact non-stochastic CD update, and compare them.",
                  OptionBase::advanced_level);

    declareOption(ol, "n_steps_compare",
                  &RBMModule::n_steps_compare,
                  OptionBase::buildoption,
        "Number of steps for which we want to compare CD with the true\n"
        "gradient (when 'compare_true_gradient_with_cd' is true). This will\n"
        "compute P(x_t|x) for t from 1 to 'n_steps_compare'.",
                  OptionBase::advanced_level);

    // Learnt options.

    declareOption(ol, "Gibbs_step",
                  &RBMModule::Gibbs_step,
                  OptionBase::learntoption,
                  "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
                  "when one has to sample from the joint or a marginal of visible and hidden.\n"
                  "Keeps track of the number of steps that have been run since the beginning\n"
                  "of the chain.\n");

    declareOption(ol, "log_partition_function",
                  &RBMModule::log_partition_function,
                  OptionBase::learntoption,
                  "log(Z) = log(sum_{h,x} exp(-energy(h,x)))\n"
                  "only computed if compute_log_likelihood is true and\n"
                  "the neg_log_likelihood port is requested.\n");

    declareOption(ol, "partition_function_is_stale",
                  &RBMModule::partition_function_is_stale,
                  OptionBase::learntoption,
                  "Whether parameters have changed since the last computation\n"
                  "of the log_partition_function (to know if it should be recomputed\n"
                  "when the neg_log_likelihood port is requested).\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}
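
// Build-time configuration sketch (the options are those declared above, but
// the values are illustrative assumptions, not recommended settings):
//
//     rbm->cd_learning_rate = 0.01;                // enables CD updates
//     rbm->n_Gibbs_steps_CD = 1;                   // i.e. CD-1
//     rbm->compute_contrastive_divergence = true;  // adds the CD ports
//     rbm->build();                                // re-creates the port list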

void RBMModule::declareMethods(RemoteMethodMap& rmm)
{
    // Make sure that inherited methods are declared
    rmm.inherited(inherited::_getRemoteMethodMap_());

    declareMethod(rmm, "CDUpdate", &RBMModule::CDUpdate,
                  (BodyDoc("Perform one CD_k update"),
                   ArgDoc ("v_0", "Positive phase statistics on visible layer"),
                   ArgDoc ("h_0", "Positive phase statistics on hidden layer"),
                   ArgDoc ("v_k", "Negative phase statistics on visible layer"),
                   ArgDoc ("h_k", "Negative phase statistics on hidden layer")
                  ));

    declareMethod(rmm, "computePartitionFunction",
        &RBMModule::computePartitionFunction,
        (BodyDoc("Compute the log partition function (will be stored within "
                 "the 'log_partition_function' field)")));

    declareMethod(rmm, "computeLogLikelihoodOfVisible",
        &RBMModule::computeLogLikelihoodOfVisible,
        (BodyDoc("Compute log-likelihood"),
         ArgDoc("visible", "Matrix of visible inputs"),
         RetDoc("A vector with the log-likelihood of each input")));
}

void RBMModule::CDUpdate(const Mat& v_0, const Mat& h_0,
                         const Mat& v_k, const Mat& h_k)
{
    visible_layer->update(v_0, v_k);
    hidden_layer->update(h_0, h_k);
    connection->update(v_0, h_0, v_k, h_k);
    partition_function_is_stale = true;
}
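
// For reference, with an RBMMatrixConnection of weights W, visible biases b
// and hidden biases c, the update above amounts to the usual CD_k formulas
// (a sketch, averaged over minibatch rows, with the learning rate 'lr' taken
// from the layers' and connection's own settings):
//
//     W <- W + lr * ( h_0 v_0' - h_k v_k' )
//     b <- b + lr * ( v_0 - v_k )
//     c <- c + lr * ( h_0 - h_k )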

////////////
// build_ //
////////////
void RBMModule::build_()
{
    PLASSERT( cd_learning_rate >= 0 && grad_learning_rate >= 0 );
    if(visible_layer)
        visible_bias_grad.resize(visible_layer->size);

    // Forward random generator to underlying modules.
    if (random_gen) {
        if (hidden_layer && !hidden_layer->random_gen) {
            hidden_layer->random_gen = random_gen;
            hidden_layer->build();
            hidden_layer->forget();
        }
        if (visible_layer && !visible_layer->random_gen) {
            visible_layer->random_gen = random_gen;
            visible_layer->build();
            visible_layer->forget();
        }
        if (connection && !connection->random_gen) {
            connection->random_gen = random_gen;
            connection->build();
            connection->forget();
        }
        if (reconstruction_connection &&
                !reconstruction_connection->random_gen) {
            reconstruction_connection->random_gen = random_gen;
            reconstruction_connection->build();
            reconstruction_connection->forget();
        }
    }

    // build ports and port_sizes

    ports.resize(0);
    portname_to_index.clear();
    addPortName("visible");
    addPortName("hidden.state");
    addPortName("hidden_activations.state");
    addPortName("visible_sample");
    addPortName("visible_expectation");
    addPortName("visible_activations.state");
    addPortName("hidden_sample");
    addPortName("energy");
    addPortName("hidden_bias");
    addPortName("weights");
    addPortName("neg_log_likelihood");
    // a column matrix with one element -log P(h) for each row h of "hidden",
    // used as an input port, with neg_log_pvisible_given_phidden as output
    addPortName("neg_log_phidden");
    // compute column matrix with one entry -log P(x) = -log( sum_h P(x|h) P(h) ) for
    // each row x of "visible", and where {P(h)}_h is provided
    // in "neg_log_phidden" for the set of h's in "hidden".
    addPortName("neg_log_pvisible_given_phidden");
    addPortName("median_reldiff_cd_nll");
    addPortName("mean_diff_cd_nll");
    addPortName("agreement_cd_nll");
    addPortName("agreement_stoch");
    addPortName("bound_cd_nll");
    addPortName("weights_stats");
    addPortName("ratio_cd_leftout");
    addPortName("abs_cd");
    addPortName("nll_grad");
    if(reconstruction_connection)
    {
        addPortName("visible_reconstruction.state");
        addPortName("visible_reconstruction_activations.state");
        addPortName("reconstruction_error.state");
    }
    if (compute_contrastive_divergence)
    {
        addPortName("contrastive_divergence");
        addPortName("negative_phase_visible_samples.state");
        addPortName("negative_phase_hidden_expectations.state");
        addPortName("negative_phase_hidden_activations.state");
    }

    port_sizes.resize(nPorts(), 2);
    port_sizes.fill(-1);
    if (visible_layer) {
        port_sizes(getPortIndex("visible"), 1) = visible_layer->size;
        port_sizes(getPortIndex("visible_sample"), 1) = visible_layer->size;
        port_sizes(getPortIndex("visible_expectation"), 1) = visible_layer->size;
        port_sizes(getPortIndex("visible_activations.state"), 1) = visible_layer->size;
    }
    if (hidden_layer) {
        port_sizes(getPortIndex("hidden.state"), 1) = hidden_layer->size;
        port_sizes(getPortIndex("hidden_activations.state"), 1) = hidden_layer->size;
        port_sizes(getPortIndex("hidden_sample"), 1) = hidden_layer->size;
        port_sizes(getPortIndex("hidden_bias"),1) = hidden_layer->size;
        if(visible_layer)
            port_sizes(getPortIndex("weights"),1) = hidden_layer->size * visible_layer->size;
    }
    port_sizes(getPortIndex("energy"),1) = 1;
    port_sizes(getPortIndex("neg_log_likelihood"),1) = 1;
    port_sizes(getPortIndex("neg_log_phidden"),1) = 1;
    port_sizes(getPortIndex("neg_log_pvisible_given_phidden"),1) = 1;
    if(reconstruction_connection)
    {
        if (visible_layer) {
            port_sizes(getPortIndex("visible_reconstruction.state"),1) =
                visible_layer->size;
            port_sizes(getPortIndex("visible_reconstruction_activations.state"),1) =
                visible_layer->size;
        }
        port_sizes(getPortIndex("reconstruction_error.state"),1) = 1;
    }
    if (compute_contrastive_divergence)
    {
        port_sizes(getPortIndex("contrastive_divergence"),1) = 1;
        if (visible_layer)
            port_sizes(getPortIndex("negative_phase_visible_samples.state"),1) = visible_layer->size;
        if (hidden_layer)
            port_sizes(getPortIndex("negative_phase_hidden_expectations.state"),1) = hidden_layer->size;
        if (fast_exact_is_equal(cd_learning_rate, 0))
            PLWARNING("In RBMModule::build_ - Contrastive divergence is "
                    "computed but 'cd_learning_rate' is set to 0: no internal "
                    "update will be performed AND no contrastive divergence "
                    "gradient will be propagated.");
    }

    PLCHECK_MSG(!(!standard_cd_grad && standard_cd_bias_grad), "You cannot "
            "compute the standard CD gradient w.r.t. external hidden bias and "
            "use the 'true' CD gradient w.r.t. internal hidden bias");

    if (n_Gibbs_steps_per_generated_sample<0)
        n_Gibbs_steps_per_generated_sample = min_n_Gibbs_steps;

}

///////////
// build //
///////////
void RBMModule::build()
{
    inherited::build();
    build_();
}

/////////////////
// addPortName //
/////////////////
void RBMModule::addPortName(const string& name)
{
    PLASSERT( portname_to_index.find(name) == portname_to_index.end() );
    portname_to_index[name] = ports.length();
    ports.append(name);
}

///////////////////
// computeEnergy //
///////////////////
// FULLY OBSERVED CASE
// we know x and h:
// energy(h,x) = -b'x - c'h - h'Wx
//  = visible_layer->energy(x) + hidden_layer->energy(h)
//      - dot(h, hidden_layer->activation-c)
//  = visible_layer->energy(x) - dot(h, hidden_layer->activation)
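// (The last equality holds because hidden_layer->activation = c + W x, so
//  dot(h, activation) = c'h + h'Wx, which recovers both the -c'h and the
//  -h'Wx terms of the energy.)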
void RBMModule::computeEnergy(const Mat& visible, const Mat& hidden,
                              Mat& energy, bool positive_phase)
{
    int mbs=hidden.length();
    energy.resize(mbs, 1);
    Mat* hidden_activations = NULL;
    if (positive_phase) {
        computePositivePhaseHiddenActivations(visible);
        hidden_activations = hidden_act;
    } else {
        computeHiddenActivations(visible);
        hidden_activations = & hidden_layer->activations;
    }
    PLASSERT( hidden_activations );
    for (int i=0;i<mbs;i++)
        energy(i,0) = visible_layer->energy(visible(i))
            - dot(hidden(i), (*hidden_activations)(i));
            // Why not: + hidden_layer->energy(hidden(i)) ?
}

///////////////////////////////
// computeFreeEnergyOfHidden //
///////////////////////////////
// FREE-ENERGY(hidden) CASE
// we know h:
// free energy = -log sum_x e^{-energy(h,x)}
// or more robustly,
//  = hidden_layer->energy(h)
//    + visible_layer->freeEnergyContribution(visible_layer->activation)
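// For instance, with binomial (sigmoid) visible units, this contribution has
// the closed form -sum_i log(1 + exp(a_i)) with a = b + W'h the visible
// activation, giving F(h) = -c'h - sum_i log(1 + exp(b_i + (W'h)_i)).
// (Illustrative; the exact form depends on the RBMLayer subclass.)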
void RBMModule::computeFreeEnergyOfHidden(const Mat& hidden, Mat& energy)
{
    int mbs=hidden.length();
    if (energy.isEmpty())
        energy.resize(mbs,1);
    else {
        PLASSERT( energy.length() == mbs && energy.width() == 1 );
    }

    computeVisibleActivations(hidden, false);
    for (int i=0;i<mbs;i++)
    {
        energy(i,0) = hidden_layer->energy(hidden(i))
            + visible_layer->freeEnergyContribution(
                visible_layer->activations(i));
    }
}

////////////////////////////////
// computeFreeEnergyOfVisible //
////////////////////////////////
// FREE-ENERGY(visible) CASE
// we know x:
// free energy = -log sum_h e^{-energy(h,x)}
// or more robustly,
//  = visible_layer->energy(x)
//    + hidden_layer->freeEnergyContribution(hidden_layer->activation)
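// Symmetrically, with binomial hidden units this yields the familiar
// F(x) = -b'x - sum_j log(1 + exp(c_j + (W x)_j)).
// (Illustrative; the exact form depends on the RBMLayer subclass.)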
void RBMModule::computeFreeEnergyOfVisible(const Mat& visible, Mat& energy,
                                           bool positive_phase)
{
    int mbs=visible.length();
    if (energy.isEmpty())
        energy.resize(mbs,1);
    else {
        PLASSERT( energy.length() == mbs && energy.width() == 1 );
    }

    Mat* hidden_activations = NULL;
    if (positive_phase && hidden_act) {
        computePositivePhaseHiddenActivations(visible);
        hidden_activations = hidden_act;
    }
    else {
        computeHiddenActivations(visible);
        hidden_activations = & hidden_layer->activations;
    }
    PLASSERT( hidden_activations && hidden_activations->length() == mbs
            && hidden_activations->width() == hidden_layer->size );
    for (int i=0;i<mbs;i++)
    {
        energy(i,0) = visible_layer->energy(visible(i))
            + hidden_layer->freeEnergyContribution((*hidden_activations)(i));
    }
}

//////////////////////////////
// computeHiddenActivations //
//////////////////////////////
void RBMModule::computeHiddenActivations(const Mat& visible)
{
    if(weights && !weights->isEmpty())
    {
        Mat old_weights;
        Vec old_activation;
        connection->getAllWeights(old_weights);
        old_activation = hidden_layer->activation;
        int up = connection->up_size;
        int down = connection->down_size;
        PLASSERT( weights->width() == up * down  );
        hidden_layer->setBatchSize( visible.length() );
        for(int i=0; i<visible.length(); i++)
        {
            connection->setAllWeights(Mat(up, down, (*weights)(i)));
            connection->setAsDownInput(visible(i));
            hidden_layer->activation = hidden_layer->activations(i);
            hidden_layer->getAllActivations(connection, 0, false);
            if (hidden_bias && !hidden_bias->isEmpty())
                hidden_layer->activation += (*hidden_bias)(i);
        }
        connection->setAllWeights(old_weights);
        hidden_layer->activation = old_activation;
    }
    else
    {
        connection->setAsDownInputs(visible);
        hidden_layer->getAllActivations(connection, 0, true);
        if (hidden_bias && !hidden_bias->isEmpty())
            hidden_layer->activations += *hidden_bias;
    }
}

///////////////////////////////////
// computeLogLikelihoodOfVisible //
///////////////////////////////////
Vec RBMModule::computeLogLikelihoodOfVisible(const Mat& visible)
{
    Mat energy;
    computePartitionFunction();
    computeFreeEnergyOfVisible(visible, energy, false);
    negateElements(energy);
    for (int i = 0; i < energy.length(); i++)
        energy(i, 0) -= log_partition_function;
    return energy.toVec();
}
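
// The above implements log P(x) = -F(x) - log Z: the free energy is negated
// and log Z (refreshed by the computePartitionFunction() call) is subtracted
// from each entry of the returned vector.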

///////////////////////////////////
// computeAllHiddenProbabilities //
///////////////////////////////////
void RBMModule::computeAllHiddenProbabilities(const Mat& visible,
                                              const Mat& p_hidden)
{
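    // On return, p_hidden(i, j) holds P(h = i-th hidden configuration |
    // visible = j-th row of 'visible').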
    Vec hidden(hidden_layer->size);
    computeHiddenActivations(visible);
    int n_conf = hidden_layer->getConfigurationCount();
    for (int i = 0; i < n_conf; i++) {
        hidden_layer->getConfiguration(i, hidden);
        for (int j = 0; j < visible.length(); j++) {
            hidden_layer->activation = hidden_layer->activations(j);
            real neg_log_p_h_given_v = hidden_layer->fpropNLL(hidden);
            p_hidden(i, j) = exp(-neg_log_p_h_given_v);
        }
    }
}

///////////////////////////////////////////
// computePositivePhaseHiddenActivations //
///////////////////////////////////////////
void RBMModule::computePositivePhaseHiddenActivations(const Mat& visible)
{
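    // Caching contract: activations are computed at most once per fprop call;
    // 'hidden_activations_are_computed' is reset to false by fprop() and by
    // computePartitionFunction() when the cached values become invalid.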
    if (hidden_activations_are_computed) {
        // Nothing to do.
        PLASSERT( !hidden_act || !hidden_act->isEmpty() );
        return;
    }
    computeHiddenActivations(visible);
    if (hidden_act && hidden_act->isEmpty())
    {
        hidden_act->resize(visible.length(),hidden_layer->size);
        *hidden_act << hidden_layer->activations;
    }
    hidden_activations_are_computed = true;
}

///////////////////////////////
// computeVisibleActivations //
///////////////////////////////
void RBMModule::computeVisibleActivations(const Mat& hidden,
                                          bool using_reconstruction_connection)
{
    if (using_reconstruction_connection)
    {
        PLASSERT( reconstruction_connection );
        reconstruction_connection->setAsUpInputs(hidden);
        visible_layer->getAllActivations(reconstruction_connection, 0, true);
    }
    else
    {
        if(weights && !weights->isEmpty())
        {
            PLASSERT( connection->classname() == "RBMMatrixConnection" );
            Mat old_weights;
            Vec old_activation;
            connection->getAllWeights(old_weights);
            old_activation = visible_layer->activation;
            int up = connection->up_size;
            int down = connection->down_size;
            PLASSERT( weights->width() == up * down  );
            visible_layer->setBatchSize( hidden.length() );
            for(int i=0; i<hidden.length(); i++)
            {
                connection->setAllWeights(Mat(up,down,(*weights)(i)));
                connection->setAsUpInput(hidden(i));
                visible_layer->activation = visible_layer->activations(i);
                visible_layer->getAllActivations(connection, 0, false);
            }
            connection->setAllWeights(old_weights);
            visible_layer->activation = old_activation;
        }
        else
        {
            connection->setAsUpInputs(hidden);
            visible_layer->getAllActivations(connection, 0, true);
        }
    }
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void RBMModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(hidden_layer,     copies);
    deepCopyField(visible_layer,    copies);
    deepCopyField(connection,       copies);
    deepCopyField(reconstruction_connection, copies);

    deepCopyField(hidden_exp_grad, copies);
    deepCopyField(hidden_act_grad, copies);
    deepCopyField(store_weights_grad, copies);
    deepCopyField(store_hidden_bias_grad, copies);
    deepCopyField(visible_exp_grad, copies);
    deepCopyField(visible_act_grad, copies);
    deepCopyField(visible_bias_grad, copies);
    deepCopyField(hidden_exp_store, copies);
    deepCopyField(hidden_act_store, copies);

    deepCopyField(ports, copies);
    deepCopyField(energy_inputs, copies);

    deepCopyField(all_p_visible,            copies);
    deepCopyField(all_hidden_cond_prob,     copies);
    deepCopyField(all_visible_cond_prob,    copies);
    deepCopyField(p_ht_given_x,             copies);
    deepCopyField(p_xt_given_x,             copies);
}

///////////
// fprop //
///////////
void RBMModule::fprop(const Vec& input, Vec& output) const
{
    PLERROR("In RBMModule::fprop - Not implemented");
}

//////////////////////////////
// computePartitionFunction //
//////////////////////////////
void RBMModule::computePartitionFunction()
{
    int hidden_configurations = hidden_layer->getConfigurationCount();
    int visible_configurations = visible_layer->getConfigurationCount();

    PLASSERT_MSG(hidden_configurations != RBMLayer::INFINITE_CONFIGURATIONS ||
                 visible_configurations != RBMLayer::INFINITE_CONFIGURATIONS,
                 "To compute the exact log-likelihood of an RBM, the number of "
                 "configurations of the hidden or visible layer must be less "
                 "than 2^31.");

    // Compute partition function
    if (hidden_configurations > visible_configurations ||
        compare_true_gradient_with_cd)
        // do it by log-summing minus-free-energy of visible configurations
    {
        if (compare_true_gradient_with_cd) {
            all_p_visible.resize(visible_configurations);
            all_visible_cond_prob.resize(visible_configurations,
                                         hidden_configurations);
            all_hidden_cond_prob.resize(hidden_configurations,
                                        visible_configurations);
        }
        energy_inputs.resize(1, visible_layer->size);
        Vec input = energy_inputs(0);
        // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
        // AT ONCE IN A 'MINIBATCH'
        Mat free_energy(1, 1);
        log_partition_function = 0;
        PP<ProgressBar> pb;
        if (verbosity >= 2)
            pb = new ProgressBar("Computing partition function",
                                 visible_configurations);
        for (int c = 0; c < visible_configurations; c++)
        {
            visible_layer->getConfiguration(c, input);
            computeFreeEnergyOfVisible(energy_inputs, free_energy, false);
            real fe = free_energy(0,0);
            if (c==0)
                log_partition_function = -fe;
            else
                log_partition_function = logadd(log_partition_function, -fe);
            if (compare_true_gradient_with_cd) {
                all_p_visible[c] = -fe;
                // Compute P(visible | hidden) and P(hidden | visible) for all
                // values of hidden.
                computeAllHiddenProbabilities(input.toMat(1, input.length()),
                                              all_hidden_cond_prob.column(c));
                Vec hidden(hidden_layer->size);
                for (int d = 0; d < hidden_configurations; d++) {
                    hidden_layer->getConfiguration(d, hidden);
                    computeVisibleActivations(hidden.toMat(1, hidden.length()),
                                              false);
                    visible_layer->activation = visible_layer->activations(0);
                    real neg_log_p_v_given_h = visible_layer->fpropNLL(input);
                    all_visible_cond_prob(c, d) = exp(-neg_log_p_v_given_h);
                }
            }
            if (pb)
                pb->update(c + 1);
        }
        pb = NULL;
        hidden_activations_are_computed = false;
        if (compare_true_gradient_with_cd) {
            // Normalize probabilities.
            for (int i = 0; i < all_p_visible.length(); i++)
                all_p_visible[i] =
                    exp(all_p_visible[i] - log_partition_function);
            //pout << "All P(x): " << all_p_visible << endl;
            //pout << "Sum_x P(x) = " << sum(all_p_visible) << endl;
            if (!is_equal(sum(all_p_visible), 1)) {
                PLWARNING("The sum of all probabilities is not 1: %f",
                        sum(all_p_visible));
                // Renormalize.
                all_p_visible /= sum(all_p_visible);
            }
            PLCHECK( is_equal(sum(all_p_visible), 1) );
        }
    }
    else
        // do it by summing free-energy of hidden configurations
    {
        PLASSERT( !compare_true_gradient_with_cd );
        energy_inputs.resize(1, hidden_layer->size);
        Vec input = energy_inputs(0);
        // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
        // AT ONCE IN A 'MINIBATCH'
        Mat free_energy(1, 1);
        log_partition_function = 0;
        for (int c = 0; c < hidden_configurations; c++)
        {
            hidden_layer->getConfiguration(c, input);
            //pout << "Input = " << input << endl;
            computeFreeEnergyOfHidden(energy_inputs, free_energy);
            //pout << "FE = " << free_energy(0, 0) << endl;
            real fe = free_energy(0,0);
            if (c==0)
                log_partition_function = -fe;
            else
                log_partition_function = logadd(log_partition_function, -fe);
        }
    }
    if (false)
        pout << "Log Z(" << name << ") = " << log_partition_function << endl;
}
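
// Note on the loops above: since Z = sum_c exp(-F(c)) over all configurations
// c of the enumerated layer, log Z is accumulated with the recurrence
//     log Z <- logadd(log Z, -F(c)),   logadd(a,b) = log(exp(a) + exp(b)),
// where logadd() is evaluated in a numerically robust way (factoring out the
// larger argument) rather than by direct exponentiation.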

///////////
// fprop //
///////////
void RBMModule::fprop(const TVec<Mat*>& ports_value)
{

    PLASSERT( ports_value.length() == nPorts() );
    PLASSERT( visible_layer );
    PLASSERT( hidden_layer );
    PLASSERT( connection );

    Mat* visible = ports_value[getPortIndex("visible")];
    bool visible_is_output = visible && visible->isEmpty();
    Mat* hidden = ports_value[getPortIndex("hidden.state")];
    // hidden_is_output is needed in BPROP, which is VERY BAD, VIOLATING OUR DESIGN ASSUMPTIONS
    hidden_is_output = hidden && hidden->isEmpty();
    hidden_act = ports_value[getPortIndex("hidden_activations.state")];
    bool hidden_act_is_output = hidden_act && hidden_act->isEmpty();
    Mat* visible_sample = ports_value[getPortIndex("visible_sample")];
    bool visible_sample_is_output = visible_sample && visible_sample->isEmpty();
    Mat* visible_expectation = ports_value[getPortIndex("visible_expectation")];
    bool visible_expectation_is_output = visible_expectation && visible_expectation->isEmpty();
    Mat* visible_activation = ports_value[getPortIndex("visible_activations.state")];
    bool visible_activation_is_output = visible_activation && visible_activation->isEmpty();
    Mat* hidden_sample = ports_value[getPortIndex("hidden_sample")];
    bool hidden_sample_is_output = hidden_sample && hidden_sample->isEmpty();
    Mat* energy = ports_value[getPortIndex("energy")];
    bool energy_is_output = energy && energy->isEmpty();
    Mat* neg_log_likelihood = ports_value[getPortIndex("neg_log_likelihood")];
    bool neg_log_likelihood_is_output = neg_log_likelihood && neg_log_likelihood->isEmpty();
    Mat* neg_log_phidden = ports_value[getPortIndex("neg_log_phidden")];
    bool neg_log_phidden_is_output = neg_log_phidden && neg_log_phidden->isEmpty();
    Mat* neg_log_pvisible_given_phidden = ports_value[getPortIndex("neg_log_pvisible_given_phidden")];
    bool neg_log_pvisible_given_phidden_is_output = neg_log_pvisible_given_phidden && neg_log_pvisible_given_phidden->isEmpty();
    Mat* median_reldiff_cd_nll = ports_value[getPortIndex("median_reldiff_cd_nll")];
    bool median_reldiff_cd_nll_is_output = median_reldiff_cd_nll && median_reldiff_cd_nll->isEmpty();
    Mat* mean_diff_cd_nll = ports_value[getPortIndex("mean_diff_cd_nll")];
    bool mean_diff_cd_nll_is_output = mean_diff_cd_nll && mean_diff_cd_nll->isEmpty();
    Mat* agreement_cd_nll = ports_value[getPortIndex("agreement_cd_nll")];
    bool agreement_cd_nll_is_output = agreement_cd_nll && agreement_cd_nll->isEmpty();
    Mat* agreement_stoch = ports_value[getPortIndex("agreement_stoch")];
    bool agreement_stoch_is_output = agreement_stoch && agreement_stoch->isEmpty();
    Mat* bound_cd_nll = ports_value[getPortIndex("bound_cd_nll")];
    bool bound_cd_nll_is_output = bound_cd_nll && bound_cd_nll->isEmpty();
    Mat* weights_stats = ports_value[getPortIndex("weights_stats")];
    bool weights_stats_is_output = weights_stats && weights_stats->isEmpty();
    Mat* ratio_cd_leftout = ports_value[getPortIndex("ratio_cd_leftout")];
    bool ratio_cd_leftout_is_output = ratio_cd_leftout && ratio_cd_leftout->isEmpty();
    Mat* abs_cd = ports_value[getPortIndex("abs_cd")];
    bool abs_cd_is_output = abs_cd && abs_cd->isEmpty();
    Mat* nll_grad = ports_value[getPortIndex("nll_grad")];
    bool nll_grad_is_output = nll_grad && nll_grad->isEmpty();
    hidden_bias = ports_value[getPortIndex("hidden_bias")];
    //bool hidden_bias_is_output = hidden_bias && hidden_bias->isEmpty();
    weights = ports_value[getPortIndex("weights")];
    //bool weights_is_output = weights && weights->isEmpty();
    Mat* visible_reconstruction = 0;
    Mat* visible_reconstruction_activations = 0;
    Mat* reconstruction_error = 0;
    if(reconstruction_connection)
    {
        visible_reconstruction =
            ports_value[getPortIndex("visible_reconstruction.state")];
        visible_reconstruction_activations =
            ports_value[getPortIndex("visible_reconstruction_activations.state")];
        reconstruction_error =
            ports_value[getPortIndex("reconstruction_error.state")];
    }
    bool visible_reconstruction_is_output = visible_reconstruction && visible_reconstruction->isEmpty();
    bool visible_reconstruction_activations_is_output = visible_reconstruction_activations && visible_reconstruction_activations->isEmpty();
    bool reconstruction_error_is_output = reconstruction_error && reconstruction_error->isEmpty();
    Mat* contrastive_divergence = 0;
    Mat* negative_phase_visible_samples = 0;
    Mat* negative_phase_hidden_expectations = 0;
    Mat* negative_phase_hidden_activations = NULL;
    if (compute_contrastive_divergence)
    {
        contrastive_divergence = ports_value[getPortIndex("contrastive_divergence")];
/* YB: I don't agree with this error message: the behavior should be adapted to the provided ports.
      if (!contrastive_divergence || !contrastive_divergence->isEmpty())
            PLERROR("In RBMModule::fprop - When option "
                    "'compute_contrastive_divergence' is 'true', the "
                    "'contrastive_divergence' port should be provided, as an "
                    "output.");*/
        negative_phase_visible_samples =
            ports_value[getPortIndex("negative_phase_visible_samples.state")];
        negative_phase_hidden_expectations =
            ports_value[getPortIndex("negative_phase_hidden_expectations.state")];
        negative_phase_hidden_activations =
            ports_value[getPortIndex("negative_phase_hidden_activations.state")];
    }
    bool contrastive_divergence_is_output = contrastive_divergence && contrastive_divergence->isEmpty();
    //bool negative_phase_visible_samples_is_output = negative_phase_visible_samples && negative_phase_visible_samples->isEmpty();
    bool negative_phase_hidden_expectations_is_output = negative_phase_hidden_expectations && negative_phase_hidden_expectations->isEmpty();
    bool negative_phase_hidden_activations_is_output = negative_phase_hidden_activations && negative_phase_hidden_activations->isEmpty();

    bool hidden_expectations_are_computed = false;
    hidden_activations_are_computed = false;
    bool found_a_valid_configuration = false;

    if (visible && !visible_is_output)
    {
        // When an input is provided, restart the Gibbs chain for
        // unconditional sampling from that example.
        Gibbs_step = 0;
        visible_layer->samples.resize(visible->length(),visible->width());
        visible_layer->samples << *visible;
    }

    // COMPUTE ENERGY
    if (energy)
    {
        PLASSERT_MSG( energy_is_output,
                      "RBMModule: the energy port can only be an output port\n" );
        if (visible && !visible_is_output
            && hidden && !hidden_is_output)
        {
            computeEnergy(*visible, *hidden, *energy);
        }
        else if (visible && !visible_is_output)
        {
            computeFreeEnergyOfVisible(*visible,*energy);
        }
        else if (hidden && !hidden_is_output)
        {
            computeFreeEnergyOfHidden(*hidden,*energy);
        }
        else
        {
            PLERROR("RBMModule: unknown configuration to compute energy (currently\n"
                    "only possible if at least visible or hidden are provided).\n");
        }
        found_a_valid_configuration = true;
    }


    // COMPUTE UNSUPERVISED NLL
    if (neg_log_likelihood && neg_log_likelihood_is_output && compute_log_likelihood)
    {
        if (partition_function_is_stale && !during_training)
        {
            // Save layers' state
            Mat visible_activations = visible_layer->activations.copy();
            Mat visible_expectations = visible_layer->getExpectations().copy();
            Mat visible_samples = visible_layer->samples.copy();

            Mat hidden_activations = hidden_layer->activations.copy();
            Mat hidden_expectations = hidden_layer->getExpectations().copy();
            Mat hidden_samples = hidden_layer->samples.copy();

            computePartitionFunction();

            // Restore layers' state
            visible_layer->activations.resize(visible_activations.length(),
                                              visible_activations.width());
            visible_layer->activations << visible_activations;

            visible_layer->setExpectations(visible_expectations);

            visible_layer->samples.resize(visible_samples.length(),
                                          visible_samples.width());
            visible_layer->samples << visible_samples;

            hidden_layer->activations.resize(hidden_activations.length(),
                                              hidden_activations.width());
            hidden_layer->activations << hidden_activations;

            hidden_layer->setExpectations(hidden_expectations);

            hidden_layer->samples.resize(hidden_samples.length(),
                                          hidden_samples.width());
            hidden_layer->samples << hidden_samples;

            partition_function_is_stale=false;
        }
        if (visible && !visible_is_output
            && hidden && !hidden_is_output)
        {
            // neg-log-likelihood(visible,hidden) = energy(visible,hidden) + log(partition_function)
            computeEnergy(*visible,*hidden,*neg_log_likelihood);
            *neg_log_likelihood += log_partition_function;
        }
        else if (visible && !visible_is_output)
        {
            // neg-log-likelihood(visible) = free_energy(visible) + log(partition_function)
            computeFreeEnergyOfVisible(*visible,*neg_log_likelihood,hidden_act);
            *neg_log_likelihood += log_partition_function;
        }
        else if (hidden && !hidden_is_output)
        {
            // neg-log-likelihood(hidden) = free_energy(hidden) + log(partition_function)
            computeFreeEnergyOfHidden(*hidden,*neg_log_likelihood);
            *neg_log_likelihood += log_partition_function;
        }
        else PLERROR("RBMModule: neg_log_likelihood is currently computable "
                     "only when 'visible' and/or 'hidden' are provided as inputs");
        found_a_valid_configuration = true;
    }


    // REGULAR FPROP
    // we are given the visible units and we want to compute the hidden
    // activation and/or the hidden expectation
    if ( visible && !visible_is_output &&
         hidden && hidden_is_output )
    {
        computePositivePhaseHiddenActivations(*visible);
        PLCHECK_MSG( !hidden_layer->expectations_are_up_to_date, "Safety "
                     "check: how were expectations computed previously?" );
        hidden_layer->computeExpectations();
        hidden_expectations_are_computed=true;
        const Mat& hidden_out = hidden_layer->getExpectations();
        hidden->resize(hidden_out.length(), hidden_out.width());
        *hidden << hidden_out;

        // Since we return below, the other ports must be unused.
        //PLASSERT( !visible_sample && !hidden_sample );
        found_a_valid_configuration = true;
    }

    // DOWNWARD FPROP
    // we are given hidden and we want to compute the visible or visible_activation
    if ( hidden && !hidden_is_output && visible && visible_is_output)
    {
        computeVisibleActivations(*hidden,true);
        if (visible_activation)
        {
            PLASSERT_MSG(visible_activation_is_output,"visible_activation should be an output");
            visible_activation->resize(visible_layer->activations.length(),
                                       visible_layer->size);
            *visible_activation << visible_layer->activations;
        }
        if (visible)
        {
            PLASSERT_MSG(visible_is_output,"visible should be an output");
            visible_layer->computeExpectations();
            const Mat expectations=visible_layer->getExpectations();
            visible->resize(expectations.length(),visible_layer->size);
            *visible << expectations;
        }
        if (hidden_act && hidden_act_is_output)
        {
            // THIS IS STUPID CODE TO HANDLE THE BAD state SYSTEM AND AVOID AN UNNECESSARY ERROR MESSAGE
            // (hidden_act is a "state" port that must always be produced, even if we don't compute it!)
            hidden_act->resize(hidden_layer->samples.length(),
                               hidden_layer->samples.width());
        }
        found_a_valid_configuration = true;
    }

    // COMPUTE AUTOASSOCIATOR RECONSTRUCTION ERROR
    if ( visible && !visible_is_output &&
         ( ( visible_reconstruction && visible_reconstruction_is_output ) ||
           ( visible_reconstruction_activations &&
             visible_reconstruction_activations_is_output ) ||
           ( reconstruction_error && reconstruction_error_is_output ) ) )
    {
        // Autoassociator reconstruction cost
        PLASSERT( ports_value.length() == nPorts() );

        Mat h;
        if (hidden && !hidden_is_output) {
            h = *hidden;
            PLASSERT(!stochastic_reconstruction);
        } else {
            if(!hidden_expectations_are_computed)
            {
                computePositivePhaseHiddenActivations(*visible);
                hidden_layer->computeExpectations();
                hidden_expectations_are_computed=true;
            }
            if (stochastic_reconstruction) {
                hidden_layer->generateSamples();
                h = hidden_layer->samples;
            } else
                h = hidden_layer->getExpectations();
        }
01163         // No need to check whether these were requested on a port: this was verified earlier.
01164 
01165         computeVisibleActivations(h, true);
01166         if(visible_reconstruction_activations)
01167         {
01168             PLASSERT( visible_reconstruction_activations_is_output );
01169             const Mat& to_store = visible_layer->activations;
01170             visible_reconstruction_activations->resize(to_store.length(),
01171                                                        to_store.width());
01172             *visible_reconstruction_activations << to_store;
01173         }
01174         if (visible_reconstruction || reconstruction_error)
01175         {
01176             visible_layer->computeExpectations();
01177             if(visible_reconstruction)
01178             {
01179                 PLASSERT( visible_reconstruction_is_output );
01180                 const Mat& to_store = visible_layer->getExpectations();
01181                 visible_reconstruction->resize(to_store.length(),
01182                                                to_store.width());
01183                 *visible_reconstruction << to_store;
01184             }
01185             if(reconstruction_error)
01186             {
01187                 PLASSERT( reconstruction_error_is_output );
01188                 reconstruction_error->resize(visible->length(),1);
01189                 visible_layer->setBatchSize( visible->length() );
01190                 visible_layer->fpropNLL(*visible,
01191                                         *reconstruction_error);
01192             }
01193         }
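        // Illustrative sketch, assuming a binomial visible layer: the
        // fpropNLL call above then computes the usual cross-entropy
        // reconstruction cost
        //     -sum_i [ x_i log(x_hat_i) + (1 - x_i) log(1 - x_hat_i) ]
        // per row x of 'visible', with x_hat the reconstructed expectation;
        // other layer types define their own NLL.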
01194         found_a_valid_configuration = true;
01195     }
01196     // COMPUTE VISIBLE GIVEN HIDDEN
01197     else if ( visible_reconstruction && visible_reconstruction_is_output
01198          && hidden && !hidden_is_output)
01199     {
01200         PLASSERT_MSG(!stochastic_reconstruction,
01201                      "Not yet implemented");
01202         // No need to check whether these were requested on a port: this was verified earlier.
01203         computeVisibleActivations(*hidden,true);
01204         if(visible_reconstruction_activations)
01205         {
01206             PLASSERT( visible_reconstruction_activations_is_output );
01207             const Mat& to_store = visible_layer->activations;
01208             visible_reconstruction_activations->resize(to_store.length(),
01209                                                        to_store.width());
01210             *visible_reconstruction_activations << to_store;
01211         }
01212         visible_layer->computeExpectations();
01213         PLASSERT( visible_reconstruction_is_output );
01214         const Mat& to_store = visible_layer->getExpectations();
01215         visible_reconstruction->resize(to_store.length(),
01216                                        to_store.width());
01217         *visible_reconstruction << to_store;
01218         found_a_valid_configuration = true;
01219     }
01220 
01221     // Compute column matrix with one entry:
01222     //      -log P(x) = -log( sum_h P(x|h) P(h) )
01223     // for each row x of "visible", and where {P(h)}_h is provided
01224     // in "neg_log_phidden" for the set of h's in "hidden".
01225     //
01226     // neg_log_phidden is an optional column matrix with one element:
01227     //      -log P(h)
01228     // for each row h of "hidden", used as an input port,
01229     // with neg_log_pvisible_given_phidden as output.
01230     //
01231     // If neg_log_phidden is NOT provided, P(h) is assumed to be uniform,
01232     // i.e. 1/n_h (n_h=hidden->length()).
01233     if (neg_log_pvisible_given_phidden
01234         && neg_log_pvisible_given_phidden_is_output
01235         && hidden && !hidden_is_output
01236         && visible && !visible_is_output)
01237     {
01238         // estimate P(x) by sum_h P(x|h) P(h) where P(h) is either constant
01239         // or provided by neg_log_phidden
01240         if (neg_log_phidden)
01241         {
01242             PLASSERT_MSG(!neg_log_phidden_is_output,
01243                          "If neg_log_phidden is provided, it must be an input");
01244             PLASSERT_MSG(neg_log_phidden->length()==hidden->length(),
01245                         "If neg_log_phidden is provided, it must have the same"
01246                         " length as hidden.state");
01247             PLASSERT_MSG(neg_log_phidden->width()==1,
01248                          "neg_log_phidden must have width 1 (single column)");
01249         }
01250         computeNegLogPVisibleGivenPHidden(*visible,
01251                                           *hidden,
01252                                           neg_log_phidden,
01253                                           *neg_log_pvisible_given_phidden);
01254         found_a_valid_configuration = true;
01255     }
01256 
01257     // SAMPLING
01258     if ((visible_sample && visible_sample_is_output)
01259             // is asked to sample visible units (discrete)
01260         || (visible_expectation && visible_expectation_is_output)
01261             //              "                   (continuous)
01262         || (hidden_sample && hidden_sample_is_output)
01263             // or to sample hidden units
01264         )
01265     {
01266         if (hidden_sample && !hidden_sample_is_output)
01267             // sample visible conditionally on hidden
01268         {
01269             sampleVisibleGivenHidden(*hidden_sample);
01270             Gibbs_step=0;
01271             //cout << "sampling visible from hidden" << endl;
01272         }
01273         else if (visible_sample && !visible_sample_is_output)
01274             // if an input is provided, sample hidden conditionally
01275         {
01276             sampleHiddenGivenVisible(*visible_sample);
01277             hidden_activations_are_computed = false;
01278             Gibbs_step = 0;
01279             //cout << "sampling hidden from visible" << endl;
01280         }
01281         else if (visible_expectation && !visible_expectation_is_output)
01282         {
01283              PLERROR("In RBMModule::fprop - visible_expectation can only be an output port (use 'visible' as input port)");
01284         }
01285         else // sample unconditionally: Gibbs sample after k steps
01286         {
01287             // Find out how many samples we want.
01288             // TODO: check if this code is OK.
01289             int n_samples = -1;
01290             if (visible_sample_is_output)
01291             {
01292                 // Not exactly sure of where to pick the sizes from
01293                 visible_sample->resize(visible_layer->samples.length(),
01294                                        visible_layer->samples.width());
01295                 n_samples = visible_sample->length();
01296             }
01297             if (visible_expectation_is_output)
01298             {
01299                 // Not exactly sure of where to pick the sizes from
01300                 visible_expectation->resize(visible_layer->samples.length(),
01301                                             visible_layer->samples.width());
01302                 PLASSERT( n_samples == -1 ||
01303                           n_samples == visible_expectation->length() );
01304                 n_samples = visible_expectation->length();
01305             }
01306             if (hidden_sample_is_output)
01307             {
01308                 // Not exactly sure of where to pick the sizes from
01309                 hidden_sample->resize(hidden_layer->samples.length(),
01310                                       hidden_layer->samples.width());
01311 
01312                 PLASSERT( n_samples == -1 ||
01313                           n_samples == hidden_sample->length() );
01314                 n_samples = hidden_sample->length();
01315             }
01316             PLCHECK( n_samples > 0 );
01317 
01318             // visible_layer->samples holds the "state" from which we
01319             // start or continue the chain
01320             if (visible_layer->samples.isEmpty())
01321             {
01322                 // There are no samples already available to continue the
01323                 // chain: we restart it.
01324                 Gibbs_step = 0;
01325                 if (visible && !visible_is_output)
01326                     visible_layer->samples << *visible;
01327                 else if (!visible_layer->getExpectations().isEmpty())
01328                     visible_layer->samples << visible_layer->getExpectations();
01329                 else if (!hidden_layer->samples.isEmpty())
01330                     sampleVisibleGivenHidden(hidden_layer->samples);
01331                 else if (!hidden_layer->getExpectations().isEmpty())
01332                     sampleVisibleGivenHidden(hidden_layer->getExpectations());
01333                 else {
01334                     // There is no available data to initialize the chain: we
01335                     // initialize it with a zero vector.
01336                     Mat& zero_vector = visible_layer->samples;
01337                     PLASSERT( zero_vector.width() > 0 );
01338                     zero_vector.resize(1, zero_vector.width());
01339                     zero_vector.clear();
01340                 }
01341             }
01342             int min_n = max(Gibbs_step+n_Gibbs_steps_per_generated_sample,
01343                             min_n_Gibbs_steps);
01344             //cout << "Gibbs sampling " << Gibbs_step+1;
01345             PP<ProgressBar> pb =
01346                 verbosity >= 2 ? new ProgressBar("Gibbs sampling",
01347                                                  min_n - Gibbs_step)
01348                                : NULL;
01349             int start = Gibbs_step;
01350             for (;Gibbs_step<min_n;Gibbs_step++)
01351             {
01352                 sampleHiddenGivenVisible(visible_layer->samples);
01353                 sampleVisibleGivenHidden(hidden_layer->samples);
01354                 if (pb)
01355                     pb->update(Gibbs_step - start);
01356             }
01357             if (pb)
01358                 pb = NULL;
01359             hidden_activations_are_computed = false;
01360             //cout << " -> " << Gibbs_step << endl;
01361         }
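        // Illustrative example of the step count above: with Gibbs_step = 10,
        // n_Gibbs_steps_per_generated_sample = 5 and min_n_Gibbs_steps = 20,
        // min_n = max(10 + 5, 20) = 20, so the loop advances the chain by 10
        // alternating sampleHiddenGivenVisible / sampleVisibleGivenHidden
        // steps before the samples are written to the output ports below.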
01362 
01363         if ( hidden && hidden_is_output)
01364             // fill hidden.state with expectations
01365         {
01366               const Mat& hidden_expect = hidden_layer->getExpectations();
01367               hidden->resize(hidden_expect.length(), hidden_expect.width());
01368               *hidden << hidden_expect;
01369         }
01370         if (visible_sample && visible_sample_is_output)
01371             // provide sample of the visible units
01372         {
01373             visible_sample->resize(visible_layer->samples.length(),
01374                                    visible_layer->samples.width());
01375             PLASSERT( visible_sample->length() ==
01376                       visible_layer->samples.length() );
01377             *visible_sample << visible_layer->samples;
01378         }
01379         if (hidden_sample && hidden_sample_is_output)
01380             // provide sample of the hidden units
01381         {
01382             hidden_sample->resize(hidden_layer->samples.length(),
01383                                   hidden_layer->samples.width());
01384             PLASSERT( hidden_sample->length() ==
01385                       hidden_layer->samples.length() );
01386             *hidden_sample << hidden_layer->samples;
01387         }
01388         if (visible_expectation && visible_expectation_is_output)
01389             // provide expectation of the visible units
01390         {
01391             const Mat& to_store = visible_layer->getExpectations();
01392             visible_expectation->resize(to_store.length(),
01393                                         to_store.width());
01394             PLASSERT( visible_expectation->length() == to_store.length() );
01395             *visible_expectation << to_store;
01396         }
01397         if (hidden && hidden_is_output)
01398         {
01399             hidden->resize(hidden_layer->getExpectations().length(),
01400                            hidden_layer->getExpectations().width());
01401             PLASSERT( hidden->length() ==
01402                       hidden_layer->getExpectations().length() );
01403             *hidden << hidden_layer->getExpectations();
01404         }
01405         if (hidden_act && hidden_act_is_output)
01406         {
01407             hidden_act->resize(hidden_layer->activations.length(),
01408                                hidden_layer->activations.width());
01409             PLASSERT( hidden_act->length() ==
01410                       hidden_layer->activations.length() );
01411             *hidden_act << hidden_layer->activations;
01412         }
01413         found_a_valid_configuration = true;
01414     }// END SAMPLING
01415 
01416     // COMPUTE CONTRASTIVE DIVERGENCE CRITERION
01417     if (contrastive_divergence)
01418     {
01419         PLASSERT_MSG( contrastive_divergence_is_output,
01420                       "RBMModule: the contrastive_divergence port can only be an output port\n" );
01421         if (visible && !visible_is_output)
01422         {
01423             int mbs = visible->length();
01424             const Mat& hidden_expectations = hidden_layer->getExpectations();
01425             Mat* h=0;
01426             Mat* h_act=0;
01427             if (!hidden_activations_are_computed)
01428                 // it must be because neither hidden nor hidden_act was requested
01429             {
01430                 PLASSERT(!hidden_act);
01431                 computePositivePhaseHiddenActivations(*visible);
01432 
01433                 // we need to save the hidden activations somewhere
01434                 hidden_act_store.resize(mbs,hidden_layer->size);
01435                 hidden_act_store << hidden_layer->activations;
01436                 h_act = &hidden_act_store;
01437             }
01438             else
01439             {
01440                 // hidden_act must have been computed above if it was
01441                 // requested on its port
01442                 PLASSERT(hidden_act && !hidden_act->isEmpty());
01443                 h_act = hidden_act;
01444             }
01445             if (!hidden_expectations_are_computed)
01446                 // it must be because hidden outputs were not requested
01447             {
01448                 PLASSERT(!hidden);
01449                 hidden_layer->computeExpectations();
01450                 hidden_expectations_are_computed=true;
01451                 // we need to save the hidden expectations somewhere
01452                 hidden_exp_store.resize(mbs,hidden_layer->size);
01453                 hidden_exp_store << hidden_expectations;
01454                 h = &hidden_exp_store;
01455             }
01456             else
01457             {
01458                 // hidden expectations must have been computed above if they
01459                 // were requested on their port
01460                 PLASSERT(hidden && !hidden->isEmpty());
01461                 h = hidden;
01462             }
01463             // perform negative phase
01464             for( int i=0; i<n_Gibbs_steps_CD; i++)
01465             {
01466                 hidden_layer->generateSamples();
01467                 if (deterministic_reconstruction_in_cd)
01468                 {
01469                    // (Negative phase) compute visible expectations
01470                    computeVisibleActivations(hidden_layer->samples);
01471                    visible_layer->computeExpectations();
01472                    // compute corresponding hidden expectations.
01473                    computeHiddenActivations(visible_layer->getExpectations());
01474                 }
01475                 else
01476                 {
01477                    // (Negative phase) Generate visible samples.
01478                    sampleVisibleGivenHidden(hidden_layer->samples);
01479                    // compute corresponding hidden expectations.
01480                    computeHiddenActivations(visible_layer->samples);
01481                 }
01482                 hidden_activations_are_computed = false;
01483                 hidden_layer->computeExpectations();
01484             }
01485             PLASSERT(negative_phase_visible_samples);
01486             PLASSERT(negative_phase_hidden_expectations &&
01487                      negative_phase_hidden_expectations_is_output);
01488             PLASSERT(negative_phase_hidden_activations &&
01489                      negative_phase_hidden_activations_is_output);
01490             negative_phase_visible_samples->resize(mbs,visible_layer->size);
01491             if (deterministic_reconstruction_in_cd)
01492                *negative_phase_visible_samples <<
01493                    visible_layer->getExpectations();
01494             else
01495                *negative_phase_visible_samples << visible_layer->samples;
01496 
01497             negative_phase_hidden_expectations->resize(
01498                 hidden_expectations.length(),
01499                 hidden_expectations.width());
01500             *negative_phase_hidden_expectations << hidden_expectations;
01501             const Mat& neg_hidden_act = hidden_layer->activations;
01502             negative_phase_hidden_activations->resize(neg_hidden_act.length(),
01503                                                       neg_hidden_act.width());
01504             *negative_phase_hidden_activations << neg_hidden_act;
01505 
01506             contrastive_divergence->resize(hidden_expectations.length(),1);
01507             // compute contrastive divergence itself
01508             for (int i=0;i<mbs;i++)
01509             {
01510                 // + Free energy of positive example
01511                 // - free energy of negative example
01512                 (*contrastive_divergence)(i,0) =
01513                     visible_layer->energy((*visible)(i))
01514                   + hidden_layer->freeEnergyContribution((*h_act)(i))
01515                   - visible_layer->energy(visible_layer->samples(i))
01516                   - hidden_layer->freeEnergyContribution(hidden_layer->activations(i));
01517             }
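            // Illustrative sketch of the criterion above: each entry is a
            // difference of free energies F(x+) - F(x-), where the free
            // energy is split between the layers as
            //     F(v) = visible_layer->energy(v)
            //            + hidden_layer->freeEnergyContribution(act(v)),
            // and, for a binomial hidden layer,
            //     freeEnergyContribution(a) = -sum_j log(1 + exp(a_j)).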
01518         }
01519         else
01520             PLERROR("RBMModule: unknown configuration to compute contrastive_divergence\n"
01521                     "(currently only possible if 'visible' is provided as input).\n");
01522         found_a_valid_configuration = true;
01523     }
01524 
01525     if (compare_true_gradient_with_cd) {
01526         PLCHECK_MSG(!partition_function_is_stale,
01527                 "The partition function must be computed for the comparison "
01528                 "between true gradient and contrastive divergence to work.");
01529         PLCHECK_MSG(visible && !visible_is_output, "'visible' must be provided as input");
01530         // Compute P(x_t|x) for all t and inputs x.
01531         int n_visible_conf = visible_layer->getConfigurationCount();
01532         int n_hidden_conf = hidden_layer->getConfigurationCount();
01533         p_xt_given_x.resize(n_visible_conf, visible->length());
01534         p_ht_given_x.resize(n_hidden_conf, visible->length());
01535         Vec input(visible_layer->size);
01536         Mat input_mat = input.toMat(1, input.length());
01537         Mat grad_nll(hidden_layer->size, visible_layer->size);
01538         Mat grad_cd(hidden_layer->size, visible_layer->size);
01539         Mat grad_stoch_cd(hidden_layer->size, visible_layer->size);
01540         Mat grad_first_term(hidden_layer->size, visible_layer->size);
01541         grad_nll.fill(0);
01542         if (median_reldiff_cd_nll_is_output)
01543             median_reldiff_cd_nll->resize(visible->length(), n_steps_compare);
01544         if (mean_diff_cd_nll_is_output)
01545             mean_diff_cd_nll->resize(visible->length(), n_steps_compare);
01546         if (agreement_cd_nll_is_output)
01547             agreement_cd_nll->resize(visible->length(), 2 * n_steps_compare);
01548         if (agreement_stoch_is_output)
01549             agreement_stoch->resize(visible->length(), n_steps_compare);
01550         real bound_coeff = MISSING_VALUE;
01551         if (bound_cd_nll_is_output || weights_stats_is_output) {
01552             if (bound_cd_nll_is_output)
01553                 bound_cd_nll->resize(visible->length(), n_steps_compare);
01554             if (weights_stats_is_output)
01555                 weights_stats->resize(visible->length(), 4);
01556             if (ratio_cd_leftout_is_output)
01557                 ratio_cd_leftout->resize(visible->length(), n_steps_compare);
01558             if (abs_cd_is_output)
01559                 abs_cd->resize(visible->length(), 2 * n_steps_compare);
01560             if (nll_grad_is_output)
01561                 nll_grad->resize(visible->length(),
01562                         visible_layer->size * hidden_layer->size);
01563             // Compute main bound coefficient:
01564             // (1 - N_x N_h sigm(-alpha)^d_x sigm(-beta)^d_h).
01565             PP<RBMMatrixConnection> matrix_conn =
01566                 (RBMMatrixConnection*) get_pointer(connection);
01567             PLCHECK(matrix_conn);
01568             Vec all_abs_weights_and_biases;
01569             // Compute alpha.
01570             real alpha = 0;
01571             for (int j = 0; j < hidden_layer->size; j++) {
01572                 real alpha_j = abs(hidden_layer->bias[j]);
01573                 all_abs_weights_and_biases.append(alpha_j);
01574                 for (int i = 0; i < visible_layer->size; i++) {
01575                     real abs_w_ij = abs(matrix_conn->weights(j, i));
01576                     alpha_j += abs_w_ij;
01577                     all_abs_weights_and_biases.append(abs_w_ij);
01578                 }
01579                 if (alpha_j > alpha)
01580                     alpha = alpha_j;
01581             }
01582             // Compute beta.
01583             real beta = 0;
01584             for (int i = 0; i < visible_layer->size; i++) {
01585                 real beta_i = abs(visible_layer->bias[i]);
01586                 all_abs_weights_and_biases.append(beta_i);
01587                 for (int j = 0; j < hidden_layer->size; j++)
01588                     beta_i += abs(matrix_conn->weights(j, i));
01589                 if (beta_i > beta)
01590                     beta = beta_i;
01591             }
01592             bound_coeff = 1 -
01593                 (visible_layer->getConfigurationCount() *
01594                     ipow(sigmoid(-alpha), visible_layer->size)) *
01595                 (hidden_layer->getConfigurationCount() *
01596                     ipow(sigmoid(-beta), hidden_layer->size));
01597             //pout << "bound_coeff = " << bound_coeff << endl;
01598             if (weights_stats_is_output) {
01599                 real med_weight = median(all_abs_weights_and_biases);
01600                 real mean_weight = mean(all_abs_weights_and_biases);
01601                 for (int i = 0; i < visible->length(); i++) {
01602                     (*weights_stats)(i, 0) = med_weight;
01603                     (*weights_stats)(i, 1) = mean_weight;
01604                     (*weights_stats)(i, 2) = alpha;
01605                     (*weights_stats)(i, 3) = beta;
01606                 }
01607             }
01608         }
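        // Illustrative reading of the bound above: alpha (resp. beta)
        // upper-bounds the total input magnitude |bias| + sum |w| into any
        // hidden (resp. visible) unit, so sigmoid(-alpha) lower-bounds every
        // conditional P(h_j = b | v) and sigmoid(-beta) every P(v_i = b | h).
        // bound_coeff is then used below as the geometric rate in the
        // N_x * bound_coeff^(t+1) bound on the CD-vs-NLL gap.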
01609         for (int i = 0; i < visible->length(); i++) {
01610             // Compute dF(visible)/dWij.
01611             PLASSERT_MSG( visible->length() == 1, "The comparison can "
01612                     "currently be made only with one input example at a "
01613                     "time" );
01614             computeHiddenActivations(*visible);
01615             hidden_layer->computeExpectations();
01616             transposeProduct(grad_first_term,
01617                     hidden_layer->getExpectations(),
01618                     *visible);
01619             // First compute P(h|x) for inputs x.
01620             computeAllHiddenProbabilities(*visible, p_ht_given_x);
01621             for (int t = 0; t < n_steps_compare; t++) {
01622                 // Compute P(x_t|x).
01623                 product(p_xt_given_x, all_visible_cond_prob, p_ht_given_x);
01624                 /*
01625                 pout << "P(x_" << (t + 1) << "|x) = " << endl << p_xt_given_x
01626                      << endl;
01627                      */
01628                 Vec colsum(p_xt_given_x.width());
01629                 columnSum(p_xt_given_x, colsum);
01630                 for (int j = 0; j < colsum.length(); j++) {
01631                     PLCHECK( is_equal(colsum[j], 1) );
01632                 }
01633                 //pout << "Sum = " << endl << colsum << endl;
01634                 int best_idx = argmax(p_xt_given_x.column(0).toVecCopy());
01635                 Vec tmp(visible_layer->size);
01636                 visible_layer->getConfiguration(best_idx, tmp);
01637                 /*
01638                 pout << "Best (P = " << p_xt_given_x.column(0)(best_idx, 0) <<
01639                     ") for x = " << (*visible)(0) << ":" <<
01640                     endl << tmp << endl;
01641                 */
01642                 int stoch_idx = -1;
01643                 if (abs_cd_is_output) {
01644                     grad_stoch_cd.fill(0);
01645                     // Pick a random X_t drawn from X_t | x.
01646                     stoch_idx = random_gen->multinomial_sample(
01647                             p_xt_given_x.toVecCopy());
01648                 }
01649                 // Compute E_{X_t}[dF(X_t)/dWij | x].
01650                 grad_cd.fill(0);
01651                 for (int k = 0; k < n_visible_conf; k++) {
01652                     visible_layer->getConfiguration(k, input);
01653                     computeHiddenActivations(input_mat);
01654                     hidden_layer->computeExpectations();
01655                     transposeProductScaleAcc(grad_cd,
01656                                              hidden_layer->getExpectations(),
01657                                              input_mat,
01658                                              -p_xt_given_x(k, 0),
01659                                              real(1));
01660                     if (t == 0) {
01661                         // Also compute the gradient for the NLL.
01662                         transposeProductScaleAcc(
01663                                 grad_nll,
01664                                 hidden_layer->getExpectations(),
01665                                 input_mat,
01666                                 -all_p_visible[k],
01667                                 real(1));
01668                     }
01669                     if (k == stoch_idx) {
01670                         transposeProduct(grad_stoch_cd,
01671                                 hidden_layer->getExpectations(),
01672                                 input_mat);
01673                         negateElements(grad_stoch_cd);
01674                     }
01675                 }
01676                 // Compute difference between CD and NLL updates.
01677                 Mat diff = grad_nll.copy();
01678                 diff -= grad_cd;
01679                 grad_cd += grad_first_term;
01680                 if (abs_cd_is_output) {
01681                     grad_stoch_cd += grad_first_term;
01682                 }
01683                 //pout << "Grad_CD_" << t+1 << "=" << endl << grad_cd << endl;
01684                 //pout << "Diff =" << endl << diff << endl;
01685                 // Compute average relative difference.
01686                 Vec all_relative_diffs;
01687                 Vec all_abs_diffs;
01688                 Vec all_ratios;
01689                 for (int p = 0; p < diff.length(); p++)
01690                     for (int q = 0; q < diff.width(); q++) {
01691                         all_abs_diffs.append(abs(diff(p, q)));
01692                         if (!fast_exact_is_equal(grad_nll(p, q), 0))
01693                             all_relative_diffs.append(abs(diff(p, q) / grad_nll(p, q)));
01694                         if (!fast_exact_is_equal(diff(p, q), 0))
01695                             all_ratios.append(abs(grad_cd(p, q) / diff(p, q)));
01696                     }
01697                 //pout << "All relative diffs: " << all_relative_diffs << endl;
01698                 if (median_reldiff_cd_nll_is_output) (*median_reldiff_cd_nll)(i, t) = median(all_relative_diffs);
01699                 if (mean_diff_cd_nll_is_output) (*mean_diff_cd_nll)(i, t) = mean(all_abs_diffs);
01700                 // Compute the fraction of parameters for which both updates
01701                 // agree.
01702                 int agree = 0;
01703                 int agree2 = 0;
01704                 int agree_stoch = 0;
01705                 real mean_abs_updates = 0;
01706                 real mean_abs_stoch_updates = 0;
01707                 for (int p = 0; p < grad_cd.length(); p++)
01708                     for (int q = 0; q < grad_cd.width(); q++) {
01709                         if (grad_cd(p, q) *
01710                                 (grad_first_term(p, q) + grad_nll(p, q)) >= 0)
01711                         {
01712                             agree++;
01713                         }
01714                         if (grad_cd(p, q) * diff(p, q) >= 0)
01715                             agree2++;
01716                         if (abs_cd_is_output) {
01717                             mean_abs_updates += abs(grad_cd(p, q));
01718                             mean_abs_stoch_updates += abs(grad_stoch_cd(p, q));
01719                         }
01720                         if (agreement_stoch_is_output &&
01721                                 grad_stoch_cd(p, q) *
01722                                 (grad_first_term(p, q) + grad_nll(p, q)) >= 0)
01723                         {
01724                             agree_stoch++;
01725                         }
01726                     }
01727                 mean_abs_updates /= real(grad_cd.size());
01728                 mean_abs_stoch_updates /= real(grad_cd.size());
01729                 if (agreement_cd_nll_is_output) {
01730                     (*agreement_cd_nll)(i, t) = agree / real(grad_cd.size());
01731                     (*agreement_cd_nll)(i, t + n_steps_compare) =
01732                         agree2 / real(grad_cd.size());
01733                 }
01734                 if (agreement_stoch_is_output)
01735                     (*agreement_stoch)(i, t) = agree_stoch / real(grad_cd.size());
01736                 if (bound_cd_nll_is_output)
01737                     (*bound_cd_nll)(i, t) =
01738                         visible_layer->getConfigurationCount() *
01739                         ipow(bound_coeff, t + 1);
01740                 if (ratio_cd_leftout_is_output) {
01741                     if (all_ratios.isEmpty())
01742                         (*ratio_cd_leftout)(i, t) = MISSING_VALUE;
01743                     else
01744                         (*ratio_cd_leftout)(i, t) = median(all_ratios);
01745                 }
01746                 if (abs_cd_is_output) {
01747                     (*abs_cd)(i, t) = mean_abs_updates;
01748                     (*abs_cd)(i, t + n_steps_compare) = mean_abs_stoch_updates;
01749                 }
01750                 /*
01751                 pout << "Median relative difference: "
01752                     << median(all_relative_diffs) << endl;
01753                 pout << "Mean relative difference: "
01754                     << mean(all_relative_diffs) << endl;
01755                     */
01756                 // If it is not the last step, update P(h_t|x).
01757                 if (t < n_steps_compare - 1)
01758                     product(p_ht_given_x, all_hidden_cond_prob, p_xt_given_x);
01759             }
01760             //pout << "P(x)=" << endl << all_p_visible << endl;
01761             grad_nll += grad_first_term;
01762             if (nll_grad_is_output) {
01763                 //real mean_nll_grad = 0;
01764                 int idx = 0;
01765                 for (int p = 0; p < grad_nll.length(); p++)
01766                     for (int q = 0; q < grad_nll.width(); q++, idx++)
01767                         (*nll_grad)(i, idx) = grad_nll(p, q);
01768                         //mean_nll_grad += abs(grad_nll(p, q));
01769                 //mean_nll_grad /= real(grad_nll.size());
01770                 //(*nll_grad)(i, 0) = mean_nll_grad;
01771             }
01772             //pout << "Grad_NLL=" << endl << grad_nll << endl;
01773             //pout << "Grad first term=" << endl << grad_first_term << endl;
01774         }
01775     }
01776 
01777     // Fill ports that are skipped during training with missing values.
01778     if (median_reldiff_cd_nll_is_output && median_reldiff_cd_nll->isEmpty()) {
01779         PLASSERT( during_training );
01780         median_reldiff_cd_nll->resize(visible->length(), n_steps_compare);
01781         median_reldiff_cd_nll->fill(MISSING_VALUE);
01782     }
01783     if (mean_diff_cd_nll_is_output && mean_diff_cd_nll->isEmpty()) {
01784         PLASSERT( during_training );
01785         mean_diff_cd_nll->resize(visible->length(), n_steps_compare);
01786         mean_diff_cd_nll->fill(MISSING_VALUE);
01787     }
01788     if (agreement_cd_nll_is_output && agreement_cd_nll->isEmpty()) {
01789         PLASSERT( during_training );
01790         agreement_cd_nll->resize(visible->length(), 2 * n_steps_compare);
01791         agreement_cd_nll->fill(MISSING_VALUE);
01792     }
01793     if (agreement_stoch_is_output && agreement_stoch->isEmpty()) {
01794         PLASSERT( during_training );
01795         agreement_stoch->resize(visible->length(), n_steps_compare);
01796         agreement_stoch->fill(MISSING_VALUE);
01797     }
01798     if (bound_cd_nll_is_output && bound_cd_nll->isEmpty()) {
01799         PLASSERT( during_training );
01800         bound_cd_nll->resize(visible->length(), n_steps_compare);
01801         bound_cd_nll->fill(MISSING_VALUE);
01802     }
01803     if (weights_stats_is_output && weights_stats->isEmpty()) {
01804         PLASSERT( during_training );
01805         weights_stats->resize(visible->length(), 4);
01806         weights_stats->fill(MISSING_VALUE);
01807     }
01808     if (ratio_cd_leftout_is_output && ratio_cd_leftout->isEmpty()) {
01809         PLASSERT( during_training );
01810         ratio_cd_leftout->resize(visible->length(), n_steps_compare);
01811         ratio_cd_leftout->fill(MISSING_VALUE);
01812     }
01813     if (abs_cd_is_output && abs_cd->isEmpty()) {
01814         PLASSERT( during_training );
01815         abs_cd->resize(visible->length(), 2 * n_steps_compare);
01816         abs_cd->fill(MISSING_VALUE);
01817     }
01818     if (nll_grad_is_output && nll_grad->isEmpty()) {
01819         PLASSERT( during_training );
01820         nll_grad->resize(visible->length(),
01821                          visible_layer->size * hidden_layer->size);
01822         nll_grad->fill(MISSING_VALUE);
01823     }
01824 
01825     // Workaround: an XXX.state port may not be needed by the current port
01826     // configuration, but state ports must always be filled, hence the dummy resizes.
01827     if (hidden_act && hidden_act->isEmpty())
01828         hidden_act->resize(1,1);
01829     if (visible_activation && visible_activation->isEmpty())
01830         visible_activation->resize(1,1);
01831     if (hidden && hidden->isEmpty())
01832         hidden->resize(1,1);
01833     if (visible_reconstruction && visible_reconstruction->isEmpty())
01834         visible_reconstruction->resize(1,1);
01835     if (visible_reconstruction_activations && visible_reconstruction_activations->isEmpty())
01836         visible_reconstruction_activations->resize(1,1);
01837     if (reconstruction_error && reconstruction_error->isEmpty())
01838         reconstruction_error->resize(1,1);
01839     if (negative_phase_visible_samples && negative_phase_visible_samples->isEmpty())
01840         negative_phase_visible_samples->resize(1,1);
01841     if (negative_phase_hidden_expectations && negative_phase_hidden_expectations->isEmpty())
01842         negative_phase_hidden_expectations->resize(1,1);
01843     if (negative_phase_hidden_activations && negative_phase_hidden_activations->isEmpty())
01844         negative_phase_hidden_activations->resize(1,1);
01845 
01846     // Reset some class fields to ensure they are not reused by mistake.
01847     hidden_act = NULL;
01848     hidden_bias = NULL;
01849     weights = NULL;
01850     hidden_activations_are_computed = false;
01851 
01852 
01853     if (!found_a_valid_configuration)
01854     {
01855         PLERROR("In RBMModule::fprop - Unknown port configuration for module %s", name.c_str());
01856     }
01857 
01858     checkProp(ports_value);
01859 
01860 }
01861 
01862 void RBMModule::computeNegLogPVisibleGivenPHidden(Mat visible, Mat hidden, Mat* neg_log_phidden, Mat& neg_log_pvisible_given_phidden)
01863 {
01864     computeVisibleActivations(hidden,true);
01865     int n_h = hidden.length();
01866     int T = visible.length();
01867     real default_neg_log_ph = safelog(real(n_h)); // default P(h)=1/Nh: -log(1/Nh) = log(Nh)
01868     Vec old_act = visible_layer->activation;
01869     neg_log_pvisible_given_phidden.resize(T,1);
01870     for (int t=0;t<T;t++)
01871     {
01872         Vec x_t = visible(t);
01873         real log_p_xt=0;
01874         for (int i=0;i<n_h;i++)
01875         {
01876             visible_layer->activation = visible_layer->activations(i);
01877             real neg_log_p_xt_given_hi = visible_layer->fpropNLL(x_t);
01878             real neg_log_p_hi = neg_log_phidden?(*neg_log_phidden)(i,0):default_neg_log_ph;
01879             if (i==0)
01880                 log_p_xt = -(neg_log_p_xt_given_hi + neg_log_p_hi);
01881             else
01882                 log_p_xt = logadd(log_p_xt, -(neg_log_p_xt_given_hi + neg_log_p_hi));
01883         }
01884         neg_log_pvisible_given_phidden(t,0) = -log_p_xt;
01885     }
01886     visible_layer->activation = old_act;
01887 }
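// Illustrative sketch of the accumulation above: log P(x_t) is built up as
//     log P(x_t) = logadd_i [ log P(x_t | h_i) + log P(h_i) ],
// where logadd(a,b) = log(exp(a) + exp(b)) is evaluated in the log domain
// (essentially max(a,b) + log1p(exp(-|a - b|))) to avoid underflow when the
// individual log-probabilities are very negative.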
01888 
01889 ////////////////////
01890 // bpropAccUpdate //
01891 ////////////////////
01892 void RBMModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
01893                                const TVec<Mat*>& ports_gradient)
01894 {
01895     PLASSERT( ports_value.length() == nPorts() );
01896     PLASSERT( ports_gradient.length() == nPorts() );
01897     Mat* visible = ports_value[getPortIndex("visible")];
01898     Mat* visible_grad = ports_gradient[getPortIndex("visible")];
01899     Mat* hidden_grad = ports_gradient[getPortIndex("hidden.state")];
01900     Mat* hidden_activations_grad =
01901         ports_gradient[getPortIndex("hidden_activations.state")];
01902     Mat* hidden = ports_value[getPortIndex("hidden.state")];
01903     hidden_act = ports_value[getPortIndex("hidden_activations.state")];
01904     Mat* visible_activations = ports_value[getPortIndex("visible_activations.state")];
01905     Mat* reconstruction_error_grad = 0;
01906     Mat* hidden_bias_grad = ports_gradient[getPortIndex("hidden_bias")];
01907     weights = ports_value[getPortIndex("weights")];
01908     Mat* weights_grad = ports_gradient[getPortIndex("weights")];
01909     hidden_bias = ports_value[getPortIndex("hidden_bias")];
01910     Mat* energy_grad = ports_gradient[getPortIndex("energy")];
01911     Mat* contrastive_divergence_grad = NULL;
01912     Mat* contrastive_divergence = NULL;
01913     if (compute_contrastive_divergence)
01914         contrastive_divergence = ports_value[getPortIndex("contrastive_divergence")];
01915     bool computed_contrastive_divergence = compute_contrastive_divergence &&
01916         contrastive_divergence && !contrastive_divergence->isEmpty();
01917 
01918     // Ensure the gradient w.r.t. contrastive divergence is 1 (if provided).
01919     if (computed_contrastive_divergence) {
01920         contrastive_divergence_grad =
01921             ports_gradient[getPortIndex("contrastive_divergence")];
01922         if (contrastive_divergence_grad) {
01923             PLASSERT( !contrastive_divergence_grad->isEmpty() );
01924             PLASSERT( min(*contrastive_divergence_grad) >= 1 );
01925             PLASSERT( max(*contrastive_divergence_grad) <= 1 );
01926         }
01927     }
01928 
01929     if(reconstruction_connection)
01930         reconstruction_error_grad =
01931             ports_gradient[getPortIndex("reconstruction_error.state")];
01932 
01933     // Ensure the visible gradient is not provided as input. This is because we
01934     // accumulate more than once in 'visible_grad'.
01935 //    PLASSERT_MSG( !visible_grad || visible_grad->isEmpty(), "If visible gradient is desired "
01936 //                  " the corresponding matrix should have 0 length" );
01937 
01938     bool compute_visible_grad = visible_grad && visible_grad->isEmpty();
01939     bool compute_hidden_grad = hidden_grad && hidden_grad->isEmpty();
01940     bool compute_weights_grad = weights_grad && weights_grad->isEmpty();
01941     bool provided_hidden_grad = hidden_grad && !hidden_grad->isEmpty();
01942     bool provided_hidden_act_grad = hidden_activations_grad &&
01943                                     !hidden_activations_grad->isEmpty();
01944 
01945     int mbs = (visible && !visible->isEmpty()) ? visible->length() : -1;
01946 
01947     // BPROP of UPWARD FPROP
01948     if (provided_hidden_grad || provided_hidden_act_grad)
01949     {
01950         // Note: the assert below is for behavior compatibility with previous
01951         // code. It might not be necessary, or might need to be modified.
01952         PLASSERT( visible && !visible->isEmpty() );
01953 
01954         // Note: we need to perform the following steps even if the gradient
01955         // learning rate is equal to 0. This is because we must propagate the
01956         // gradient to the visible layer, even though no update is required.
01957         if (tied_connection_weights)
01958            setLearningRatesOnlyForLayers(grad_learning_rate);
01959         else
01960            setAllLearningRates(grad_learning_rate);
01961 
01962         PLASSERT_MSG( hidden && hidden_act ,
01963                       "To compute gradients in bprop, the "
01964                       "hidden_activations.state port must have been filled "
01965                       "during fprop" );
01966 
01967         // Compute gradient w.r.t. activations of the hidden layer.
01968         if (provided_hidden_grad)
01969             hidden_layer->bpropUpdate(
01970                     *hidden_act, *hidden, hidden_act_grad, *hidden_grad,
01971                     false);
01972         if (provided_hidden_act_grad) {
01973             if (!provided_hidden_grad) {
01974                 // 'hidden_act_grad' will not have been resized nor filled yet,
01975                 // so we need to do it now.
01976                 hidden_act_grad.resize(hidden_activations_grad->length(),
01977                                        hidden_activations_grad->width());
01978                 hidden_act_grad.clear();
01979             }
01980             hidden_act_grad += *hidden_activations_grad;
01981         }
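        // Illustrative sketch, assuming a binomial hidden layer: the
        // bpropUpdate call above backpropagates through h = sigmoid(a), i.e.
        //     dC/da_j = dC/dh_j * h_j * (1 - h_j),
        // storing the result in hidden_act_grad, to which any externally
        // provided gradient on hidden_activations.state is simply added.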
01982 
01983         if (hidden_bias_grad)
01984         {
01985             PLASSERT( hidden_bias_grad->isEmpty() &&
01986                       hidden_bias_grad->width() == hidden_layer->size );
01987             hidden_bias_grad->resize(mbs,hidden_layer->size);
01988             *hidden_bias_grad += hidden_act_grad;
01989         }
01990         // Compute gradient w.r.t. expectations of the visible layer (=
01991         // inputs).
01992         Mat* store_visible_grad = NULL;
01993         if (compute_visible_grad) {
01994             PLASSERT( visible_grad->width() == visible_layer->size );
01995             store_visible_grad = visible_grad;
01996         } else {
01997             // We do not actually need to store the gradient, but since it
01998             // is required in bpropUpdate, we provide a dummy matrix to
01999             // store it.
02000             store_visible_grad = &visible_exp_grad;
02001         }
02002         store_visible_grad->resize(mbs,visible_layer->size);
02003 
02004         if (weights)
02005         {
02006             int up = connection->up_size;
02007             int down = connection->down_size;
02008             PLASSERT( !weights->isEmpty() &&
02009                       weights_grad && weights_grad->isEmpty() &&
02010                       weights_grad->width() == up * down );
02011             weights_grad->resize(mbs, up * down);
02012             Mat w, wg;
02013             Vec v,h,vg,hg;
02014             for(int i=0; i<mbs; i++)
02015             {
02016                 w = Mat(up, down,(*weights)(i));
02017                 wg = Mat(up, down,(*weights_grad)(i));
02018                 v = (*visible)(i);
02019                 h = (*hidden_act)(i);
02020                 vg = (*store_visible_grad)(i);
02021                 hg = hidden_act_grad(i);
02022                 connection->petiteCulotteOlivierUpdate(
02023                     v,
02024                     w,
02025                     h,
02026                     vg,
02027                     wg,
02028                     hg,true);
02029             }
02030         }
02031         else
02032         {
02033             connection->bpropUpdate(
02034                 *visible, *hidden_act, *store_visible_grad,
02035                 hidden_act_grad, true);
02036         }
02037         partition_function_is_stale = true;
02038     }
02039 
02040     // BPROP of DOWNWARD FPROP
02041     if (compute_hidden_grad && visible_grad && !compute_visible_grad)
02042     {
02043         PLASSERT(visible && !visible->isEmpty());
02044         PLASSERT(visible_activations && !visible_activations->isEmpty());
02045         PLASSERT(hidden && !hidden->isEmpty());
02046         setAllLearningRates(grad_learning_rate);
02047         visible_layer->bpropUpdate(*visible_activations,
02048                                    *visible, visible_act_grad, *visible_grad,
02049                                    false);
02050 
02051 //        PLASSERT_MSG(!visible_bias_grad,"back-prop into visible bias  not implemented for downward fprop");
02052 //        PLASSERT_MSG(!weights_grad,"back-prop into weights  not implemented for downward fprop");
02053 //        hidden_grad->resize(mbs,hidden_layer->size);
02054         TVec<Mat*> ports_value(2);
02055         TVec<Mat*> ports_gradient(2);
02056         ports_value[0] = visible_activations;
02057         ports_value[1] = hidden;
02058         ports_gradient[0] = &visible_act_grad;
02059         ports_gradient[1] = hidden_grad;
02060         connection->bpropAccUpdate(ports_value,ports_gradient);
02061     }
02062 
02063     if (cd_learning_rate > 0 && minimize_log_likelihood) {
02064         PLASSERT( visible && !visible->isEmpty() );
02065         PLASSERT( hidden && !hidden->isEmpty() );
02066         if (tied_connection_weights)
02067            setLearningRatesOnlyForLayers(cd_learning_rate);
02068         else
02069            setAllLearningRates(cd_learning_rate);
02070 
02071         // positive phase
02072         visible_layer->accumulatePosStats(*visible);
02073         hidden_layer->accumulatePosStats(*hidden);
02074         connection->accumulatePosStats(*visible,*hidden);
02075 
02076         // negative phase
02077         PLCHECK_MSG(hidden_layer->size<32 || visible_layer->size<32,
02078                      "To minimize exact log-likelihood of an RBM, hidden_layer->size "
02079                      "or visible_layer->size must be <32");
02080         // gradient of partition function
02081         if (hidden_layer->size > visible_layer->size)
02082             // do it by summing over visible configurations
02083         {
02084             PLASSERT(visible_layer->classname()=="RBMBinomialLayer");
02085             // assuming a binary input we sum over all bit configurations
02086             int n_configurations = 1 << visible_layer->size; // = 2^{visible_layer->size}
02087             energy_inputs.resize(1, visible_layer->size);
02088             Vec input = energy_inputs(0);
02089             // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
02090             // AT ONCE IN A 'MINIBATCH'
02091             for (int c=0;c<n_configurations;c++)
02092             {
02093                 // convert integer c into a bit-wise visible representation
02094                 int x=c;
02095                 for (int i=0;i<visible_layer->size;i++)
02096                 {
02097                     input[i]= x & 1; // take least significant bit
02098                     x >>= 1; // and shift right (divide by 2)
02099                 }
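                // e.g. c = 6 (binary 110) with visible_layer->size = 3 gives
                // input = [0, 1, 1], least significant bit first.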
02100                 connection->setAsDownInput(input);
02101                 hidden_layer->getAllActivations(connection,0,false);
02102                 hidden_layer->computeExpectation();
02103                 visible_layer->accumulateNegStats(input);
02104                 hidden_layer->accumulateNegStats(hidden_layer->expectation);
02105                 connection->accumulateNegStats(input,hidden_layer->expectation);
02106             }
02107         }
02108         else
02109         {
02110             PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
02111             // assuming a binary hidden we sum over all bit configurations
02112             int n_configurations = 1 << hidden_layer->size; // = 2^{hidden_layer->size}
02113             energy_inputs.resize(1, hidden_layer->size);
02114             Vec h = energy_inputs(0);
02115             for (int c=0;c<n_configurations;c++)
02116             {
02117                 // convert integer c into a bit-wise hidden representation
02118                 int x=c;
02119                 for (int i=0;i<hidden_layer->size;i++)
02120                 {
02121                     h[i]= x & 1; // take least significant bit
02122                     x >>= 1; // and shift right (divide by 2)
02123                 }
02124                 connection->setAsUpInput(h);
02125                 visible_layer->getAllActivations(connection,0,false);
02126                 visible_layer->computeExpectation();
02127                 visible_layer->accumulateNegStats(visible_layer->expectation);
02128                 hidden_layer->accumulateNegStats(h);
02129                 connection->accumulateNegStats(visible_layer->expectation,h);
02130             }
02131         }
02132         // update
02133         visible_layer->update();
02134         hidden_layer->update();
02135         connection->update();
02136     }
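    // Illustrative sketch of the update above: the accumulated positive and
    // negative statistics implement the exact log-likelihood gradient
    //     d log P(v) / d theta =
    //         E_data[ d(-E)/d theta ] - E_model[ d(-E)/d theta ],
    // where the (otherwise intractable) model expectation is computed by
    // enumerating all 2^d configurations of the smaller layer.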
02137     if (cd_learning_rate > 0 && !minimize_log_likelihood) {
02138         EXTREME_MODULE_LOG << "Performing contrastive divergence step in RBM '"
02139                            << name << "'" << endl;
02140         // Perform a step of contrastive divergence.
02141         PLASSERT( visible && !visible->isEmpty() );
02142         if (tied_connection_weights)
02143            setLearningRatesOnlyForLayers(cd_learning_rate);
02144         else
02145            setAllLearningRates(cd_learning_rate);
02146         Mat* negative_phase_visible_samples =
02147             computed_contrastive_divergence?ports_value[getPortIndex("negative_phase_visible_samples.state")]:0;
02148         const Mat* negative_phase_hidden_expectations =
02149             computed_contrastive_divergence ?
02150                 ports_value[getPortIndex("negative_phase_hidden_expectations.state")]
02151                 : NULL;
02152         Mat* negative_phase_hidden_activations =
02153             computed_contrastive_divergence ?
02154                 ports_value[getPortIndex("negative_phase_hidden_activations.state")]
02155                 : NULL;
02156 
02157         PLASSERT( visible && hidden );
02158         PLASSERT( !negative_phase_visible_samples ||
02159                   !negative_phase_visible_samples->isEmpty() );
02160 
02161         Mat vis_expect_ptr;
02162         if (!negative_phase_visible_samples)
02163         {
02164             // Generate hidden samples.
02165             hidden_layer->setExpectations(*hidden);
02166             for( int i=0; i<n_Gibbs_steps_CD; i++)
02167             {
02168                 hidden_layer->generateSamples();
02169                 if (deterministic_reconstruction_in_cd)
02170                 {
02171                    // (Negative phase) compute visible expectations
02172                    computeVisibleActivations(hidden_layer->samples);
02173                    visible_layer->computeExpectations();
02174                    // compute corresponding hidden expectations.
02175                    computeHiddenActivations(visible_layer->getExpectations());
02176                 }
02177                 else // classical CD learning
02178                 {
02179                    // (Negative phase) Generate visible samples.
02180                    sampleVisibleGivenHidden(hidden_layer->samples);
02181                    // compute corresponding hidden expectations.
02182                    computeHiddenActivations(visible_layer->samples);
02183                 }
02184                 hidden_layer->computeExpectations();
02185             }
02186             PLASSERT( !computed_contrastive_divergence );
02187             PLASSERT( !negative_phase_hidden_expectations );
02188             PLASSERT( !negative_phase_hidden_activations );
02189             if (deterministic_reconstruction_in_cd) {
02190                 vis_expect_ptr = visible_layer->getExpectations();
02191                 negative_phase_visible_samples = &vis_expect_ptr;
02192             }
02193             else // classical CD learning
02194                negative_phase_visible_samples = &(visible_layer->samples);
02195             negative_phase_hidden_activations = &(hidden_layer->activations);
02196             negative_phase_hidden_expectations = &(hidden_layer->getExpectations());
02197         }
02198         PLASSERT( negative_phase_hidden_expectations &&
02199                   !negative_phase_hidden_expectations->isEmpty() );
02200         PLASSERT( negative_phase_hidden_activations &&
02201                   !negative_phase_hidden_activations->isEmpty() );
02202 
02203         // Perform update.
02204         visible_layer->update(*visible, *negative_phase_visible_samples);
02205 
02206         bool connection_update_is_done = false;
02207         if (compute_weights_grad) {
02208             // First resize the 'weights_grad' matrix.
02209             int up = connection->up_size;
02210             int down = connection->down_size;
02211             PLASSERT( weights && !weights->isEmpty() &&
02212                       weights_grad->width() == up * down );
02213             weights_grad->resize(mbs, up * down);
02214 
02215             if (standard_cd_weights_grad)
02216             {
02217                 // Perform both computation of weights gradient and do update
02218                 // at the same time.
02219                 Mat wg;
02220                 Vec vp, hp, vn, hn;
02221                 for(int i=0; i<mbs; i++)
02222                 {
02223                     vp = (*visible)(i);
02224                     hp = (*hidden)(i);
02225                     vn = (*negative_phase_visible_samples)(i);
02226                     hn = (*negative_phase_hidden_expectations)(i);
02227                     wg = Mat(up, down,(*weights_grad)(i));
02228                     connection->petiteCulotteOlivierCD(
02229                             vp, hp,
02230                             vn,
02231                             hn,
02232                             wg,
02233                             true);
02234                     connection_update_is_done = true;
02235                 }
02236             }
02237         }
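        // Editor's note: for reference, a hedged sketch of the statistic the
        // loop above accumulates. Per example and per weight (i,j), standard
        // CD-1 uses
        //
        //     d(CD)/dW_ij  ~=  hn_i * vn_j  -  hp_i * vp_j
        //
        // (negative-phase minus positive-phase pairwise statistics); judging
        // from the surrounding comments, 'petiteCulotteOlivierCD' both writes
        // this row of the gradient into 'wg' and applies the weight update.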
02238         if (!standard_cd_weights_grad || !standard_cd_grad) {
02239             // Compute the 'true' gradient of the contrastive divergence
02240             // w.r.t. the weights matrix.
02241             int up = connection->up_size;
02242             int down = connection->down_size;
02243             Mat* weights_g = weights_grad;
02244             if (!weights_g) {
02245                 // We need to store the gradient in another matrix.
02246                 store_weights_grad.resize(mbs, up * down);
02247                 store_weights_grad.clear();
02248                 weights_g = & store_weights_grad;
02249             }
02250             PLASSERT( connection->classname() == "RBMMatrixConnection" &&
02251                       visible_layer->classname() == "RBMBinomialLayer" &&
02252                       hidden_layer->classname() == "RBMBinomialLayer" );
02253 
02254             for (int k = 0; k < mbs; k++) {
02255                 int idx = 0;
02256                 for (int i = 0; i < up; i++) {
02257                     real p_i_p = (*hidden)(k, i);
02258                     real a_i_p = (*hidden_act)(k, i);
02259                     real p_i_n =
02260                         (*negative_phase_hidden_expectations)(k, i);
02261                     real a_i_n =
02262                         (*negative_phase_hidden_activations)(k, i);
02263 
02264                     real scale_p = 1 + (1 - p_i_p) * a_i_p;
02265                     real scale_n = 1 + (1 - p_i_n) * a_i_n;
02266                     for (int j = 0; j < down; j++, idx++) {
02267                         // Weight 'idx' is the (i,j)-th element in the
02268                         // 'weights' matrix.
02269                         real v_j_p = (*visible)(k, j);
02270                         real v_j_n =
02271                             (*negative_phase_visible_samples)(k, j);
02272                         (*weights_g)(k, idx) +=
02273                             p_i_n * v_j_n * scale_n     // Negative phase.
02274                             -(p_i_p * v_j_p * scale_p); // Positive phase.
02275                     }
02276                 }
02277             }
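            // Editor's note: an algebraic check of the 'scale' factors above,
            // for binomial units with p_i = sigmoid(a_i), a_i = sum_j W_ij v_j + b_i:
            //
            //     d/dW_ij [ p_i * a_i ] = p_i*(1-p_i)*v_j*a_i + p_i*v_j
            //                           = p_i * v_j * (1 + (1 - p_i) * a_i)
            //
            // which is exactly p * v * scale as accumulated in the loop; the
            // extra p*(1-p)*a term accounts for the dependence of the hidden
            // expectation itself on the weights, on top of the standard CD
            // statistic p_i * v_j.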
02278             if (!standard_cd_grad && !tied_connection_weights) {
02279                 // Update connection manually.
02280                 Mat& weights = ((RBMMatrixConnection*)
02281                                 get_pointer(connection))->weights;
02282                 real lr = cd_learning_rate / mbs;
02283                 for (int k = 0; k < mbs; k++) {
02284                     int idx = 0;
02285                     for (int i = 0; i < up; i++)
02286                         for (int j = 0; j < down; j++, idx++)
02287                             weights(i, j) -= lr * (*weights_g)(k, idx);
02288                 }
02289                 connection_update_is_done = true;
02290             }
02291         }
02292         if (!connection_update_is_done)
02293             connection->update(*visible, *hidden,
02294                     *negative_phase_visible_samples,
02295                     *negative_phase_hidden_expectations);
02296 
02297         Mat* hidden_bias_g = hidden_bias_grad;
02298         if (!standard_cd_grad && !hidden_bias_grad) {
02299             // We need to compute the CD gradient w.r.t. bias of hidden layer,
02300             // but there is no bias coming from the outside. Thus we need
02301             // another matrix to store this gradient.
02302             store_hidden_bias_grad.resize(mbs, hidden_layer->size);
02303             store_hidden_bias_grad.clear();
02304             hidden_bias_g = & store_hidden_bias_grad;
02305         }
02306 
02307         if (hidden_bias_g)
02308         {
02309             if (hidden_bias_g->isEmpty()) {
02310                 PLASSERT(hidden_bias_g->width() == hidden_layer->size);
02311                 hidden_bias_g->resize(mbs,hidden_layer->size);
02312             }
02313             PLASSERT_MSG( hidden_layer->classname() == "RBMBinomialLayer" &&
02314                           visible_layer->classname() == "RBMBinomialLayer",
02315                           "Only implemented for binomial layers" );
02316             // d(contrastive_divergence)/dhidden_bias
02317             for (int k = 0; k < hidden_bias_g->length(); k++) {
02318                 for (int i = 0; i < hidden_bias_g->width(); i++) {
02319                     real p_i_p = (*hidden)(k, i);
02320                     real a_i_p = (*hidden_act)(k, i);
02321                     real p_i_n = (*negative_phase_hidden_expectations)(k, i);
02322                     real a_i_n = (*negative_phase_hidden_activations)(k, i);
02323                     (*hidden_bias_g)(k, i) +=
02324                         standard_cd_bias_grad ? p_i_n - p_i_p :
02325                         p_i_n * (1 - p_i_n) * a_i_n + p_i_n     // Neg. phase
02326                      -( p_i_p * (1 - p_i_p) * a_i_p + p_i_p );  // Pos. phase
02327 
02328                 }
02329             }
02330         }
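        // Editor's note: the two bias-gradient variants above, written out
        // for binomial hidden units (p = sigmoid(a)):
        //
        //     standard_cd_bias_grad:  d(CD)/db_i ~= pn_i - pp_i
        //     full variant:           d(CD)/db_i ~= (pn_i*(1-pn_i)*an_i + pn_i)
        //                                         - (pp_i*(1-pp_i)*ap_i + pp_i)
        //
        // The extra p*(1-p)*a terms mirror the weight-gradient 'scale'
        // factors, since per phase p_i + p_i*(1-p_i)*a_i = d/db_i [ p_i * a_i ].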
02331 
02332         if (standard_cd_grad) {
02333             hidden_layer->update(*hidden, *negative_phase_hidden_expectations);
02334         } else {
02335             PLASSERT( hidden_layer->classname() == "RBMBinomialLayer" );
02336             // Update hidden layer by hand.
02337             Vec& bias = hidden_layer->bias;
02338             real lr = cd_learning_rate / mbs;
02339             for (int i = 0; i < mbs; i++)
02340                 bias -= lr * (*hidden_bias_g)(i);
02341         }
02342 
02343         partition_function_is_stale = true;
02344     } else {
02345         PLCHECK_MSG( !contrastive_divergence_grad ||
02346                      (!hidden_bias_grad && !weights_grad),
02347                 "You currently cannot compute the "
02348                 "gradient of contrastive divergence w.r.t. external ports "
02349                 "when 'cd_learning_rate' is set to 0" );
02350     }
02351 
02352     if (reconstruction_error_grad && !reconstruction_error_grad->isEmpty()) {
02353         if (tied_connection_weights)
02354            setLearningRatesOnlyForLayers(grad_learning_rate);
02355         else
02356            setAllLearningRates(grad_learning_rate);
02357         PLASSERT( reconstruction_connection != 0 );
02358         // Perform gradient descent on the autoassociator reconstruction cost.
02359         Mat* visible_reconstruction = ports_value[getPortIndex("visible_reconstruction.state")];
02360         Mat* visible_reconstruction_activations = ports_value[getPortIndex("visible_reconstruction_activations.state")];
02361         Mat* reconstruction_error = ports_value[getPortIndex("reconstruction_error.state")];
02362         PLASSERT( hidden != 0 );
02363         PLASSERT( visible  && hidden_act &&
02364                   visible_reconstruction && visible_reconstruction_activations &&
02365                   reconstruction_error);
02366         //int mbs = reconstruction_error_grad->length();
02367 
02368         PLCHECK_MSG( !weights, "In RBMModule::bpropAccUpdate(): reconstruction cost "
02369                      "for conditional weights is not implemented");
02370 
02371         // Backprop reconstruction gradient
02372 
02373         // Must overwrite visible_layer's expectations with the reconstruction.
02374         visible_layer->getExpectations() << *visible_reconstruction;
02375         visible_layer->bpropNLL(*visible,*reconstruction_error,
02376                                 visible_act_grad);
02377 
02378         // Combine with incoming gradient
02379         PLASSERT( (*reconstruction_error_grad).width() == 1 );
02380         for (int t=0;t<mbs;t++)
02381             visible_act_grad(t) *= (*reconstruction_error_grad)(t,0);
02382 
02383         // Visible bias update
02384         columnMean(visible_act_grad, visible_bias_grad);
02385         visible_layer->update(visible_bias_grad);
02386 
02387         // Reconstruction connection update
02388         hidden_exp_grad.resize(mbs, hidden_layer->size);
02389         hidden_exp_grad.clear();
02390         hidden_exp_grad.resize(0, hidden_layer->size);
02391 
02392         TVec<Mat*> rec_ports_value(2);
02393         rec_ports_value[0] = visible_reconstruction_activations;
02394         rec_ports_value[1] = hidden;
02395         TVec<Mat*> rec_ports_gradient(2);
02396         rec_ports_gradient[0] = &visible_act_grad;
02397         rec_ports_gradient[1] = &hidden_exp_grad;
02398 
02399         reconstruction_connection->bpropAccUpdate( rec_ports_value,
02400                                                    rec_ports_gradient );
02401 
02402         // UGLY HACK: this breaks the rule that RBMModule can be called in different
02403         // contexts and fprop/bprop orders, but it is necessary when 'hidden' was an input.
02404         if (hidden_is_output)
02405         {
02406             // Hidden layer bias update
02407             hidden_layer->bpropUpdate(*hidden_act,
02408                                       *hidden, hidden_act_grad,
02409                                       hidden_exp_grad, false);
02410             if (hidden_bias_grad)
02411             {
02412                 if (hidden_bias_grad->isEmpty()) {
02413                     PLASSERT( hidden_bias_grad->width() == hidden_layer->size );
02414                     hidden_bias_grad->resize(mbs,hidden_layer->size);
02415                 }
02416                 *hidden_bias_grad += hidden_act_grad;
02417             }
02418             // Connection update
02419             if(compute_visible_grad)
02420             {
02421                 // The length of 'visible_grad' must be either 0 (if it was
02422                 // not computed previously) or the mini-batch size (otherwise).
02423                 PLASSERT( visible_grad->width() == visible_layer->size &&
02424                           (visible_grad->length() == 0 ||
02425                            visible_grad->length() == mbs) );
02426                 visible_grad->resize(mbs, visible_grad->width());
02427                 connection->bpropUpdate(
02428                     *visible, *hidden_act,
02429                     *visible_grad, hidden_act_grad, true);
02430             }
02431             else
02432             {
02433                 visible_exp_grad.resize(mbs,visible_layer->size);
02434                 connection->bpropUpdate(
02435                     *visible, *hidden_act,
02436                     visible_exp_grad, hidden_act_grad, true);
02437             }
02438         }
02439         else if (hidden_grad && hidden_grad->isEmpty()) // copy the hidden gradient
02440         {
02441             hidden_grad->resize(mbs,hidden_layer->size);
02442             *hidden_grad << hidden_exp_grad;
02443         }
02444 
02445         partition_function_is_stale = true;
02446     }
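    // Editor's note: a hedged sketch of the chain rule implemented by the
    // reconstruction block above, with r the reconstruction and
    // C = sum_t reconstruction_error_grad(t) * NLL(v_t, r_t):
    //
    //     dC/da_visible : visible_layer->bpropNLL(...)     -> visible_act_grad
    //     dC/db_visible : columnMean(...) + visible_layer->update(...)
    //     dC/dW_rec, dC/dh : reconstruction_connection->bpropAccUpdate(...)
    //     dC/da_hidden, dC/db_hidden : hidden_layer->bpropUpdate(...) and
    //         connection->bpropUpdate(...), only when 'hidden' was computed
    //         here (hidden_is_output) rather than supplied as an input port.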
02447 
02448     if (energy_grad && !energy_grad->isEmpty() &&
02449         visible_grad && visible_grad->isEmpty())
02450         // compute the gradient of the free-energy wrt input
02451     {
02452         // very cheap shot, specializing to the common case...
02453         PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
02454         PLASSERT(visible_layer->classname()=="RBMBinomialLayer" ||
02455                  visible_layer->classname()=="RBMGaussianLayer");
02456         PLASSERT(connection->classname()=="RBMMatrixConnection");
02457         PLASSERT(hidden && !hidden->isEmpty());
02458         // FE(x) = -b'x - sum_i softplus(hidden_layer->activation[i])
02459         // dFE(x)/dx = -b - sum_i sigmoid(hidden_layer->activation[i]) W_i
02460         // dC/dxt = -b dC/dFE - dC/dFE sum_i p_ti W_i
02461         int mbs=energy_grad->length();
02462         visible_grad->resize(mbs,visible_layer->size);
02463         Mat& weights = ((RBMMatrixConnection*)
02464                         get_pointer(connection))->weights;
02465         bool same_dC_dFE=true;
02466         real dC_dFE=(*energy_grad)(0,0);
02467         const Mat& p = *hidden;
02468         for (int t=0;t<mbs;t++)
02469         {
02470             real new_dC_dFE=(*energy_grad)(t,0);
02471             if (new_dC_dFE!=dC_dFE)
02472                 same_dC_dFE=false;
02473             dC_dFE = new_dC_dFE;
02474             multiplyAcc((*visible_grad)(t),visible_layer->bias,-dC_dFE);
02475         }
02476         if (same_dC_dFE)
02477             productScaleAcc(*visible_grad, p, false, weights, false, -dC_dFE,
02478                             real(1));
02479         else
02480             for (int t=0;t<mbs;t++)
02481                 productScaleAcc((*visible_grad)(t), weights, true, p(t),
02482                         -(*energy_grad)(t, 0), real(1));
02483     }
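    // Editor's note: in matrix form, the code above computes (sketch)
    //
    //     visible_grad(t) = -dC/dFE(t) * ( b' + p(t)' W )
    //
    // with p the (mbs x n_hidden) matrix of hidden expectations, W the
    // (n_hidden x n_visible) weight matrix and b the visible bias. When
    // dC/dFE is constant over the mini-batch, a single product p * W
    // suffices; otherwise each row is scaled by its own -dC/dFE(t).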
02484 
02485     // Explicit error message in the case of the 'visible' port.
02486     if (compute_visible_grad && visible_grad->isEmpty())
02487         PLERROR("In RBMModule::bpropAccUpdate - The gradient with respect "
02488                 "to the 'visible' port was requested, but not computed");
02489 
02490     checkProp(ports_gradient);
02491 
02492     // Reset pointers to ensure we do not reuse them by mistake.
02493     hidden_act = NULL;
02494     weights = NULL;
02495     hidden_bias = NULL;
02496 }
02497 
02498 ////////////
02499 // forget //
02500 ////////////
02501 void RBMModule::forget()
02502 {
02503     DBG_MODULE_LOG << "Forgetting RBMModule '" << name << "'" << endl;
02504     PLASSERT( hidden_layer && visible_layer && connection );
02505     hidden_layer->forget();
02506     visible_layer->forget();
02507     connection->forget();
02508     if (reconstruction_connection && reconstruction_connection != connection)
02509         // Avoid calling forget() twice when the two connections are the same object.
02510         reconstruction_connection->forget();
02511     Gibbs_step = 0;
02512     partition_function_is_stale = true;
02513 }
02514 
02515 //////////////////
02516 // getPortIndex //
02517 //////////////////
02518 int RBMModule::getPortIndex(const string& port)
02519 {
02520     map<string, int>::const_iterator it = portname_to_index.find(port);
02521     if (it == portname_to_index.end())
02522         return -1;
02523     else
02524         return it->second;
02525 }
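// Editor's note: a minimal, hypothetical usage sketch of the port lookup
// (mirroring how 'ports_value' is indexed elsewhere in this file):
//
//     int idx = module->getPortIndex("hidden.state");
//     if (idx >= 0)                        // -1 means: no such port
//         Mat* hidden = ports_value[idx];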
02526 
02527 //////////////
02528 // getPorts //
02529 //////////////
02530 const TVec<string>& RBMModule::getPorts()
02531 {
02532     return ports;
02533 }
02534 
02535 //////////////////
02536 // getPortSizes //
02537 //////////////////
02538 const TMat<int>& RBMModule::getPortSizes()
02539 {
02540     return port_sizes;
02541 }
02542 
02543 //////////////////////
02544 // bpropDoesNothing //
02545 //////////////////////
02546 /* THIS METHOD IS OPTIONAL
02547 bool RBMModule::bpropDoesNothing()
02548 {
02549 }
02550 */
02551 
02552 /////////////////////////
02553 // setAllLearningRates //
02554 /////////////////////////
02555 void RBMModule::setAllLearningRates(real lr)
02556 {
02557     hidden_layer->setLearningRate(lr);
02558     visible_layer->setLearningRate(lr);
02559     connection->setLearningRate(lr);
02560     if(reconstruction_connection)
02561         reconstruction_connection->setLearningRate(lr);
02562 }
02563 
02564 void RBMModule::setLearningRatesOnlyForLayers(real lr)
02565 {
02566     hidden_layer->setLearningRate(lr);
02567     visible_layer->setLearningRate(lr);
02568     connection->setLearningRate(0.);
02569     if(reconstruction_connection)
02570         reconstruction_connection->setLearningRate(0.);
02571 }
02572 
02573 
02574 //////////////////////////////
02575 // sampleHiddenGivenVisible //
02576 //////////////////////////////
02577 void RBMModule::sampleHiddenGivenVisible(const Mat& visible)
02578 {
02579     computeHiddenActivations(visible);
02580     hidden_layer->computeExpectations();
02581     hidden_layer->generateSamples();
02582 }
02583 
02584 //////////////////////////////
02585 // sampleVisibleGivenHidden //
02586 //////////////////////////////
02587 void RBMModule::sampleVisibleGivenHidden(const Mat& hidden)
02588 {
02589     computeVisibleActivations(hidden);
02590     visible_layer->computeExpectations();
02591     visible_layer->generateSamples();
02592 }
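// Editor's note: a hedged sketch of how the two samplers above compose into
// a k-step alternating Gibbs chain (assuming 'v0' holds an initial
// mini-batch of visible configurations):
//
//     Mat v = v0;
//     for (int step = 0; step < k; step++) {
//         sampleHiddenGivenVisible(v);                      // h ~ P(h|v)
//         sampleVisibleGivenHidden(hidden_layer->samples);  // v ~ P(v|h)
//         v = visible_layer->samples;                       // shallow copy
//     }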
02593 
02594 /////////////////////
02595 // setLearningRate //
02596 /////////////////////
02597 void RBMModule::setLearningRate(real dynamic_learning_rate)
02598 {
02599     // For safety, force the user to go through the two separate learning
02600     // rates. May need to be removed if it causes unwanted crashes.
02601     PLERROR("In RBMModule::setLearningRate - Do not use this method; instead "
02602             "explicitly use 'cd_learning_rate' and 'grad_learning_rate'");
02603 }
02604 
02605 } // end of namespace PLearn
02606 
02607 
02608 /*
02609   Local Variables:
02610   mode:c++
02611   c-basic-offset:4
02612   c-file-style:"stroustrup"
02613   c-file-offsets:((innamespace . 0)(inline-open . 0))
02614   indent-tabs-mode:nil
02615   fill-column:79
02616   End:
02617 */
02618 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :