PLearn 0.1
KLp0p1RBMModule.cc
00001 // -*- C++ -*-
00002 
00003 // KLp0p1RBMModule.cc
00004 //
00005 // Copyright (C) 2007 Olivier Delalleau, Yoshua Bengio
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Olivier Delalleau, Yoshua Bengio
00036 
00041 #include "KLp0p1RBMModule.h"
00042 //#include <plearn/vmat/AutoVMatrix.h>
00043 #include <plearn/vmat/VMat.h>
00044 #include <plearn_learners/online/RBMMatrixConnection.h>
00045 
00046 #define PL_LOG_MODULE_NAME "KLp0p1RBMModule"
00047 #include <plearn/io/pl_log.h>
00048 
00049 namespace PLearn {
00050 using namespace std;
00051 
00052 PLEARN_IMPLEMENT_OBJECT(
00053     KLp0p1RBMModule,
00054     "Implement KL(p0||p1) criterion for RBMs",
00055     "This criterion is described and justified in the paper by Le Roux and Bengio entitled"
00056     "'Representational Power of Restricted Boltzmann Machines and Deep Belief Networks'."
00057     "The exact and very inefficient implementation of this criterion is done here."
00058     "  KL(p0||p1) = sum_x p0(x) log p0(x)/p1(x) = - sum_i (1/n) log p1(x_i) + sum_i (1/n) log(1/n)"
00059     "For an example x the cost is:"
00060     "  C(x) = - log p1(x) - log n = - log sum_{k=1}^n sum_h P(x|h) P(h|x^k)"
00061     "where {x^1, ... x^n} is the training set of examples x^k, h is a hidden layer bit vector,"
00062     "P(x|h) is the hidden-to-visible conditional distribution and P(h|x) is the"
00063     "input-to-hidden conditional distribution. Both are the usual found in Binomial"
00064     "layer RBMs here."
00065     "The gradient on the weight Wij is"
00066     "  dC(x)/dWij = (-1/(n p1(x))) "
00067     "       sum_{k=1}^n sum_h P(x|h) P(h|x^k) (h_i(x_j - P(x_j=1|h)) + x_j^k(h_i - P(h_i=1|x^k)))"
00068     "Apart from the KLp0p1 output port, and the fact that CD learning is replaced by minimization"
00069     "of KLp0p1, this module acts like a regular RBMModule."
00070 );
00071 
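// Illustrative sketch (not part of the module): a standalone toy program, with
// made-up sizes and weights, showing the brute-force cost this module reports
// on its "KLp0p1" port,
//   C(x) = - log sum_{k=1}^n sum_h P(x|h) P(h|x^k),
// by enumerating all 2^H hidden configurations, just as fprop() does below.
// Everything here (W, b, c, the toy "training set" X) is made up for
// illustration and is not meant to be compiled as part of this file.
#if 0
#include <cmath>
#include <cstdio>
#include <vector>

static double sigmoid(double z) { return 1.0 / (1.0 + std::exp(-z)); }

// Probability of a given bit vector under independent Bernoulli units with
// success probabilities p (binomial layers factorize this way).
static double bernoulli_prob(const std::vector<double>& p, const std::vector<int>& bits)
{
    double prob = 1.0;
    for (size_t j = 0; j < p.size(); j++)
        prob *= bits[j] ? p[j] : (1.0 - p[j]);
    return prob;
}

int main()
{
    const int V = 3, H = 2, n = 2;                        // toy sizes
    double W[H][V] = {{ 0.5, -0.3, 0.8 }, { -1.0, 0.2, 0.1 }};
    double b[V] = { 0.1, -0.2, 0.0 };                     // visible bias
    double c[H] = { 0.3, -0.1 };                          // hidden bias
    int X[n][V] = {{ 1, 0, 1 }, { 0, 1, 0 }};             // training set {x^1, ..., x^n}
    int x[V]    = { 1, 0, 1 };                            // example whose cost C(x) we want

    double sum = 0.0;                                     // sum_k sum_h P(x|h) P(h|x^k)
    for (int cfg = 0; cfg < (1 << H); cfg++)              // enumerate hidden configurations
    {
        std::vector<int> h(H);
        for (int i = 0; i < H; i++)
            h[i] = (cfg >> i) & 1;
        std::vector<double> p_v(V);                       // P(v_j = 1 | h)
        for (int j = 0; j < V; j++) {
            double act = b[j];
            for (int i = 0; i < H; i++)
                act += W[i][j] * h[i];
            p_v[j] = sigmoid(act);
        }
        double p_x_given_h = bernoulli_prob(p_v, std::vector<int>(x, x + V));
        for (int k = 0; k < n; k++) {                     // P(h | x^k)
            std::vector<double> p_h(H);
            for (int i = 0; i < H; i++) {
                double act = c[i];
                for (int j = 0; j < V; j++)
                    act += W[i][j] * X[k][j];
                p_h[i] = sigmoid(act);
            }
            sum += p_x_given_h * bernoulli_prob(p_h, h);
        }
    }
    std::printf("C(x) = %g\n", -std::log(sum));           // value reported on the KLp0p1 port
    return 0;
}
#endif
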
00073 // KLp0p1RBMModule //
00075 KLp0p1RBMModule::KLp0p1RBMModule():
00076     cd_learning_rate(0),
00077     grad_learning_rate(0),
00078     klp0p1_learning_rate(0),
00079     compute_contrastive_divergence(false),
00080     n_Gibbs_steps_CD(1),
00081     min_n_Gibbs_steps(1),
00082     n_Gibbs_steps_per_generated_sample(-1),
00083     compute_log_likelihood(false),
00084     minimize_log_likelihood(false),
00085     Gibbs_step(0),
00086     log_partition_function(0),
00087     partition_function_is_stale(true),
00088     standard_cd_grad(true),
00089     standard_cd_bias_grad(true),
00090     standard_cd_weights_grad(true),
00091     hidden_bias(NULL),
00092     weights(NULL),
00093     hidden_act(NULL),
00094     hidden_activations_are_computed(false)
00095 {
00096 }
00097 
00099 // declareOptions //
00101 void KLp0p1RBMModule::declareOptions(OptionList& ol)
00102 {
00103     declareOption(ol, "training_set", &KLp0p1RBMModule::training_set,
00104                   OptionBase::buildoption,
00105                   "VMatrix with one input example per row, the training set.");
00106 
00107     declareOption(ol, "visible_layer", &KLp0p1RBMModule::visible_layer,
00108                   OptionBase::buildoption,
00109         "Visible layer of the RBM.");
00110 
00111     declareOption(ol, "hidden_layer", &KLp0p1RBMModule::hidden_layer,
00112                   OptionBase::buildoption,
00113         "Hidden layer of the RBM.");
00114 
00115     declareOption(ol, "connection", &KLp0p1RBMModule::connection,
00116                   OptionBase::buildoption,
00117         "Connection between the visible and hidden layers.");
00118 
00119     declareOption(ol, "reconstruction_connection", 
00120                   &KLp0p1RBMModule::reconstruction_connection,
00121                   OptionBase::buildoption,
00122         "Reconstuction connection between the hidden and visible layers.");
00123 
00124     declareOption(ol, "grad_learning_rate", &KLp0p1RBMModule::grad_learning_rate,
00125                   OptionBase::buildoption,
00126         "Learning rate for the gradient descent step.");
00127 
00128     declareOption(ol, "cd_learning_rate", &KLp0p1RBMModule::cd_learning_rate,
00129                   OptionBase::buildoption,
00130         "Learning rate for the constrastive divergence step. Note that when\n"
00131         "set to 0, the gradient of the contrastive divergence will not be\n"
00132         "computed at all.");
00133 
00134     declareOption(ol, "klp0p1_learning_rate", &KLp0p1RBMModule::klp0p1_learning_rate,
00135                   OptionBase::buildoption,
00136         "Learning rate for the KLp0p1 criterion update. If\n"
00137         "set to 0, the gradient of KLp0p1 (and corresponding update) will not be\n"
00138         "computed at all.");
00139 
00140     declareOption(ol, "compute_contrastive_divergence", &KLp0p1RBMModule::compute_contrastive_divergence,
00141                   OptionBase::buildoption,
00142         "Compute the constrastive divergence in an output port.");
00143 
00144     declareOption(ol, "standard_cd_grad",
00145                   &KLp0p1RBMModule::standard_cd_grad,
00146                   OptionBase::buildoption,
00147         "Whether to use the standard contrastive divergence gradient for\n"
00148         "updates, or the true gradient of the contrastive divergence. This\n"
00149         "affects only the gradient w.r.t. internal parameters of the layers\n"
00150         "and connections. Currently, this option works only with layers of\n"
00151         "the type 'RBMBinomialLayer', connected by a 'RBMMatrixConnection'.");
00152 
00153     declareOption(ol, "standard_cd_bias_grad",
00154                   &KLp0p1RBMModule::standard_cd_bias_grad,
00155                   OptionBase::buildoption,
00156         "This option is only used when biases of the hidden layer are given\n"
00157         "through the 'hidden_bias' port. When this is the case, the gradient\n"
00158         "of contrastive divergence w.r.t. these biases is either computed:\n"
00159         "- by the usual formula if 'standard_cd_bias_grad' is true\n"
00160         "- by the true gradient if 'standard_cd_bias_grad' is false.");
00161 
00162     declareOption(ol, "standard_cd_weights_grad",
00163                   &KLp0p1RBMModule::standard_cd_weights_grad,
00164                   OptionBase::buildoption,
00165         "This option is only used when weights of the connection are given\n"
00166         "through the 'weights' port. When this is the case, the gradient of\n"
00167         "contrastive divergence w.r.t. weights is either computed:\n"
00168         "- by the usual formula if 'standard_cd_weights_grad' is true\n"
00169         "- by the true gradient if 'standard_cd_weights_grad' is false.");
00170 
00171     declareOption(ol, "n_Gibbs_steps_CD", 
00172                   &KLp0p1RBMModule::n_Gibbs_steps_CD,
00173                   OptionBase::buildoption,
00174                   "Number of Gibbs sampling steps in negative phase of "
00175                   "contrastive divergence.");
00176 
00177     declareOption(ol, "min_n_Gibbs_steps", &KLp0p1RBMModule::min_n_Gibbs_steps,
00178                   OptionBase::buildoption,
00179                   "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
00180                   "when one has to sample from the joint or a marginal of visible and hidden,\n"
00181                   "and thus a Gibbs chain has to be run. This option gives the minimum number\n"
00182                   "of Gibbs steps to perform in the chain before outputting a sample.\n");
00183 
00184     declareOption(ol, "n_Gibbs_steps_per_generated_sample", 
00185                   &KLp0p1RBMModule::n_Gibbs_steps_per_generated_sample,
00186                   OptionBase::buildoption,
00187                   "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
00188                   "when one has to sample from the joint or a marginal of visible and hidden,\n"
00189                   "This option gives the number of steps to run in the Gibbs chain between\n"
00190                   "consecutive generated samples that are produced in output of the fprop method.\n"
00191                   "By default this is equal to min_n_Gibbs_steps.\n");
00192 
00193     declareOption(ol, "compute_log_likelihood",
00194                   &KLp0p1RBMModule::compute_log_likelihood,
00195                   OptionBase::buildoption,
00196                   "Whether to compute the exact RBM generative model's log-likelihood\n"
00197                   "(on the neg_log_likelihood port). If false then the neg_log_likelihood\n"
00198                   "port just computes the input visible's free energy.\n");
00199     
00200     declareOption(ol, "minimize_log_likelihood",
00201                   &KLp0p1RBMModule::minimize_log_likelihood,
00202                   OptionBase::buildoption,
00203                   "Whether to minimize the exact RBM generative model's log-likelihood\n"
00204                   "i.e. take stochastic gradient steps w.r.t. the log-likelihood instead\n"
00205                   "of w.r.t. the contrastive divergence.\n");
00206 
00207     declareOption(ol, "Gibbs_step", 
00208                   &KLp0p1RBMModule::Gibbs_step,
00209                   OptionBase::learntoption,
00210                   "Used in generative mode (when visible_sample or hidden_sample is requested)\n"
00211                   "when one has to sample from the joint or a marginal of visible and hidden,\n"
00212                   "Keeps track of the number of steps that have been run since the beginning\n"
00213                   "of the chain.\n");
00214 
00215     declareOption(ol, "log_partition_function", 
00216                   &KLp0p1RBMModule::log_partition_function,
00217                   OptionBase::learntoption,
00218                   "log(Z) = log(sum_{h,x} exp(-energy(h,x))\n"
00219                   "only computed if compute_log_likelihood is true and\n"
00220                   "the neg_log_likelihood port is requested.\n");
00221 
00222     declareOption(ol, "partition_function_is_stale", 
00223                   &KLp0p1RBMModule::partition_function_is_stale,
00224                   OptionBase::learntoption,
00225                   "Whether parameters have changed since the last computation\n"
00226                   "of the log_partition_function (to know if it should be recomputed\n"
00227                   "when the neg_log_likelihood port is requested.\n");
00228 
00229     // Now call the parent class' declareOptions
00230     inherited::declareOptions(ol);
00231 }
00232 
00234 // build_ //
00236 void KLp0p1RBMModule::build_()
00237 {
00238     PLASSERT( cd_learning_rate >= 0 && grad_learning_rate >= 0 );
00239     if(visible_layer)
00240         visible_bias_grad.resize(visible_layer->size);
00241 
00242     // copy layers to allow different storage of activations and samples
00243     // but keep the same parameters 
00244     conf_visible_layer = PLearn::deepCopy(visible_layer);
00245     // (sharing only the bias pointer would not suffice with RBMGaussianLayer, which has other parameters)
00246     conf_visible_layer->bias = visible_layer->bias;
00247     conf_hidden_layer = PLearn::deepCopy(hidden_layer);
00248     conf_hidden_layer->bias = hidden_layer->bias;
00249 
00250 
00251     // Forward random generator to underlying modules.
00252     if (random_gen) {
00253         if (hidden_layer && !hidden_layer->random_gen) {
00254             hidden_layer->random_gen = random_gen;
00255             hidden_layer->build();
00256             hidden_layer->forget();
00257         }
00258         if (visible_layer && !visible_layer->random_gen) {
00259             visible_layer->random_gen = random_gen;
00260             visible_layer->build();
00261             visible_layer->forget();
00262         }
00263         if (connection && !connection->random_gen) {
00264             connection->random_gen = random_gen;
00265             connection->build();
00266             connection->forget();
00267         }
00268         if (reconstruction_connection &&
00269                 !reconstruction_connection->random_gen) {
00270             reconstruction_connection->random_gen = random_gen;
00271             reconstruction_connection->build();
00272             reconstruction_connection->forget();
00273         }
00274     }
00275 
00276     // build ports and port_sizes
00277 
00278     ports.resize(0);
00279     portname_to_index.clear();
00280     addPortName("visible");
00281     addPortName("hidden.state");
00282     addPortName("hidden_activations.state");
00283     addPortName("visible_sample");
00284     addPortName("visible_expectation");
00285     addPortName("hidden_sample");
00286     addPortName("energy");
00287     addPortName("hidden_bias"); 
00288     addPortName("weights"); 
00289     addPortName("neg_log_likelihood");
00290     addPortName("KLp0p1"); 
00291     if(reconstruction_connection)
00292     {
00293         addPortName("visible_reconstruction.state");
00294         addPortName("visible_reconstruction_activations.state");
00295         addPortName("reconstruction_error.state");
00296     }
00297     if (compute_contrastive_divergence)
00298     {
00299         addPortName("contrastive_divergence");
00300         addPortName("negative_phase_visible_samples.state");
00301         addPortName("negative_phase_hidden_expectations.state");
00302         addPortName("negative_phase_hidden_activations.state");
00303     }
00304 
00305     port_sizes.resize(nPorts(), 2);
00306     port_sizes.fill(-1);
00307     if (visible_layer) {
00308         port_sizes(getPortIndex("visible"), 1) = visible_layer->size;
00309         port_sizes(getPortIndex("visible_sample"), 1) = visible_layer->size;
00310         port_sizes(getPortIndex("visible_expectation"), 1) = visible_layer->size;
00311     }
00312     if (hidden_layer) {
00313         port_sizes(getPortIndex("hidden.state"), 1) = hidden_layer->size;
00314         port_sizes(getPortIndex("hidden_activations.state"), 1) = hidden_layer->size; 
00315         port_sizes(getPortIndex("hidden_sample"), 1) = hidden_layer->size; 
00316         port_sizes(getPortIndex("hidden_bias"),1) = hidden_layer->size;
00317         if(visible_layer)
00318             port_sizes(getPortIndex("weights"),1) = hidden_layer->size * visible_layer->size;
00319     }
00320     port_sizes(getPortIndex("energy"),1) = 1;
00321     port_sizes(getPortIndex("neg_log_likelihood"),1) = 1;
00322     port_sizes(getPortIndex("KLp0p1"),1) = 1;
00323     if(reconstruction_connection)
00324     {
00325         if (visible_layer) {
00326             port_sizes(getPortIndex("visible_reconstruction.state"),1) = 
00327                 visible_layer->size; 
00328             port_sizes(getPortIndex("visible_reconstruction_activations.state"),1) = 
00329                        visible_layer->size; 
00330         }
00331         port_sizes(getPortIndex("reconstruction_error.state"),1) = 1; 
00332     }
00333     if (compute_contrastive_divergence)
00334     {
00335         port_sizes(getPortIndex("contrastive_divergence"),1) = 1; 
00336         if (visible_layer) 
00337             port_sizes(getPortIndex("negative_phase_visible_samples.state"),1) = visible_layer->size; 
00338         if (hidden_layer)
00339             port_sizes(getPortIndex("negative_phase_hidden_expectations.state"),1) = hidden_layer->size; 
00340         if (fast_exact_is_equal(cd_learning_rate, 0))
00341             PLWARNING("In KLp0p1RBMModule::build_ - Contrastive divergence is "
00342                     "computed but 'cd_learning_rate' is set to 0: no internal "
00343                     "update will be performed AND no contrastive divergence "
00344                     "gradient will be propagated.");
00345     }
00346 
00347     PLCHECK_MSG(!(!standard_cd_grad && standard_cd_bias_grad), "You cannot "
00348             "compute the standard CD gradient w.r.t. external hidden bias and "
00349             "use the 'true' CD gradient w.r.t. internal hidden bias");
00350 
00351     if (n_Gibbs_steps_per_generated_sample<0)
00352         n_Gibbs_steps_per_generated_sample = min_n_Gibbs_steps;
00353 
00354 }
00355 
00357 // build //
00359 void KLp0p1RBMModule::build()
00360 {
00361     inherited::build();
00362     build_();
00363 }
00364 
00366 // addPortName //
00368 void KLp0p1RBMModule::addPortName(const string& name)
00369 {
00370     PLASSERT( portname_to_index.find(name) == portname_to_index.end() );
00371     portname_to_index[name] = ports.length();
00372     ports.append(name);
00373 }
00374 
00376 // computeEnergy //
00378 // FULLY OBSERVED CASE
00379 // we know x and h:
00380 // energy(h,x) = -b'x - c'h - h'Wx
00381 //  = visible_layer->energy(x) + hidden_layer->energy(h)
00382 //      - dot(h, hidden_layer->activation-c)
00383 //  = visible_layer->energy(x) - dot(h, hidden_layer->activation)
00384 void KLp0p1RBMModule::computeEnergy(const Mat& visible, const Mat& hidden,
00385                               Mat& energy, bool positive_phase)
00386 {
00387     int mbs=hidden.length();
00388     energy.resize(mbs, 1);
00389     Mat* hidden_activations = NULL;
00390     if (positive_phase) {
00391         computePositivePhaseHiddenActivations(visible);
00392         hidden_activations = hidden_act;
00393     } else {
00394         computeHiddenActivations(visible);
00395         hidden_activations = & hidden_layer->activations;
00396     }
00397     PLASSERT( hidden_activations );
00398     for (int i=0;i<mbs;i++)
00399         energy(i,0) = visible_layer->energy(visible(i))
00400             - dot(hidden(i), (*hidden_activations)(i));
00401             // Why not: + hidden_layer->energy(hidden(i)) ?
00402 }
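
// Illustrative sketch (not part of the module): a standalone check, with
// made-up values, that the quantity computed by computeEnergy() above,
// visible_layer->energy(x) - dot(h, hidden activation), equals the joint
// energy -b'x - c'h - h'Wx written out explicitly. It assumes binomial layers,
// for which layer->energy(v) = -bias'v and the hidden activation is c + W x.
#if 0
#include <cassert>
#include <cmath>

int main()
{
    const int V = 3, H = 2;
    double W[H][V] = {{ 0.5, -0.3, 0.8 }, { -1.0, 0.2, 0.1 }};
    double b[V] = { 0.1, -0.2, 0.0 }, c[H] = { 0.3, -0.1 };
    double x[V] = { 1, 0, 1 }, h[H] = { 1, 0 };

    // Explicit joint energy: -b'x - c'h - h'Wx.
    double e1 = 0;
    for (int j = 0; j < V; j++) e1 -= b[j] * x[j];
    for (int i = 0; i < H; i++) e1 -= c[i] * h[i];
    for (int i = 0; i < H; i++)
        for (int j = 0; j < V; j++)
            e1 -= h[i] * W[i][j] * x[j];

    // As in computeEnergy(): -b'x - dot(h, activation), with activation_i = c_i + W_i'x.
    double e2 = 0;
    for (int j = 0; j < V; j++) e2 -= b[j] * x[j];
    for (int i = 0; i < H; i++) {
        double act = c[i];
        for (int j = 0; j < V; j++) act += W[i][j] * x[j];
        e2 -= h[i] * act;
    }
    assert(std::fabs(e1 - e2) < 1e-12);
    return 0;
}
#endif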
00403 
00405 // computeFreeEnergyOfHidden //
00407 // FREE-ENERGY(hidden) CASE
00408 // we know h:
00409 // free energy = -log sum_x e^{-energy(h,x)}
00410 //  = -c'h - sum_i softplus(b_i + W_{.i}'h) .... FOR BINOMIAL INPUT LAYER
00411 // or, in terms of the layer objects,
00412 //  = hidden_layer->energy(h) - sum_i softplus(visible_layer->activation[i])
00413 void KLp0p1RBMModule::computeFreeEnergyOfHidden(const Mat& hidden, Mat& energy)
00414 {
00415     int mbs=hidden.length();
00416     if (energy.isEmpty())
00417         energy.resize(mbs,1);
00418     else {
00419         PLASSERT( energy.length() == mbs && energy.width() == 1 );
00420     }
00421     PLASSERT(visible_layer->classname()=="RBMBinomialLayer");
00422     computeVisibleActivations(hidden, false);
00423     for (int i=0;i<mbs;i++)
00424     {
00425         energy(i,0) = hidden_layer->energy(hidden(i));
00426         if (use_fast_approximations)
00427             for (int j=0;j<visible_layer->size;j++)
00428                 energy(i,0) -= tabulated_softplus(visible_layer->activations(i,j));
00429         else
00430             for (int j=0;j<visible_layer->size;j++)
00431                 energy(i,0) -= softplus(visible_layer->activations(i,j));
00432     }
00433 }
00434 
00436 // computeFreeEnergyOfVisible //
00438 // FREE-ENERGY(visible) CASE
00439 // we know x:
00440 // free energy = -log sum_h e^{-energy(h,x)}
00441 //  = -b'x - sum_i softplus(c_i + W_i'x) .... FOR BINOMIAL HIDDEN LAYER
00442 // or, in terms of the layer objects,
00443 //  = visible_layer->energy(x) - sum_i softplus(hidden_layer->activation[i])
00444 void KLp0p1RBMModule::computeFreeEnergyOfVisible(const Mat& visible, Mat& energy,
00445                                            bool positive_phase)
00446 {
00447     int mbs=visible.length();
00448     if (energy.isEmpty())
00449         energy.resize(mbs,1);
00450     else {
00451         PLASSERT( energy.length() == mbs && energy.width() == 1 );
00452     }
00453     PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
00454     Mat* hidden_activations = NULL;
00455     if (positive_phase) {
00456         computePositivePhaseHiddenActivations(visible);
00457         hidden_activations = hidden_act;
00458     }
00459     else {
00460         computeHiddenActivations(visible);
00461         hidden_activations = & hidden_layer->activations;
00462     }
00463     PLASSERT( hidden_activations && hidden_activations->length() == mbs
00464             && hidden_activations->width() == hidden_layer->size );
00465     for (int i=0;i<mbs;i++)
00466     {
00467         energy(i,0) = visible_layer->energy(visible(i));
00468         if (use_fast_approximations)
00469             for (int j=0;j<hidden_layer->size;j++)
00470                 energy(i,0) -= tabulated_softplus((*hidden_activations)(i,j));
00471         else
00472             for (int j=0;j<hidden_layer->size;j++)
00473                 energy(i,0) -= softplus((*hidden_activations)(i,j));
00474     }
00475 }
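
// Illustrative sketch (not part of the module): a standalone check, with
// made-up values and binomial (0/1) hidden units, of the free-energy identity
// used above,
//   F(x) = -b'x - sum_i softplus(c_i + W_i'x) = -log sum_h exp(-energy(x,h)),
// where the right-hand side enumerates the 2^H hidden configurations, the same
// bit-enumeration trick fprop() uses below to recompute the partition function.
#if 0
#include <cassert>
#include <cmath>

static double softplus(double z) { return std::log1p(std::exp(z)); }

int main()
{
    const int V = 3, H = 2;
    double W[H][V] = {{ 0.5, -0.3, 0.8 }, { -1.0, 0.2, 0.1 }};
    double b[V] = { 0.1, -0.2, 0.0 }, c[H] = { 0.3, -0.1 };
    double x[V] = { 1, 0, 1 };

    // Closed form: -b'x - sum_i softplus(c_i + W_i'x).
    double f1 = 0;
    for (int j = 0; j < V; j++) f1 -= b[j] * x[j];
    for (int i = 0; i < H; i++) {
        double act = c[i];
        for (int j = 0; j < V; j++) act += W[i][j] * x[j];
        f1 -= softplus(act);
    }

    // Brute force: -log sum_h exp(b'x + c'h + h'Wx) over all 2^H configurations.
    double sum = 0;
    for (int cfg = 0; cfg < (1 << H); cfg++) {
        double e = 0;
        for (int j = 0; j < V; j++) e += b[j] * x[j];
        for (int i = 0; i < H; i++) {
            int hi = (cfg >> i) & 1;                      // bit i of configuration cfg
            double act = c[i];
            for (int j = 0; j < V; j++) act += W[i][j] * x[j];
            e += hi * act;
        }
        sum += std::exp(e);
    }
    assert(std::fabs(f1 + std::log(sum)) < 1e-10);        // f1 == -log(sum)
    return 0;
}
#endif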
00476 
00478 // computeHiddenActivations //
00480 void KLp0p1RBMModule::computeHiddenActivations(const Mat& visible)
00481 {
00482     if(weights && !weights->isEmpty())
00483     {
00484         Mat old_weights;
00485         Vec old_activation;
00486         connection->getAllWeights(old_weights);
00487         old_activation = hidden_layer->activation;
00488         int up = connection->up_size;
00489         int down = connection->down_size;
00490         PLASSERT( weights->width() == up * down  );
00491         hidden_layer->setBatchSize( visible.length() );
00492         for(int i=0; i<visible.length(); i++)
00493         {
00494             connection->setAllWeights(Mat(up, down, (*weights)(i)));
00495             connection->setAsDownInput(visible(i));
00496             hidden_layer->activation = hidden_layer->activations(i);
00497             hidden_layer->getAllActivations(connection, 0, false);
00498             if (hidden_bias && !hidden_bias->isEmpty())
00499                 hidden_layer->activation += (*hidden_bias)(i);
00500         }
00501         connection->setAllWeights(old_weights);
00502         hidden_layer->activation = old_activation;
00503     }
00504     else
00505     {
00506         connection->setAsDownInputs(visible);
00507         hidden_layer->getAllActivations(connection, 0, true);
00508         if (hidden_bias && !hidden_bias->isEmpty())
00509             hidden_layer->activations += *hidden_bias;
00510     }
00511 }
00512 
00514 // computePositivePhaseHiddenActivations //
00516 void KLp0p1RBMModule::computePositivePhaseHiddenActivations(const Mat& visible)
00517 {
00518     if (hidden_activations_are_computed) {
00519         // Nothing to do.
00520         PLASSERT( !hidden_act || !hidden_act->isEmpty() );
00521         return;
00522     }
00523     computeHiddenActivations(visible);
00524     if (hidden_act && hidden_act->isEmpty())
00525     {
00526         hidden_act->resize(visible.length(),hidden_layer->size);
00527         *hidden_act << hidden_layer->activations;
00528     }
00529     hidden_activations_are_computed = true;
00530 }
00531 
00533 // computeVisibleActivations //
00535 void KLp0p1RBMModule::computeVisibleActivations(const Mat& hidden,
00536                                           bool using_reconstruction_connection)
00537 {
00538     if (using_reconstruction_connection)
00539     {
00540         PLASSERT( reconstruction_connection );
00541         reconstruction_connection->setAsUpInputs(hidden);
00542         visible_layer->getAllActivations(reconstruction_connection, 0, true);
00543     }
00544     else
00545     {
00546         if(weights && !weights->isEmpty())
00547         {
00548             PLASSERT( connection->classname() == "RBMMatrixConnection" );
00549             Mat old_weights;
00550             Vec old_activation;
00551             connection->getAllWeights(old_weights);
00552             old_activation = visible_layer->activation;
00553             int up = connection->up_size;
00554             int down = connection->down_size;
00555             PLASSERT( weights->width() == up * down  );
00556             visible_layer->setBatchSize( hidden.length() );
00557             for(int i=0; i<hidden.length(); i++)
00558             {
00559                 connection->setAllWeights(Mat(up,down,(*weights)(i)));
00560                 connection->setAsUpInput(hidden(i));
00561                 visible_layer->activation = visible_layer->activations(i);
00562                 visible_layer->getAllActivations(connection, 0, false);
00563             }
00564             connection->setAllWeights(old_weights);
00565             visible_layer->activation = old_activation;
00566         }
00567         else
00568         {
00569             connection->setAsUpInputs(hidden);
00570             visible_layer->getAllActivations(connection, 0, true);
00571         }
00572     }
00573 }
00574 
00576 // makeDeepCopyFromShallowCopy //
00578 void KLp0p1RBMModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00579 {
00580     inherited::makeDeepCopyFromShallowCopy(copies);
00581 
00582     deepCopyField(hidden_layer,     copies);
00583     deepCopyField(visible_layer,    copies);
00584     deepCopyField(conf_hidden_layer,     copies);
00585     deepCopyField(conf_visible_layer,    copies);
00586     deepCopyField(connection,       copies);
00587     deepCopyField(reconstruction_connection, copies);
00588 
00589     deepCopyField(hidden_exp_grad, copies);
00590     deepCopyField(hidden_act_grad, copies);
00591     deepCopyField(store_weights_grad, copies);
00592     deepCopyField(store_hidden_bias_grad, copies);
00593     deepCopyField(visible_exp_grad, copies);
00594     deepCopyField(visible_act_grad, copies);
00595     deepCopyField(visible_bias_grad, copies);
00596     deepCopyField(hidden_exp_store, copies);
00597     deepCopyField(hidden_act_store, copies);
00598 
00599     deepCopyField(ports, copies);
00600     deepCopyField(energy_inputs, copies);
00601 }
00602 
00604 // fprop //
00606 void KLp0p1RBMModule::fprop(const Vec& input, Vec& output) const
00607 {
00608     PLERROR("In KLp0p1RBMModule::fprop - Not implemented");
00609 }
00610 
00611 void KLp0p1RBMModule::fprop(const TVec<Mat*>& ports_value)
00612 {
00613 
00614     PLASSERT( ports_value.length() == nPorts() );
00615     PLASSERT( visible_layer );
00616     PLASSERT( hidden_layer );
00617     PLASSERT( connection );
00618 
00619     Mat* visible = ports_value[getPortIndex("visible")]; 
00620     Mat* hidden = ports_value[getPortIndex("hidden.state")];
00621     hidden_act = ports_value[getPortIndex("hidden_activations.state")];
00622     Mat* visible_sample = ports_value[getPortIndex("visible_sample")];
00623     Mat* visible_expectation = ports_value[getPortIndex("visible_expectation")];
00624     Mat* hidden_sample = ports_value[getPortIndex("hidden_sample")];
00625     Mat* energy = ports_value[getPortIndex("energy")];
00626     Mat* neg_log_likelihood = ports_value[getPortIndex("neg_log_likelihood")];
00627     Mat* KLp0p1 = ports_value[getPortIndex("KLp0p1")];
00628     hidden_bias = ports_value[getPortIndex("hidden_bias")];
00629     weights = ports_value[getPortIndex("weights")];
00630     Mat* visible_reconstruction = 0;
00631     Mat* visible_reconstruction_activations = 0;
00632     Mat* reconstruction_error = 0;
00633     if(reconstruction_connection)
00634     {
00635         visible_reconstruction =
00636             ports_value[getPortIndex("visible_reconstruction.state")];
00637         visible_reconstruction_activations =
00638             ports_value[getPortIndex("visible_reconstruction_activations.state")];
00639         reconstruction_error =
00640             ports_value[getPortIndex("reconstruction_error.state")];
00641     }
00642     Mat* contrastive_divergence = 0;
00643     Mat* negative_phase_visible_samples = 0;
00644     Mat* negative_phase_hidden_expectations = 0;
00645     Mat* negative_phase_hidden_activations = NULL;
00646     if (compute_contrastive_divergence)
00647     {
00648         contrastive_divergence = ports_value[getPortIndex("contrastive_divergence")];
00649         if (!contrastive_divergence || !contrastive_divergence->isEmpty())
00650             PLERROR("In KLp0p1RBMModule::fprop - When option "
00651                     "'compute_contrastive_divergence' is 'true', the "
00652                     "'contrastive_divergence' port should be provided, as an "
00653                     "output.");
00654         negative_phase_visible_samples =
00655             ports_value[getPortIndex("negative_phase_visible_samples.state")];
00656         negative_phase_hidden_expectations =
00657             ports_value[getPortIndex("negative_phase_hidden_expectations.state")];
00658         negative_phase_hidden_activations =
00659             ports_value[getPortIndex("negative_phase_hidden_activations.state")];
00660     }
00661 
00662     bool hidden_expectations_are_computed = false;
00663     hidden_activations_are_computed = false;
00664     bool found_a_valid_configuration = false;
00665 
00666     if (visible && !visible->isEmpty())
00667     {
00668         // When an input is provided, restart the chain for unconditional
00669         // sampling from that example.
00670         Gibbs_step = 0;
00671         visible_layer->samples.resize(visible->length(),visible->width());
00672         visible_layer->samples << *visible;
00673     }
00674 
00675     // COMPUTE ENERGY
00676     if (energy)
00677     {
00678         PLASSERT_MSG( energy->isEmpty(), 
00679                       "KLp0p1RBMModule: the energy port can only be an output port\n" );
00680         if (visible && !visible->isEmpty()
00681             && hidden && !hidden->isEmpty())
00682         {
00683             computeEnergy(*visible, *hidden, *energy);
00684         }
00685         else if (visible && !visible->isEmpty())
00686         {
00687             computeFreeEnergyOfVisible(*visible,*energy);
00688         }
00689         else if (hidden && !hidden->isEmpty())
00690         {
00691             computeFreeEnergyOfHidden(*hidden,*energy);
00692         }
00693         else
00694         {
00695             PLERROR("KLp0p1RBMModule: unknown configuration to compute energy (currently\n"
00696                     "only possible if at least visible or hidden are provided).\n");
00697         }
00698         found_a_valid_configuration = true;
00699     }
00700     // COMPUTE NLL
00701     if (neg_log_likelihood && neg_log_likelihood->isEmpty() && compute_log_likelihood)
00702     {
00703         if (partition_function_is_stale && !during_training)
00704         {
00705             PLASSERT_MSG(hidden_layer->size<32 || visible_layer->size<32,
00706                          "To compute exact log-likelihood of an RBM, hidden_layer->size "
00707                          "or visible_layer->size must be <32");
00708             // recompute partition function
00709             if (hidden_layer->size > visible_layer->size)
00710                 // do it by log-summing minus-free-energy of visible configurations
00711             {
00712                 PLASSERT(visible_layer->classname()=="RBMBinomialLayer");
00713                 // assuming a binary input we sum over all bit configurations
00714                 int n_configurations = 1 << visible_layer->size; // = 2^{visible_layer->size}
00715                 energy_inputs.resize(1, visible_layer->size);
00716                 Vec input = energy_inputs(0);
00717                 // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
00718                 // AT ONCE IN A 'MINIBATCH'
00719                 Mat free_energy(1, 1);
00720                 log_partition_function = 0;
00721                 for (int c=0;c<n_configurations;c++)
00722                 {
00723                     // convert integer c into a bit-wise visible representation
00724                     int x=c;
00725                     for (int i=0;i<visible_layer->size;i++)
00726                     {
00727                         input[i]= x & 1; // take least significant bit
00728                         x >>= 1; // and shift right (divide by 2)
00729                     }
00730                     computeFreeEnergyOfVisible(energy_inputs,free_energy,false);
00731                     if (c==0)
00732                         log_partition_function = -free_energy(0,0);
00733                     else
00734                         log_partition_function = logadd(log_partition_function,-free_energy(0,0));
00735                 }
00736             }
00737             else
00738                 // do it by log-summing minus-free-energy of hidden configurations
00739             {
00740                 PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
00741                 // assuming a binary hidden we sum over all bit configurations
00742                 int n_configurations = 1 << hidden_layer->size; // = 2^{hidden_layer->size}
00743                 energy_inputs.resize(1, hidden_layer->size);
00744                 Vec input = energy_inputs(0);
00745                 // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
00746                 // AT ONCE IN A 'MINIBATCH'
00747                 Mat free_energy(1,1);
00748                 log_partition_function = 0;
00749                 for (int c=0;c<n_configurations;c++)
00750                 {
00751                     // convert integer c into a bit-wise hidden representation
00752                     int x=c;
00753                     for (int i=0;i<hidden_layer->size;i++)
00754                     {
00755                         input[i]= x & 1; // take least significant bit
00756                         x >>= 1; // and shift right (divide by 2)
00757                     }
00758                     computeFreeEnergyOfHidden(energy_inputs, free_energy);
00759                     if (c==0)
00760                         log_partition_function = -free_energy(0,0);
00761                     else
00762                         log_partition_function = logadd(log_partition_function,-free_energy(0,0));
00763                 }
00764             }
00765             partition_function_is_stale=false;
00766         }
00767         if (visible && !visible->isEmpty()
00768             && hidden && !hidden->isEmpty())
00769         {
00770             // neg-log-likelihood(visible,hidden) = energy(visible,hidden) + log(partition_function)
00771             computeEnergy(*visible,*hidden,*neg_log_likelihood);
00772             *neg_log_likelihood += log_partition_function;
00773         }
00774         else if (visible && !visible->isEmpty())
00775         {
00776             // neg-log-likelihood(visible) = free_energy(visible) + log(partition_function)
00777             computeFreeEnergyOfVisible(*visible,*neg_log_likelihood);
00778             *neg_log_likelihood += log_partition_function;
00779         }
00780         else if (hidden && !hidden->isEmpty())
00781         {
00782             // neg-log-likelihood(hidden) = free_energy(hidden) + log(partition_function)
00783             computeFreeEnergyOfHidden(*hidden,*neg_log_likelihood);
00784             *neg_log_likelihood += log_partition_function;
00785         }
00786         else PLERROR("KLp0p1RBMModule: neg_log_likelihood is currently computable only when the visible and/or hidden are provided as input");
00787     }
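
    // Illustrative sketch (not part of the computation): the loops above build
    // log Z = log sum_c exp(-free_energy(c)) one configuration at a time in the
    // log domain, using PLearn's logadd(). The accumulation pattern, with the
    // usual numerically stable form of logadd, is roughly the following
    // (free_energy_of(c) and n_configurations are placeholder names used only
    // for illustration):
#if 0
    real log_Z = -free_energy_of(0);
    for (int c = 1; c < n_configurations; c++) {
        real a = log_Z;
        real b = -free_energy_of(c);
        real m = std::max(a, b);
        log_Z = m + log1p(exp(std::min(a, b) - m));  // log(exp(a)+exp(b)) without overflow
    }
#endif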
00788 
00789     // REGULAR FPROP
00790     // we are given the visible units and we want to compute the hidden
00791     // activation and/or the hidden expectation
00792     if ( visible && !visible->isEmpty() &&
00793          ((hidden && hidden->isEmpty() ) ||
00794           (hidden_act && hidden_act->isEmpty())) )
00795     {
00796         computePositivePhaseHiddenActivations(*visible);
00797         if (hidden) {
00798             PLASSERT( hidden->isEmpty() );
00799             PLCHECK_MSG( !hidden_layer->expectations_are_up_to_date, "Safety "
00800                     "check: how were expectations computed previously?" );
00801             hidden_layer->computeExpectations();
00802             hidden_expectations_are_computed=true;
00803             const Mat& hidden_out = hidden_layer->getExpectations();
00804             hidden->resize(hidden_out.length(), hidden_out.width());
00805             *hidden << hidden_out;
00806         }
00807         // Since we return below, the other ports must be unused.
00808         //PLASSERT( !visible_sample && !hidden_sample );
00809         found_a_valid_configuration = true;
00810     }
00811 
00812     // COMPUTE AUTOASSOCIATOR RECONSTRUCTION ERROR
00813     if ( visible && !visible->isEmpty() &&
00814          ( ( visible_reconstruction && visible_reconstruction->isEmpty() ) ||
00815            ( visible_reconstruction_activations &&
00816              visible_reconstruction_activations->isEmpty() ) ||
00817            ( reconstruction_error && reconstruction_error->isEmpty() ) ) )
00818     {
00819         // Autoassociator reconstruction cost
00820         PLASSERT( ports_value.length() == nPorts() );
00821 
00822         // if hidden is provided, condition on it rather than
00823         // using P(h|visible) as the hidden.
00824         Mat h;
00825         if (hidden && !hidden->isEmpty())
00826             h = *hidden;
00827         else {
00828             if(!hidden_expectations_are_computed)
00829             {
00830                 computePositivePhaseHiddenActivations(*visible);
00831                 hidden_layer->computeExpectations();
00832                 hidden_expectations_are_computed=true;
00833             }
00834             h = hidden_layer->getExpectations();
00835         }
00836 
00837         // No need to verify whether they were requested on a port; this was checked previously
00838 
00839         computeVisibleActivations(h, true);
00840         if(visible_reconstruction_activations)
00841         {
00842             PLASSERT( visible_reconstruction_activations->isEmpty() );
00843             const Mat& to_store = visible_layer->activations;
00844             visible_reconstruction_activations->resize(to_store.length(),
00845                                                        to_store.width());
00846             *visible_reconstruction_activations << to_store;
00847         }
00848         if (visible_reconstruction || reconstruction_error)
00849         {
00850             visible_layer->computeExpectations();
00851             if(visible_reconstruction)
00852             {
00853                 PLASSERT( visible_reconstruction->isEmpty() );
00854                 const Mat& to_store = visible_layer->getExpectations();
00855                 visible_reconstruction->resize(to_store.length(),
00856                                                to_store.width());
00857                 *visible_reconstruction << to_store;
00858             }
00859             if(reconstruction_error)
00860             {
00861                 PLASSERT( reconstruction_error->isEmpty() );
00862                 reconstruction_error->resize(visible->length(),1);
00863                 visible_layer->fpropNLL(*visible,
00864                                         *reconstruction_error);
00865             }
00866         }
00867         found_a_valid_configuration = true;
00868     }
00869     // COMPUTE VISIBLE GIVEN HIDDEN
00870     else if ( visible_reconstruction && visible_reconstruction->isEmpty()
00871          && hidden && !hidden->isEmpty())
00872     {
00873         // No need to verify whether they were requested on a port; this was checked previously
00874         computeVisibleActivations(*hidden,true);
00875         if(visible_reconstruction_activations)
00876         {
00877             PLASSERT( visible_reconstruction_activations->isEmpty() );
00878             const Mat& to_store = visible_layer->activations;
00879             visible_reconstruction_activations->resize(to_store.length(),
00880                                                        to_store.width());
00881             *visible_reconstruction_activations << to_store;
00882         }
00883         visible_layer->computeExpectations();
00884         PLASSERT( visible_reconstruction->isEmpty() );
00885         const Mat& to_store = visible_layer->getExpectations();
00886         visible_reconstruction->resize(to_store.length(),
00887                                        to_store.width());
00888         *visible_reconstruction << to_store;
00889         found_a_valid_configuration = true;
00890     }
00891 
00892     // compute KLp0p1 cost, given visible input
00893     if (KLp0p1 && KLp0p1->isEmpty() && visible && !visible->isEmpty())
00894     {
00895         int mbs=visible->length();
00896         KLp0p1->resize(mbs,1);
00897         KLp0p1->clear();
00898 #if 0
00899         if (!training_set) {
00900             training_set = new AutoVMatrix("/u/delallea/tmp/kl/data.amat");
00901         } else
00902 #else
00903             PLASSERT_MSG(training_set,"KLp0p1RBMModule: training_set must be provided");
00904 #endif
00905         int n=training_set.length();
00906         PLASSERT_MSG(n>0,"KLp0p1RBMModule: training_set must have n>0 rows");
00907 
00908         // compute all P(hidden_i=1|x^k) for all x^k in training set
00909         hidden_layer->setBatchSize(n);
00910         visible_layer->setBatchSize(n);
00911         const Mat& ha=hidden_layer->activations;
00912         const Mat& X=visible_layer->getExpectations();
00913         training_set->getMat(0,0,X);
00914         PP<RBMMatrixConnection> matrix_connection = NULL;
00915 #if 0
00916         if (weights) {
00917             matrix_connection = PP<RBMMatrixConnection>(connection);
00918             matrix_connection->weights << (*weights)(0);
00919             pout << "Weights: " << endl << matrix_connection->weights << endl;
00920         }
00921 #endif
00922         connection->setAsDownInputs(visible_layer->getExpectations());
00923         hidden_layer->getAllActivations(connection,0,true);
00924         hidden_layer->computeExpectations();
00925 
00926         PLASSERT_MSG(hidden_layer->size<32,"To compute KLp0p1 of an RBM, hidden_layer->size must be <32");
00927         PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
00928         //real logn=safelog((real)n);
00929         // assuming a binary hidden we sum over all bit configurations
00930         int n_configurations = 1 << hidden_layer->size; // = 2^{hidden_layer->size}
00931         // put all h configurations in conf_hidden_layer->samples
00932         conf_hidden_layer->setBatchSize(n_configurations);
00933         conf_visible_layer->setBatchSize(n_configurations);
00934         for (int c=0;c<n_configurations;c++)
00935         {
00936             // convert integer c into a bit-wise hidden representation
00937             int N=c;
00938             for (int i=0;i<hidden_layer->size;i++)
00939             {
00940                 conf_hidden_layer->samples(c,i)= N & 1; // take least significant bit
00941                 N >>= 1; // and shift right (divide by 2)
00942             }
00943         }
00944         // compute all P(visible_i=1|h) for each h configuration
00945         connection->setAsUpInputs(conf_hidden_layer->samples);
00946         conf_visible_layer->getAllActivations(connection,0,true);
00947 
00948         //Vec check_sum_to_one(n);
00949 
00950         for (int c=0;c<n_configurations;c++)
00951         {
00952             // KL(p0|p1) = sum_t (1/n) log ((1/n) / p1(x_t)) = (1/n) sum_t C(x_t)
00953             //  p1(x) = sum_k (1/n) sum_h P(x|h) P(h|x_k)
00954             //  C(x) =  -log p1(x) - log n
00955             //       =  log n - log sum_{k=1}^n sum_h P(x|h) P(h|x^k)  - log n
00956             //       =  - log sum_h P(x|h) sum_k P(h|x^k)
00957 
00958             real log_sum_ph_given_xk = 0;
00959             Vec h = conf_hidden_layer->samples(c);
00960             for (int k=0;k<n;k++)
00961             {
00962                 real lp=0;
00963                 for (int i=0;i<hidden_layer->size;i++)
00964                 {
00965                     real act=ha(k,i);
00966                     // note that log sigmoid(act) = -softplus(-act)
00967                     // and       log(1 - sigmoid(act)) = -act -softplus(-act)
00968                     // and  h log(sigm(act))+(1-h)log(1-sigm(act)) = act*h-softplus(act)
00969                     lp += h[i]*act-softplus(act); 
00970                 }
00971 #if 0
00972                 if (c==0)
00973                     check_sum_to_one[k]=lp;
00974                 else
00975                     check_sum_to_one[k]=logadd(check_sum_to_one[k],lp);
00976 #endif
00977                 // now lp = log P(h|x^k)
00978                 if (k==0)
00979                     log_sum_ph_given_xk = lp;
00980                 else
00981                     log_sum_ph_given_xk = logadd(log_sum_ph_given_xk,lp);
00982             }
00983             // now log_sum_ph_given_xk = log sum_k P(h|x^k)
00984             conf_visible_layer->activation << conf_visible_layer->activations(c);
00985             //real log_sum_p_xt = 0;
00986             for (int t=0;t<mbs;t++)
00987             {
00988                 real log_p_xt = -conf_visible_layer->fpropNLL((*visible)(t));
00989                 //if (t==0) // check if sum_xt p(xt|h) = 1 (when testing with the full set of possible inputs)
00990                 //    log_sum_p_xt = log_p_xt;
00991                 //else
00992                 //    log_sum_p_xt = logadd(log_sum_p_xt,log_p_xt);
00993                 if (c==0) // at this point we accumulate log sum_h P(x_t|h) sum_k P(h|x_k) in KLp0p1
00994                     (*KLp0p1)(t,0) = log_p_xt + log_sum_ph_given_xk;
00995                 else {
00996                     (*KLp0p1)(t,0) = logadd((*KLp0p1)(t,0), log_p_xt + log_sum_ph_given_xk);
00997                     //if ((*KLp0p1)(t,0) > 0)
00998                     // PLWARNING("KLp0p1: training example %d is getting mass > 1/n! KL=%g after getting to configuration %d",t,(double)(*KLp0p1)(t,0),c);
00999                 }
01000             }
01001             //if (!during_training)
01002             //    cout << "sum_t(p(x_t|h)) = " << exp(log_sum_p_xt) << endl;
01003         }
01004 #if 0 
01005         for (int k=0;k<n;k++)
01006         {
01007             real p_k=exp(check_sum_to_one[k]);
01008             if (fabs(p_k-1)>1e-6)
01009                 PLWARNING("Probabilities that do not sum to 1!");
01010         }
01011 #endif
01012         *KLp0p1 *= -1;
01013 #if 0 
01014         if (!during_training)
01015         {
01016             real sum_pxt=0;
01017             for (int t=0;t<mbs;t++)
01018             {
01019                 sum_pxt += exp(-(*KLp0p1)(t,0) -logn);
01020                 if ((*KLp0p1)(t,0) < 0)
01021                     PLWARNING("KLp0p1: training example %d is getting mass = %g > 1/n!",t,(double)exp(-(*KLp0p1)(t,0)-logn));
01022             }
01023             cout << "sum_t p1(x_t) = " << sum_pxt << endl;
01024         }
01025 #endif
01026         hidden_layer->setBatchSize(mbs);
01027         visible_layer->setBatchSize(mbs);
01028     }
01029 
01030     // SAMPLING
01031     if ((visible_sample && visible_sample->isEmpty())               // is asked to sample visible units (discrete)
01032         || (visible_expectation && visible_expectation->isEmpty())  //              "                   (continuous)
01033         || (hidden_sample && hidden_sample->isEmpty()))             // or to sample hidden units
01034     {
01035         if (hidden_sample && !hidden_sample->isEmpty()) // sample visible conditionally on hidden
01036         {
01037             sampleVisibleGivenHidden(*hidden_sample);
01038             Gibbs_step=0;
01039             //cout << "sampling visible from hidden" << endl;
01040         }
01041         else if (visible_sample && !visible_sample->isEmpty()) // if an input is provided, sample hidden conditionally
01042         {
01043             sampleHiddenGivenVisible(*visible_sample);
01044             hidden_activations_are_computed = false;
01045             Gibbs_step=0;
01046             //cout << "sampling hidden from visible" << endl;
01047         }
01048         else if (visible_expectation && !visible_expectation->isEmpty())
01049         {
01050              PLERROR("In KLp0p1RBMModule::fprop - visible_expectation can only be an output port (use visible as an input port)");
01051         }
01052         else // sample unconditionally: Gibbs sample after k steps
01053         {
01054             // the visible_layer->expectations contain the "state" from which we
01055             // start or continue the chain
01056             if (visible_layer->samples.isEmpty())
01057             {
01058                 if (visible && !visible->isEmpty())
01059                     visible_layer->samples << *visible;
01060                 else if (!visible_layer->getExpectations().isEmpty())
01061                     visible_layer->samples << visible_layer->getExpectations();
01062                 else if (!hidden_layer->samples.isEmpty())
01063                     sampleVisibleGivenHidden(hidden_layer->samples);    
01064                 else if (!hidden_layer->getExpectations().isEmpty())
01065                     sampleVisibleGivenHidden(hidden_layer->getExpectations());    
01066             }
01067             int min_n = max(Gibbs_step+n_Gibbs_steps_per_generated_sample,
01068                             min_n_Gibbs_steps);
01069             //cout << "Gibbs sampling " << Gibbs_step+1;
01070             for (;Gibbs_step<min_n;Gibbs_step++)
01071             {
01072                 sampleHiddenGivenVisible(visible_layer->samples);
01073                 sampleVisibleGivenHidden(hidden_layer->samples);
01074             }
01075             hidden_activations_are_computed = false;
01076             //cout << " -> " << Gibbs_step << endl;
01077         }
01078 
01079         if ( hidden && hidden->isEmpty())   // fill hidden.state with expectations
01080         {
01081               const Mat& hidden_expect = hidden_layer->getExpectations();
01082               hidden->resize(hidden_expect.length(), hidden_expect.width());
01083               *hidden << hidden_expect;
01084         }
01085         if (visible_sample && visible_sample->isEmpty()) // provide sample of the visible units
01086         {
01087             visible_sample->resize(visible_layer->samples.length(),
01088                                    visible_layer->samples.width());
01089             *visible_sample << visible_layer->samples;
01090         }
01091         if (hidden_sample && hidden_sample->isEmpty()) // provide sample of the hidden units
01092         {
01093             hidden_sample->resize(hidden_layer->samples.length(),
01094                                   hidden_layer->samples.width());
01095             *hidden_sample << hidden_layer->samples;
01096         }
01097         if (visible_expectation && visible_expectation->isEmpty()) // provide expectation of the visible units
01098         {
01099             const Mat& to_store = visible_layer->getExpectations();
01100             visible_expectation->resize(to_store.length(),
01101                                         to_store.width());
01102             *visible_expectation << to_store;
01103         }
01104         if (hidden && hidden->isEmpty())
01105         {
01106             hidden->resize(hidden_layer->samples.length(),
01107                            hidden_layer->samples.width());
01108             *hidden << hidden_layer->samples;
01109         }
01110         if (hidden_act && hidden_act->isEmpty())
01111         {
01112             hidden_act->resize(hidden_layer->samples.length(),
01113                                hidden_layer->samples.width());
01114             *hidden_act << hidden_layer->getExpectations();
01115         }
01116         found_a_valid_configuration = true;
01117     }// END SAMPLING
01118 
01119     // COMPUTE CONTRASTIVE DIVERGENCE CRITERION
01120     if (contrastive_divergence)
01121     {
01122         PLASSERT_MSG( contrastive_divergence->isEmpty(), 
01123                       "KLp0p1RBMModule: the contrastive_divergence port can only be an output port\n" );
01124         if (visible && !visible->isEmpty())
01125         {
01126             int mbs = visible->length();
01127             const Mat& hidden_expectations = hidden_layer->getExpectations();
01128             Mat* h=0;
01129             Mat* h_act=0;
01130             if (!hidden_activations_are_computed) // it must be because neither hidden nor hidden_act were requested
01131             {
01132                 PLASSERT(!hidden_act);
01133                 computePositivePhaseHiddenActivations(*visible);
01134 
01135                 // we need to save the hidden activations somewhere
01136                 hidden_act_store.resize(mbs,hidden_layer->size);
01137                 hidden_act_store << hidden_layer->activations;
01138                 h_act = &hidden_act_store;
01139             }
01140             else
01141             {
01142                 // hidden_act must have been computed above if they were requested on port
01143                 PLASSERT(hidden_act && !hidden_act->isEmpty());
01144                 h_act = hidden_act;
01145             }
01146             if (!hidden_expectations_are_computed) // it must be because hidden outputs were not requested
01147             {
01148                 PLASSERT(!hidden);
01149                 hidden_layer->computeExpectations();
01150                 hidden_expectations_are_computed=true;
01151                 // we need to save the hidden expectations somewhere
01152                 hidden_exp_store.resize(mbs,hidden_layer->size);
01153                 hidden_exp_store << hidden_expectations;
01154                 h = &hidden_exp_store;
01155             }
01156             else
01157             {
01158                 // hidden exp. must have been computed above if they were requested on port
01159                 PLASSERT(hidden && !hidden->isEmpty());
01160                 h = hidden;
01161             }
01162             // perform negative phase
01163             for( int i=0; i<n_Gibbs_steps_CD; i++)
01164             {
01165                 hidden_layer->generateSamples();
01166                 // (Negative phase) Generate visible samples.
01167                 sampleVisibleGivenHidden(hidden_layer->samples);
01168                 // compute corresponding hidden expectations.
01169                 computeHiddenActivations(visible_layer->samples);
01170                 hidden_activations_are_computed = false;
01171                 hidden_layer->computeExpectations();
01172             }
01173             PLASSERT(negative_phase_visible_samples);
01174             PLASSERT(negative_phase_hidden_expectations &&
01175                      negative_phase_hidden_expectations->isEmpty());
01176             PLASSERT(negative_phase_hidden_activations &&
01177                      negative_phase_hidden_activations->isEmpty());
01178             negative_phase_visible_samples->resize(mbs,visible_layer->size);
01179             *negative_phase_visible_samples << visible_layer->samples;
01180             negative_phase_hidden_expectations->resize(hidden_expectations.length(),
01181                                                        hidden_expectations.width());
01182             *negative_phase_hidden_expectations << hidden_expectations;
01183             const Mat& neg_hidden_act = hidden_layer->activations;
01184             negative_phase_hidden_activations->resize(neg_hidden_act.length(),
01185                                                       neg_hidden_act.width());
01186             *negative_phase_hidden_activations << neg_hidden_act;
01187 
01188             // compute the energy (again for now only in the binomial case)
01189             PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
01190 
01191             // note that h_act and h may point to hidden_act_store and hidden_exp_store
01192             PLASSERT(h_act && !h_act->isEmpty());
01193             PLASSERT(h && !h->isEmpty());
01194 
01195             contrastive_divergence->resize(hidden_expectations.length(),1);
01196             // compute contrastive divergence itself
01197             for (int i=0;i<mbs;i++)
01198             {
01199                 (*contrastive_divergence)(i,0) =
01200                     // positive phase energy
01201                     visible_layer->energy((*visible)(i))
01202                     - dot((*h)(i),(*h_act)(i))
01203                     // minus
01204                     -
01205                     // negative phase energy
01206                     (visible_layer->energy(visible_layer->samples(i))
01207                      - dot(hidden_expectations(i),hidden_layer->activations(i)));
01208             }
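                  // For binomial layers, E(v,h) = visible_layer->energy(v) - dot(h, act(v)),
                  // so each entry above is the expected energy of the positive-phase example
                  // under P(h|v) minus the expected energy of its negative-phase sample.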
01209         }
01210         else
01211             PLERROR("KLp0p1RBMModule: unknown configuration to compute contrastive_divergence\n"
01212                     "(currently only possible when the visible input is provided).\n");
01213         found_a_valid_configuration = true;
01214     }
01215 
01216 
01217     // Reset some class fields to ensure they are not reused by mistake.
01218     hidden_act = NULL;
01219     hidden_bias = NULL;
01220     weights = NULL;
01221     hidden_activations_are_computed = false;
01222 
01223 
01224 
01225     if (!found_a_valid_configuration)
01226     {
01227         /*
01228         if (visible)
01229             cout << "visible_empty : "<< (bool) visible->isEmpty() << endl;
01230         if (hidden)
01231             cout << "hidden_empty : "<< (bool) hidden->isEmpty() << endl;
01232         if (visible_sample)
01233             cout << "visible_sample_empty : "<< (bool) visible_sample->isEmpty() << endl;
01234         if (hidden_sample)
01235             cout << "hidden_sample_empty : "<< (bool) hidden_sample->isEmpty() << endl;
01236         if (visible_expectation)
01237             cout << "visible_expectation_empty : "<< (bool) visible_expectation->isEmpty() << endl;
01238 
01239         */
01240         PLERROR("In KLp0p1RBMModule::fprop - Unknown port configuration for module %s", name.c_str());
01241     }
01242 
01243     checkProp(ports_value);
01244 
01245 }
01246 
01248 // bpropAccUpdate //
01250 void KLp0p1RBMModule::bpropAccUpdate(const TVec<Mat*>& ports_value,
01251                                const TVec<Mat*>& ports_gradient)
01252 {
01253     PLASSERT( ports_value.length() == nPorts() );
01254     PLASSERT( ports_gradient.length() == nPorts() );
01255     Mat* visible_grad = ports_gradient[getPortIndex("visible")];
01256     Mat* hidden_grad = ports_gradient[getPortIndex("hidden.state")];
01257     Mat* visible = ports_value[getPortIndex("visible")];
01258     Mat* hidden = ports_value[getPortIndex("hidden.state")];
01259     hidden_act = ports_value[getPortIndex("hidden_activations.state")];
01260     Mat* reconstruction_error_grad = 0;
01261     Mat* hidden_bias_grad = ports_gradient[getPortIndex("hidden_bias")];
01262     weights = ports_value[getPortIndex("weights")];
01263     Mat* weights_grad = ports_gradient[getPortIndex("weights")];
01264     hidden_bias = ports_value[getPortIndex("hidden_bias")];
01265     Mat* contrastive_divergence_grad = NULL;
01266     Mat* KLp0p1 = ports_value[getPortIndex("KLp0p1")];
01267 
01268     // Ensure the gradient w.r.t. contrastive divergence is 1 (if provided).
01269     if (compute_contrastive_divergence) {
01270         contrastive_divergence_grad =
01271             ports_gradient[getPortIndex("contrastive_divergence")];
01272         if (contrastive_divergence_grad) {
01273             PLASSERT( !contrastive_divergence_grad->isEmpty() );
01274             PLASSERT( min(*contrastive_divergence_grad) >= 1 );
01275             PLASSERT( max(*contrastive_divergence_grad) <= 1 );
01276         }
01277     }
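          // (The contrastive divergence update itself is applied internally with
          // 'cd_learning_rate', so only a unit incoming gradient is supported on this port.)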
01278 
01279     if(reconstruction_connection)
01280         reconstruction_error_grad =
01281             ports_gradient[getPortIndex("reconstruction_error.state")];
01282 
01283     // Ensure the visible gradient is not provided as input. This is because we
01284     // accumulate more than once in 'visible_grad'.
01285     PLASSERT_MSG( !visible_grad || visible_grad->isEmpty(), "Cannot provide "
01286             "an input gradient w.r.t. visible units" );
01287 
01288     bool compute_visible_grad = visible_grad && visible_grad->isEmpty();
01289     bool compute_weights_grad = weights_grad && weights_grad->isEmpty();
01290 
01291     int mbs = (visible && !visible->isEmpty()) ? visible->length() : -1;
01292 
01293     if (hidden_grad && !hidden_grad->isEmpty())
01294     {
01295         // Note: the assert below is for behavior compatibility with previous
01296         // code. It might not be necessary, or might need to be modified.
01297         PLASSERT( visible && !visible->isEmpty() );
01298 
01299         // Note: we need to perform the following steps even if the gradient
01300         // learning rate is equal to 0. This is because we must propagate the
01301         // gradient to the visible layer, even though no update is required.
01302         setAllLearningRates(grad_learning_rate);
01303         PLASSERT( hidden && hidden_act );
01304         // Compute gradient w.r.t. activations of the hidden layer.
01305         hidden_layer->bpropUpdate(
01306                 *hidden_act, *hidden, hidden_act_grad, *hidden_grad,
01307                 false);
01308         if (hidden_bias_grad)
01309         {
01310             PLASSERT( hidden_bias_grad->isEmpty() &&
01311                       hidden_bias_grad->width() == hidden_layer->size );
01312             hidden_bias_grad->resize(mbs,hidden_layer->size);
01313             *hidden_bias_grad += hidden_act_grad;
01314         }
01315         // Compute gradient w.r.t. expectations of the visible layer (=
01316         // inputs).
01317         Mat* store_visible_grad = NULL;
01318         if (compute_visible_grad) {
01319             PLASSERT( visible_grad->width() == visible_layer->size );
01320             store_visible_grad = visible_grad;
01321         } else {
01322             // We do not actually need to store the gradient, but since it
01323             // is required in bpropUpdate, we provide a dummy matrix to
01324             // store it.
01325             store_visible_grad = &visible_exp_grad;
01326         }
01327         store_visible_grad->resize(mbs,visible_layer->size);
01328 
01329         if (weights)
01330         {
01331             int up = connection->up_size;
01332             int down = connection->down_size;
01333             PLASSERT( !weights->isEmpty() &&
01334                       weights_grad && weights_grad->isEmpty() &&
01335                       weights_grad->width() == up * down );
01336             weights_grad->resize(mbs, up * down);
01337             Mat w, wg;
01338             Vec v,h,vg,hg;
01339             for(int i=0; i<mbs; i++)
01340             {
01341                 w = Mat(up, down,(*weights)(i));
01342                 wg = Mat(up, down,(*weights_grad)(i));
01343                 v = (*visible)(i);
01344                 h = (*hidden_act)(i);
01345                 vg = (*store_visible_grad)(i);
01346                 hg = hidden_act_grad(i);
01347                 connection->petiteCulotteOlivierUpdate(
01348                     v,
01349                     w,
01350                     h,
01351                     vg,
01352                     wg,
01353                     hg,true);
01354             }
01355         }
01356         else
01357         {
01358             connection->bpropUpdate(
01359                 *visible, *hidden_act, *store_visible_grad,
01360                 hidden_act_grad, true);
01361         }
01362         partition_function_is_stale = true;
01363     }
01364 
01365     if (cd_learning_rate > 0 && minimize_log_likelihood) {
01366         PLASSERT( visible && !visible->isEmpty() );
01367         PLASSERT( hidden && !hidden->isEmpty() );
01368         setAllLearningRates(cd_learning_rate);
01369 
01370         // positive phase
01371         visible_layer->accumulatePosStats(*visible);
01372         hidden_layer->accumulatePosStats(*hidden);
01373         connection->accumulatePosStats(*visible,*hidden);
01374 
01375         // negative phase
01376         PLASSERT_MSG(hidden_layer->size<32 || visible_layer->size<32,
01377                      "To minimize exact log-likelihood of an RBM, hidden_layer->size "
01378                      "or visible_layer->size must be <32");
01379         // gradient of partition function
01380         if (hidden_layer->size > visible_layer->size)
01381             // do it by summing over visible configurations
01382         {
01383             PLASSERT(visible_layer->classname()=="RBMBinomialLayer");
01384             // assuming a binary input we sum over all bit configurations
01385             int n_configurations = 1 << visible_layer->size; // = 2^{visible_layer->size}
01386             energy_inputs.resize(1, visible_layer->size);
01387             Vec input = energy_inputs(0);
01388             // COULD BE DONE MORE EFFICIENTLY BY DOING MANY CONFIGURATIONS
01389             // AT ONCE IN A 'MINIBATCH'
01390             for (int c=0;c<n_configurations;c++)
01391             {
01392                 // convert integer c into a bit-wise visible representation
01393                 int x=c;
01394                 for (int i=0;i<visible_layer->size;i++)
01395                 {
01396                     input[i]= x & 1; // take least significant bit
01397                     x >>= 1; // and shift right (divide by 2)
01398                 }
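                      // e.g. with visible_layer->size == 3 and c == 6 (binary 110), the loop
                      // above yields input = [0, 1, 1] (least significant bit first).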
01399                 connection->setAsDownInput(input);
01400                 hidden_layer->getAllActivations(connection,0,false);
01401                 hidden_layer->computeExpectation();
01402                 visible_layer->accumulateNegStats(input);
01403                 hidden_layer->accumulateNegStats(hidden_layer->expectation);
01404                 connection->accumulateNegStats(input,hidden_layer->expectation);
01405             }
01406         }
01407         else
01408         {
01409             PLASSERT(hidden_layer->classname()=="RBMBinomialLayer");
01410             // assuming a binary hidden we sum over all bit configurations
01411             int n_configurations = 1 << hidden_layer->size; // = 2^{hidden_layer->size}
01412             energy_inputs.resize(1, hidden_layer->size);
01413             Vec h = energy_inputs(0);
01414             for (int c=0;c<n_configurations;c++)
01415             {
01416                 // convert integer c into a bit-wise hidden representation
01417                 int x=c;
01418                 for (int i=0;i<hidden_layer->size;i++)
01419                 {
01420                     h[i]= x & 1; // take least significant bit
01421                     x >>= 1; // and shift right (divide by 2)
01422                 }
01423                 connection->setAsUpInput(h);
01424                 visible_layer->getAllActivations(connection,0,false);
01425                 visible_layer->computeExpectation();
01426                 visible_layer->accumulateNegStats(visible_layer->expectation);
01427                 hidden_layer->accumulateNegStats(h);
01428                 connection->accumulateNegStats(visible_layer->expectation,h);
01429             }
01430         }
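              // Negative statistics are accumulated by enumerating every configuration of
              // whichever layer is smaller (hence the <32 restriction above); the update()
              // calls below combine them with the positive statistics gathered from the data.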
01431         // update
01432         visible_layer->update();
01433         hidden_layer->update();
01434         connection->update();
01435     }
01436     if (cd_learning_rate > 0 && !minimize_log_likelihood) {
01437         EXTREME_MODULE_LOG << "Performing contrastive divergence step in RBM '"
01438                            << name << "'" << endl;
01439         // Perform a step of contrastive divergence.
01440         PLASSERT( visible && !visible->isEmpty() );
01441         setAllLearningRates(cd_learning_rate);
01442         Mat* negative_phase_visible_samples =
01443             compute_contrastive_divergence?ports_value[getPortIndex("negative_phase_visible_samples.state")]:0;
01444         const Mat* negative_phase_hidden_expectations =
01445             compute_contrastive_divergence ?
01446                 ports_value[getPortIndex("negative_phase_hidden_expectations.state")]
01447                 : NULL;
01448         Mat* negative_phase_hidden_activations =
01449             compute_contrastive_divergence ?
01450                 ports_value[getPortIndex("negative_phase_hidden_activations.state")]
01451                 : NULL;
01452 
01453         PLASSERT( visible && hidden );
01454         PLASSERT( !negative_phase_visible_samples ||
01455                   !negative_phase_visible_samples->isEmpty() );
01456         if (!negative_phase_visible_samples)
01457         {
01458             // Generate hidden samples.
01459             hidden_layer->setExpectations(*hidden);
01460             for( int i=0; i<n_Gibbs_steps_CD; i++)
01461             {
01462                 hidden_layer->generateSamples();
01463                 // (Negative phase) Generate visible samples.
01464                 sampleVisibleGivenHidden(hidden_layer->samples);
01465                 // compute corresponding hidden expectations.
01466                 computeHiddenActivations(visible_layer->samples);
01467                 hidden_layer->computeExpectations();
01468             }
01469             PLASSERT( !compute_contrastive_divergence );
01470             PLASSERT( !negative_phase_hidden_expectations );
01471             PLASSERT( !negative_phase_hidden_activations );
01472             negative_phase_hidden_expectations = &(hidden_layer->getExpectations());
01473             negative_phase_visible_samples = &(visible_layer->samples);
01474             negative_phase_hidden_activations = &(hidden_layer->activations);
01475         }
01476         PLASSERT( negative_phase_hidden_expectations &&
01477                   !negative_phase_hidden_expectations->isEmpty() );
01478         PLASSERT( negative_phase_hidden_activations &&
01479                   !negative_phase_hidden_activations->isEmpty() );
01480 
01481         // Perform update.
01482         visible_layer->update(*visible, *negative_phase_visible_samples);
01483 
01484         bool connection_update_is_done = false;
01485         if (compute_weights_grad) {
01486             // First resize the 'weights_grad' matrix.
01487             int up = connection->up_size;
01488             int down = connection->down_size;
01489             PLASSERT( weights && !weights->isEmpty() &&
01490                       weights_grad->width() == up * down );
01491             weights_grad->resize(mbs, up * down);
01492 
01493             if (standard_cd_weights_grad)
01494             {
01495                 // Perform both computation of weights gradient and do update
01496                 // at the same time.
01497                 Mat wg;
01498                 Vec vp, hp, vn, hn;
01499                 for(int i=0; i<mbs; i++)
01500                 {
01501                     vp = (*visible)(i);
01502                     hp = (*hidden)(i);
01503                     vn = (*negative_phase_visible_samples)(i);
01504                     hn = (*negative_phase_hidden_expectations)(i);
01505                     wg = Mat(up, down,(*weights_grad)(i));
01506                     connection->petiteCulotteOlivierCD(
01507                             vp, hp,
01508                             vn,
01509                             hn,
01510                             wg,
01511                             true);
01512                     connection_update_is_done = true;
01513                 }
01514             }
01515         }
01516         if (!standard_cd_weights_grad || !standard_cd_grad) {
01517             // Compute 'true' gradient of contrastive divergence w.r.t.
01518             // the weights matrix.
01519             int up = connection->up_size;
01520             int down = connection->down_size;
01521             Mat* weights_g = weights_grad;
01522             if (!weights_g) {
01523                 // We need to store the gradient in another matrix.
01524                 store_weights_grad.resize(mbs, up * down);
01525                 store_weights_grad.clear();
01526                 weights_g = & store_weights_grad;
01527             }
01528             PLASSERT( connection->classname() == "RBMMatrixConnection" &&
01529                       visible_layer->classname() == "RBMBinomialLayer" &&
01530                       hidden_layer->classname() == "RBMBinomialLayer" );
01531 
01532             for (int k = 0; k < mbs; k++) {
01533                 int idx = 0;
01534                 for (int i = 0; i < up; i++) {
01535                     real p_i_p = (*hidden)(k, i);
01536                     real a_i_p = (*hidden_act)(k, i);
01537                     real p_i_n =
01538                         (*negative_phase_hidden_expectations)(k, i);
01539                     real a_i_n =
01540                         (*negative_phase_hidden_activations)(k, i);
01541 
01542                     real scale_p = 1 + (1 - p_i_p) * a_i_p;
01543                     real scale_n = 1 + (1 - p_i_n) * a_i_n;
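                          // The factor scale = 1 + (1-p)*a comes from differentiating the
                          // -sum_i p_i*a_i term of the CD cost w.r.t. W_ij:
                          //   d(p_i*a_i)/dW_ij = (p_i*(1-p_i)*a_i + p_i) * v_j
                          //                    = p_i * v_j * (1 + (1-p_i)*a_i)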
01544                     for (int j = 0; j < down; j++, idx++) {
01545                         // Weight 'idx' is the (i,j)-th element in the
01546                         // 'weights' matrix.
01547                         real v_j_p = (*visible)(k, j);
01548                         real v_j_n =
01549                             (*negative_phase_visible_samples)(k, j);
01550                         (*weights_g)(k, idx) +=
01551                             p_i_n * v_j_n * scale_n     // Negative phase.
01552                             -(p_i_p * v_j_p * scale_p); // Positive phase.
01553                     }
01554                 }
01555             }
01556             if (!standard_cd_grad) {
01557                 // Update connection manually.
01558                 Mat& weights = ((RBMMatrixConnection*)
01559                                 get_pointer(connection))->weights;
01560                 real lr = cd_learning_rate / mbs;
01561                 for (int k = 0; k < mbs; k++) {
01562                     int idx = 0;
01563                     for (int i = 0; i < up; i++)
01564                         for (int j = 0; j < down; j++, idx++)
01565                             weights(i, j) -= lr * (*weights_g)(k, idx);
01566                 }
01567                 connection_update_is_done = true;
01568             }
01569         }
01570         if (!connection_update_is_done)
01571             // Perform standard update of the connection.
01572             connection->update(*visible, *hidden,
01573                     *negative_phase_visible_samples,
01574                     *negative_phase_hidden_expectations);
01575 
01576         Mat* hidden_bias_g = hidden_bias_grad;
01577         if (!standard_cd_grad && !hidden_bias_grad) {
01578             // We need to compute the CD gradient w.r.t. bias of hidden layer,
01579             // but no gradient storage was provided from the outside. Thus we need
01580             // another matrix to store this gradient.
01581             store_hidden_bias_grad.resize(mbs, hidden_layer->size);
01582             store_hidden_bias_grad.clear();
01583             hidden_bias_g = & store_hidden_bias_grad;
01584         }
01585 
01586         if (hidden_bias_g)
01587         {
01588             if (hidden_bias_g->isEmpty()) {
01589                 PLASSERT(hidden_bias_g->width() == hidden_layer->size);
01590                 hidden_bias_g->resize(mbs,hidden_layer->size);
01591             }
01592             PLASSERT_MSG( hidden_layer->classname() == "RBMBinomialLayer" &&
01593                           visible_layer->classname() == "RBMBinomialLayer",
01594                           "Only implemented for binomial layers" );
01595             // d(contrastive_divergence)/dhidden_bias
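                  // The standard branch below uses the usual CD bias gradient p_i_n - p_i_p;
                  // the non-standard branch adds the p*(1-p)*a terms obtained by also
                  // differentiating the -sum_i p_i*a_i energy terms through the sigmoid
                  // (da_i/dc_i = 1 for a binomial hidden unit).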
01596             for (int k = 0; k < hidden_bias_g->length(); k++) {
01597                 for (int i = 0; i < hidden_bias_g->width(); i++) {
01598                     real p_i_p = (*hidden)(k, i);
01599                     real a_i_p = (*hidden_act)(k, i);
01600                     real p_i_n = (*negative_phase_hidden_expectations)(k, i);
01601                     real a_i_n = (*negative_phase_hidden_activations)(k, i);
01602                     (*hidden_bias_g)(k, i) +=
01603                         standard_cd_bias_grad ? p_i_n - p_i_p :
01604                         p_i_n * (1 - p_i_n) * a_i_n + p_i_n     // Neg. phase
01605                      -( p_i_p * (1 - p_i_p) * a_i_p + p_i_p );  // Pos. phase
01606 
01607                 }
01608             }
01609         }
01610 
01611         if (standard_cd_grad) {
01612             hidden_layer->update(*hidden, *negative_phase_hidden_expectations);
01613         } else {
01614             PLASSERT( hidden_layer->classname() == "RBMBinomialLayer" );
01615             // Update hidden layer by hand.
01616             Vec& bias = hidden_layer->bias;
01617             real lr = cd_learning_rate / mbs;
01618             for (int i = 0; i < mbs; i++)
01619                 bias -= lr * (*hidden_bias_g)(i);
01620         }
01621 
01622 
01623         partition_function_is_stale = true;
01624     } else {
01625         PLCHECK_MSG( !contrastive_divergence_grad ||
01626                      (!hidden_bias_grad && !weights_grad),
01627                 "You currently cannot compute the "
01628                 "gradient of contrastive divergence w.r.t. external ports "
01629                 "when 'cd_learning_rate' is set to 0" );
01630     }
01631 
01632     if (reconstruction_error_grad && !reconstruction_error_grad->isEmpty()) {
01633         setAllLearningRates(grad_learning_rate);
01634         PLASSERT( reconstruction_connection != 0 );
01635         // Perform gradient descent on Autoassociator reconstruction cost
01636         Mat* visible_reconstruction = ports_value[getPortIndex("visible_reconstruction.state")];
01637         Mat* visible_reconstruction_activations = ports_value[getPortIndex("visible_reconstruction_activations.state")];
01638         Mat* reconstruction_error = ports_value[getPortIndex("reconstruction_error.state")];
01639         PLASSERT( hidden != 0 );
01640         PLASSERT( visible  && hidden_act &&
01641                   visible_reconstruction && visible_reconstruction_activations &&
01642                   reconstruction_error);
01643         //int mbs = reconstruction_error_grad->length();
01644 
01645         PLCHECK_MSG( !weights, "In KLp0p1RBMModule::bpropAccUpdate(): reconstruction cost "
01646                      "for conditional weights is not implemented");
01647 
01648         // Backprop reconstruction gradient
01649 
01650         // Must change visible_layer's expectation
01651         visible_layer->getExpectations() << *visible_reconstruction;
01652         visible_layer->bpropNLL(*visible,*reconstruction_error,
01653                                 visible_act_grad);
01654 
01655         // Combine with incoming gradient
01656         PLASSERT( (*reconstruction_error_grad).width() == 1 );
01657         for (int t=0;t<mbs;t++)
01658             visible_act_grad(t) *= (*reconstruction_error_grad)(t,0);
01659 
01660         // Visible bias update
01661         columnMean(visible_act_grad, visible_bias_grad);
01662         visible_layer->update(visible_bias_grad);
01663 
01664         // Reconstruction connection update
01665         hidden_exp_grad.resize(mbs, hidden_layer->size);
01666         hidden_exp_grad.clear();
01667         hidden_exp_grad.resize(0, hidden_layer->size);
01668 
01669         TVec<Mat*> rec_ports_value(2);
01670         rec_ports_value[0] = visible_reconstruction_activations;
01671         rec_ports_value[1] = hidden;
01672         TVec<Mat*> rec_ports_gradient(2);
01673         rec_ports_gradient[0] = &visible_act_grad;
01674         rec_ports_gradient[1] = &hidden_exp_grad;
01675 
01676         reconstruction_connection->bpropAccUpdate( rec_ports_value,
01677                                                    rec_ports_gradient );
01678 
01679         // Hidden layer bias update
01680         hidden_layer->bpropUpdate(*hidden_act,
01681                                   *hidden, hidden_act_grad,
01682                                   hidden_exp_grad, false);
01683         if (hidden_bias_grad)
01684         {
01685             if (hidden_bias_grad->isEmpty()) {
01686                 PLASSERT( hidden_bias_grad->width() == hidden_layer->size );
01687                 hidden_bias_grad->resize(mbs,hidden_layer->size);
01688             }
01689             *hidden_bias_grad += hidden_act_grad;
01690         }
01691         // Connection update
01692         if(compute_visible_grad)
01693         {
01694             // The length of 'visible_grad' must be either 0 (if not computed
01695             // previously) or the mini-batch size (otherwise).
01696             PLASSERT( visible_grad->width() == visible_layer->size &&
01697                       ( visible_grad->length() == 0 ||
01698                         visible_grad->length() == mbs ) );
01699             visible_grad->resize(mbs, visible_grad->width());
01700             connection->bpropUpdate(
01701                 *visible, *hidden_act,
01702                 *visible_grad, hidden_act_grad, true);
01703         }
01704         else
01705         {
01706             visible_exp_grad.resize(mbs,visible_layer->size);
01707             connection->bpropUpdate(
01708                 *visible, *hidden_act,
01709                 visible_exp_grad, hidden_act_grad, true);
01710         }
01711         partition_function_is_stale = true;
01712     }
01713 
01714     // compute gradient of KLp0p1 cost, given visible input
01715     if (klp0p1_learning_rate>0 && visible && !visible->isEmpty())
01716     {
01717         // WE ASSUME THAT THIS BPROP IS CALLED JUST AFTER THE CORRESPONDING FPROP!!!
01718         // consequently, we have
01719         //   * P(h_i=1|x^k) for each x^k in the training set, in hidden_layer->expectations
01720         //   * every h configuration in conf_hidden_layer->samples
01721         //   * P(visible_j=1|h) for each h configuration, in conf_visible_layer->expectations
01722         //   * x^t for every t in the input visible, in *visible
01723         //   * -log P1(x^t) for each input visible(t) in KLp0p1(t,0)
01724         //
01725         // Since C(x) = - log sum_h P(x|h) sum_k P(h|x^k), dC/dsum = -1/sum = -1/exp(-C)=-exp(C)
01726         // We want to compute
01727         //   dC(x)/dWij = (-exp(C(x)))
01728         //       sum_{k=1}^n sum_h P(x|h) P(h|x^k) (h_i(x_j - P(x_j=1|h)) + x_j^k(h_i - P(h_i=1|x^k)))
01729         //
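              // In the loops below, lp accumulates log( exp(C(x^t)) P(h|x^k) P(x^t|h) ),
              // so coeff = exp(lp) is the weight of each (k,h) term in dC(x^t)/dtheta;
              // the bias and weight updates then apply gradient descent on C, accumulated
              // term by term inside the loops (e.g. W(i,j) -= klp0p1_learning_rate * grad).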
01730         PLASSERT_MSG(KLp0p1 && !KLp0p1->isEmpty(), "Must compute KLp0p1 in order to compute its gradient, connect that port!");
01731         int mbs=visible->length();
01732         int n=training_set.length();
01733         PLASSERT(connection->classname()=="RBMMatrixConnection");
01734         PP<RBMMatrixConnection> matrix_connection = PP<RBMMatrixConnection>(connection);
01735         hidden_layer->setBatchSize(n);
01736         visible_layer->setBatchSize(n);
01737         Mat& W = /* weights ? *weights :*/ matrix_connection->weights;
01738         Vec& hidden_bias = hidden_layer->bias;
01739         Vec& visible_bias = visible_layer->bias;
01740         const Mat& X=visible_layer->getExpectations();
01741         int n_configurations = 1 << hidden_layer->size; // = 2^{hidden_layer->size}
01742         //real logn=safelog(n);
01743         // we only computed the activations in the fprop
01744         conf_visible_layer->computeExpectations(); 
01745         const Mat& pvisible_given_H = conf_visible_layer->getExpectations();
01746         const Mat& ph_given_X = hidden_layer->getExpectations();
01747         for (int t=0;t<mbs;t++)
01748         {
01749             Vec xt = (*visible)(t);
01750             for (int k=0;k<n;k++)
01751             {
01752                 Vec ah_given_xk = hidden_layer->activations(k);
01753                 Vec ph_given_xk = ph_given_X(k);
01754                 Vec xk = X(k);
01755                 for (int c=0;c<n_configurations;c++)
01756                 {
01757                     Vec h = conf_hidden_layer->samples(c);
01758                     Vec avisible_given_h=conf_visible_layer->activations(c);
01759                     // KLp0p1(x) = -log p1(x) - logn
01760                     real lp = (*KLp0p1)(t,0); // lp = log (exp(C(x^t)))
01761                     // compute and multiply exp(lp) by P(h|x^k)
01762                     for (int i=0;i<hidden_layer->size;i++)
01763                     {
01764                         real act=ah_given_xk[i];
01765                         // note that log sigmoid(act) = -softplus(-act)
01766                         // and       log(1 - sigmoid(act)) = -act -softplus(-act)
01767                         // so h*log(sigmoid(act))+(1-h)*log(1-sigmoid(act)) = act*h-softplus(act)
01768                         lp += h[i]*act-softplus(act);
01769                     }
01770                     // now lp = log ( exp(C(x^t)) P(h|x^k) )
01771 
01772                     // compute and multiply by P(x^t|h)
01773                     for (int j=0;j<visible_layer->size;j++)
01774                     {
01775                         real act=avisible_given_h[j];
01776                         lp += act*xt[j] - softplus(act);
01777                     }
01778                     // now lp = log ( exp(C(x^t)) P(h|x^k)  P(x^t|h) )
01779                     real coeff = exp(lp);
01780                     Vec pvisible_given_h=pvisible_given_H(c);
01781                     for (int j=0;j<visible_layer->size;j++)
01782                     {
01783                         visible_bias[j] +=
01784                             klp0p1_learning_rate*coeff*(xt[j]-pvisible_given_h[j]);
01785                     }
01786                     for (int i=0;i<hidden_layer->size;i++)
01787                     {
01788                         hidden_bias[i] += klp0p1_learning_rate*coeff*(h[i]-ph_given_xk[i]);
01789                         for (int j=0;j<visible_layer->size;j++)
01790                         {
01791                             real grad = - coeff *
01792                                 (  xk[j] * (h[i]  - ph_given_xk[i])
01793                                  + h[i]  * (xt[j] - pvisible_given_h[j]));
01794 
01795 #if 0
01796                             if (compute_weights_grad) {
01797                                 weights_grad->resize(mbs,
01798                                         weights_grad->width());
01799                                 (*weights_grad)(0, i * visible_layer->size + j)
01800                                     += grad;
01801                             }
01802 #else
01803                             W(i,j) -= klp0p1_learning_rate * grad;
01804 #endif
01805                         }
01806                     }
01807                 }
01808             }
01809         }
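              // Note: this exact update costs O(mbs * n * 2^hidden_size * hidden_size * visible_size)
              // operations, which is why the KLp0p1 criterion is only practical for very small RBMs.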
01810         hidden_layer->setBatchSize(mbs);
01811         visible_layer->setBatchSize(mbs);
01812     }
01813 
01814     // Explicit error message in the case of the 'visible' port.
01815     if (compute_visible_grad && visible_grad->isEmpty())
01816         PLERROR("In KLp0p1RBMModule::bpropAccUpdate - The gradient with respect "
01817                 "to the 'visible' port was requested, but not computed");
01818 
01819     checkProp(ports_gradient);
01820 
01821     // Reset pointers to ensure we do not reuse them by mistake.
01822     hidden_act = NULL;
01823     weights = NULL;
01824     hidden_bias = NULL;
01825 }
01826 
01828 // forget //
01830 void KLp0p1RBMModule::forget()
01831 {
01832     DBG_MODULE_LOG << "Forgetting KLp0p1RBMModule '" << name << "'" << endl;
01833     PLASSERT( hidden_layer && visible_layer && connection );
01834     hidden_layer->forget();
01835     visible_layer->forget();
01836     connection->forget();
01837     if (reconstruction_connection)
01838         reconstruction_connection->forget();
01839 }
01840 
01842 // getPortIndex //
01844 int KLp0p1RBMModule::getPortIndex(const string& port)
01845 {
01846     map<string, int>::const_iterator it = portname_to_index.find(port);
01847     if (it == portname_to_index.end())
01848         return -1;
01849     else
01850         return it->second;
01851 }
01852 
01854 // getPorts //
01856 const TVec<string>& KLp0p1RBMModule::getPorts()
01857 {
01858     return ports;
01859 }
01860 
01862 // getPortSizes //
01864 const TMat<int>& KLp0p1RBMModule::getPortSizes()
01865 {
01866     return port_sizes;
01867 }
01868 
01870 // bpropDoesNothing //
01872 /* THIS METHOD IS OPTIONAL
01873 bool KLp0p1RBMModule::bpropDoesNothing()
01874 {
01875 }
01876 */
01877 
01879 // setAllLearningRates //
01881 void KLp0p1RBMModule::setAllLearningRates(real lr)
01882 {
01883     hidden_layer->setLearningRate(lr);
01884     visible_layer->setLearningRate(lr);
01885     connection->setLearningRate(lr);
01886     if(reconstruction_connection)
01887         reconstruction_connection->setLearningRate(lr);
01888 }
01889 
01891 // sampleHiddenGivenVisible //
01893 void KLp0p1RBMModule::sampleHiddenGivenVisible(const Mat& visible)
01894 {
01895     computeHiddenActivations(visible);
01896     hidden_layer->computeExpectations();
01897     hidden_layer->generateSamples();
01898 }
01899 
01901 // sampleVisibleGivenHidden //
01903 void KLp0p1RBMModule::sampleVisibleGivenHidden(const Mat& hidden)
01904 {
01905     computeVisibleActivations(hidden);
01906     visible_layer->computeExpectations();
01907     visible_layer->generateSamples();
01908 }
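      // Together, sampleHiddenGivenVisible and sampleVisibleGivenHidden implement the two
      // half-steps of the block Gibbs chain used for the negative phase above.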
01909 
01911 // setLearningRate //
01913 void KLp0p1RBMModule::setLearningRate(real dynamic_learning_rate)
01914 {
01915     // For safety, force the user to go through the two different learning
01916     // rates. May need to be removed if it causes unwanted crashes.
01917     PLERROR("In KLp0p1RBMModule::setLearningRate - Do not use this method; instead "
01918             "explicitly use 'cd_learning_rate' and 'grad_learning_rate'");
01919 }
01920 
01921 } // end of namespace PLearn
01922 
01923 
01924 /*
01925   Local Variables:
01926   mode:c++
01927   c-basic-offset:4
01928   c-file-style:"stroustrup"
01929   c-file-offsets:((innamespace . 0)(inline-open . 0))
01930   indent-tabs-mode:nil
01931   fill-column:79
01932   End:
01933 */
01934 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :