FNetLayerVariable.cc
// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 2005 Yoshua Bengio

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
// 
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
// 
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// 
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: FNetLayerVariable.cc 5370 2006-04-12 15:27:55Z tihocan $
 * This file is part of the PLearn library.
 ******************************************************* */

//#include "ProductTransposeVariable.h"
//#include "ProductVariable.h"
//#include "TransposeProductVariable.h"
#include "FNetLayerVariable.h"
#include <plearn/math/random.h>
#include <plearn/math/TMat_maths.h>
#include <plearn/math/TMat_maths_specialisation.h>

namespace PLearn {
using namespace std;


// Single layer of a neural network, with acceleration tricks

PLEARN_IMPLEMENT_OBJECT(FNetLayerVariable,
                        "Single layer of a neural network, with acceleration tricks",
                        "This variable takes four inputs:\n"
                        "(1) the input of the layer (minibatch_size x n_inputs) matrix,\n"
                        "(2) the weights matrix (n_hidden x n_inputs),\n"
                        "(3) the bias vector (n_hidden) b, and\n"
                        "(4) a 2-element parameter vector c = (c1,c2) which is used when inhibition is active.\n"
                        "For each row vector x[k] of the input matrix, it computes the following\n"
                        "output row vector y[k] in the fprop function:\n"
                        "  y[k,i] = sigmoid(a[k,i])\n"
                        "where\n"
                        "  a[k,i] = dot(W[i],u[k,i]) + b[i] - 1_{inhibit_next_units} c1*sigmoid(c2* avg_{j<i} y[k,j])\n"
                        "where u[k,i] is the column vector obtained by optionally normalizing the x[k] vector:\n"
                        "  u[k,i] = (x[k] - mu[i])*invs[i]\n"
                        "and the free parameters are the W's, the b's, and c1 and c2.\n"
                        "The negative sum over j<i is optional and should help the units of the layer\n"
                        "to differentiate, since when one is active (y[k,j] close to 1), it inhibits\n"
                        "the units that follow it (y[k,i], with i>j). The normalization parameters\n"
                        "mu[i] and invs[i] are estimated by an exponential moving average of the\n"
                        "inputs x[k] for which |dC/da[k,i]| was above a threshold, described below.\n"
                        "The exponential moving average is computed with the option value\n"
                        "exp_moving_average_coefficient, also used to compute the threshold.\n"
                        "The moving averages are only updated during the bprop phase, i.e. only\n"
                        "during training.\n"
                        "\n"
                        "In the bprop phase, unlike with other Variable classes, this class can\n"
                        "optionally compute a pseudo-gradient which is not the actual gradient.\n"
                        "The pseudo-gradient is obtained by zeroing the gradient on some of the\n"
                        "a[k,i] terms before continuing the gradient propagation to the input and\n"
                        "weight matrices. The gradient on a[k,i] is zeroed if its absolute value is\n"
                        "below a threshold gradient_threshold, which is adapted to represent\n"
                        "approximately the fraction average_error_fraction_to_threshold\n"
                        "of the exponential moving average of the |dC/da[k,i]| over k,i, and\n"
                        "past examples, with the exponential moving average being done with\n"
                        "the value of the exp_moving_average_coefficient option.\n"
    );
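
// Illustrative sketch of the activation described above, for a single unit i of
// one minibatch row. This is a hedged, standalone helper that is not used by the
// class; all argument names are hypothetical stand-ins for the members manipulated
// in fprop() below. It computes
//   y[k,i] = sigmoid(dot(W[i],u[k,i]) + b[i] - c1*sigmoid(c2*avg_{j<i} y[k,j])).
static real illustrative_unit_output(const real* x, const real* W_i,
                                     const real* mu_i, const real* invs_i,
                                     real b_i, real c1, real c2,
                                     real sum_of_previous_y, int i, int n_inputs,
                                     bool normalize, bool inhibit)
{
    real a = b_i;
    for (int j = 0; j < n_inputs; j++) {
        // u[k,i] = (x[k] - mu[i]) * invs[i] when inputs are normalized, else x[k].
        real u_j = normalize ? (x[j] - mu_i[j]) * invs_i[j] : x[j];
        a += W_i[j] * u_j;
    }
    if (inhibit && i > 0)
        // Inhibition by the squashed average of the outputs of the previous units.
        a -= c1 * sigmoid(c2 * sum_of_previous_y / i);
    return sigmoid(a);
}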

FNetLayerVariable::FNetLayerVariable()
    : c1_(0),
      c2_(0),
      n_inputs(-1), // MUST BE SPECIFIED BY THE USER
      n_hidden(-1), // MUST BE SPECIFIED BY THE USER
      minibatch_size(1),
      inhibit_next_units(true),
      inhibit_by_sum(false),
      squashed_inhibition(true),
      normalize_inputs(true),
      backprop_to_inputs(false),
      exp_moving_average_coefficient(0.001),
      average_error_fraction_to_threshold(0.5),
      min_stddev(1e-2)
{
    avg_act_gradient = -1;
}

FNetLayerVariable::FNetLayerVariable(Var inputs,  // x
                                     Var weights,  // W
                                     Var biases, // b
                                     Var inhibition_weights, // c
                                     bool _inhibit_next_units,
                                     bool _normalize_inputs,
                                     bool _backprop_to_inputs,
                                     real _exp_moving_average_coefficient,
                                     real _average_error_fraction_to_threshold)
    : inherited(inputs & weights &
                biases & inhibition_weights,
                inputs->length(), weights->length()),
      c1_(0),
      c2_(0),
      n_inputs(inputs->matValue.width()),
      n_hidden(weights->matValue.length()),
      minibatch_size(inputs->matValue.length()),
      inhibit_next_units(_inhibit_next_units),
      inhibit_by_sum(false),
      squashed_inhibition(true),
      normalize_inputs(_normalize_inputs),
      backprop_to_inputs(_backprop_to_inputs),
      exp_moving_average_coefficient(_exp_moving_average_coefficient),
      average_error_fraction_to_threshold(_average_error_fraction_to_threshold),
      min_stddev(1e-2)
{
    avg_act_gradient = -1;
    build_();
}
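
// Hedged usage sketch (illustrative only; the function name and dimensions are
// made up, and it relies on the Var(length) / Var(length, width) constructors
// used in build_() below, plus Var's usual construction from a Variable pointer):
// one layer mapping a minibatch of 10 examples with 5 inputs each to 20 hidden units.
static Var illustrativeFNetLayer()
{
    Var x(10, 5);   // inputs:  minibatch_size x n_inputs
    Var W(20, 5);   // weights: n_hidden x n_inputs
    Var b(20);      // biases:  n_hidden
    Var c(2);       // (c1, c2) inhibition parameters
    return new FNetLayerVariable(x, W, b, c,
                                 true,    // inhibit_next_units
                                 true,    // normalize_inputs
                                 false,   // backprop_to_inputs
                                 0.001,   // exp_moving_average_coefficient
                                 0.5);    // average_error_fraction_to_threshold
}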

void
FNetLayerVariable::build()
{
    inherited::build();
    build_();
}

void
FNetLayerVariable::build_()
{
    if (varray.length() == 0 && n_inputs == -1)
        // Cannot do anything yet.
        return;
    if (   varray.size() != 4
           || n_hidden      != varray[1].length()
           || n_inputs      != varray[1].width()  )
    {
        varray.resize(4);
        if (varray[0])
            n_inputs = varray[0]->width();    // Get n_inputs from first var if present.
        varray[1] = Var(n_hidden,n_inputs);
        varray[2] = Var(n_hidden);
        varray[3] = Var(2);
    }
    if (varray[0]) {
        if (n_inputs != varray[0]->width())
            PLERROR("In FNetLayerVariable: input var 0 should have width = %d = n_inputs, but is %d\n",n_inputs, varray[0]->width());
        if (n_hidden != varray[1]->length())
            PLERROR("In FNetLayerVariable: input var 1 should have length = %d = n_hidden, but is %d\n",n_hidden, varray[1]->length());
        if (minibatch_size != varray[0]->length())
            PLERROR("In FNetLayerVariable: input var 0 should have length = %d = minibatch_size, but is %d\n",minibatch_size, varray[0]->length());
        if (n_inputs != varray[1]->width())
            PLERROR("In FNetLayerVariable: the sizes of the inputs and weights are not compatible for an affine application of the weights on the inputs");
        if (varray[2]->size() != n_hidden)
            PLERROR("In FNetLayerVariable: the bias vector should have the same length as the number of rows of the weights matrix.");
        if (normalize_inputs && (mu.length() != n_hidden || mu.width() != n_inputs)) {
            mu.resize(n_hidden, n_inputs);
            mu.clear();
            invs.resize(n_hidden, n_inputs);
            invs.fill(1.0);
            mu2.resize(n_hidden, n_inputs);
            mu2.fill(0);
        } else
            // TODO Remove later, this is just a safety check.
            PLWARNING("In FNetLayerVariable::build_ - Using previously saved normalization parameters");
        inh.resize(minibatch_size, n_hidden);
        cum_inh.resize(minibatch_size, n_hidden);
        u.resize(minibatch_size);
        if (normalize_inputs)
            for (int i=0;i<minibatch_size;i++)
                u[i].resize(n_hidden,n_inputs);
        no_bprop_has_been_done = true;
        gradient_threshold = 0;
        if (avg_act_gradient < 0)
            avg_act_gradient = 0.0;
        // Initialize parameters.
        real delta = real(1.0 / n_inputs);
        fill_random_uniform(varray[1]->matValue, -delta, delta);
        varray[2]->matValue.fill(0.0);
        varray[3]->matValue.fill(1.0);
        if (!fast_exact_is_equal(c1_, 0))
            varray[3]->value[0] = c1_;
        if (!fast_exact_is_equal(c2_, 0))
            varray[3]->value[1] = c2_;
        // Set correct sizes.
        resize(minibatch_size, n_hidden);
    }
}

// declareOptions //
void FNetLayerVariable::declareOptions(OptionList& ol)
{
    declareOption(ol, "n_inputs", &FNetLayerVariable::n_inputs, OptionBase::buildoption, 
                  "    Number of inputs of the layer, for each element of the mini-batch.\n");

    declareOption(ol, "n_hidden", &FNetLayerVariable::n_hidden, OptionBase::buildoption, 
                  "    Number of outputs of the layer (hidden units), for each element of the mini-batch.\n");

    declareOption(ol, "minibatch_size", &FNetLayerVariable::minibatch_size, OptionBase::buildoption, 
                  "    Number of elements of each mini-batch.\n");

    declareOption(ol, "inhibit_next_units", &FNetLayerVariable::inhibit_next_units, OptionBase::buildoption, 
                  "    If true then the activation of unit i contains minus c1 times the (optionally squashed) sum or\n"
                  "    average of the outputs of all units j<i, i.e. a[k,i] = dot(W[i],u[k,i]) + b[i] - c1*sigmoid(c2*avg_{j<i} y[k,j]).\n");

    declareOption(ol, "inhibit_by_sum", &FNetLayerVariable::inhibit_by_sum, OptionBase::buildoption, 
                  "    If true, then the inhibition will be based on the sum of the previous units'\n"
                  "    activations, instead of their average.");

    declareOption(ol, "squashed_inhibition", &FNetLayerVariable::squashed_inhibition, OptionBase::buildoption, 
                  "    If true, then the inhibition will be squashed by a sigmoid (if false, c2 is not used).");

    declareOption(ol, "normalize_inputs", &FNetLayerVariable::normalize_inputs, OptionBase::buildoption, 
                  "    If true, then normalized input u[k,i]=(x[k] - mu[i])*invs[i], otherwise u[k,i]=x[k].\n"
                  "    mu[i,j] is a moving average of the x[k,j]'s when |dC/da[k,i]| is above gradient_threshold.\n"
                  "    Similarly, mu2[i,j] is a moving average of x[k,j]*x[k,j] when |dC/da[k,i]| is above gradient_threshold\n"
                  "    and invs[i,j] = 1/sqrt(mu2[i,j] - mu[i,j]*mu[i,j]). The moving averages are exponential moving\n"
                  "    averages with coefficient exp_moving_average_coefficient.\n");

    declareOption(ol, "min_stddev", &FNetLayerVariable::min_stddev, OptionBase::buildoption, 
                  "Used only when 'normalize_inputs' is true, any input whose standard deviation is less than this value\n"
                  "will be considered as having this standard deviation (prevents numerical problems with constant inputs).");

    declareOption(ol, "backprop_to_inputs", &FNetLayerVariable::backprop_to_inputs, OptionBase::buildoption, 
                  "    If true then gradient is propagated to the inputs. When this object is the first layer\n"
                  "    of a neural network, it is more efficient to set this option to false (which is its default).\n");

    declareOption(ol, "exp_moving_average_coefficient", &FNetLayerVariable::exp_moving_average_coefficient, OptionBase::buildoption, 
                  "    The moving average coefficient used in updating mu, var and gradient_threshold, with\n"
                  "    updates of the form\n"
                  "       newvalue = (1 - exp_moving_average_coefficient)*oldvalue + exp_moving_average_coefficient*summand\n"
                  "    in order to obtain a moving average of the summands.\n");

    declareOption(ol, "average_error_fraction_to_threshold", &FNetLayerVariable::average_error_fraction_to_threshold, 
                  OptionBase::buildoption, 
                  "    The fraction of the average of |dC/da[k,i]| that determines the gradient_threshold.\n");

    declareOption(ol, "c1", &FNetLayerVariable::c1_, OptionBase::buildoption, 
                  "    Fixed coefficient c1. '0' means it will be optimized, starting from 1.\n");

    declareOption(ol, "c2", &FNetLayerVariable::c2_, OptionBase::buildoption, 
                  "    Fixed coefficient c2. '0' means it will be optimized, starting from 1.\n");

    // Learnt options.

    declareOption(ol, "avg_act_gradient", &FNetLayerVariable::avg_act_gradient, OptionBase::learntoption, 
                  "The exponential moving average of the absolute value of the gradient.");

    declareOption(ol, "mu", &FNetLayerVariable::mu, OptionBase::learntoption, 
                  "The centers for normalization.");

    declareOption(ol, "mu2", &FNetLayerVariable::mu2, OptionBase::learntoption, 
                  "The squared centers for computation of the variance.");

    declareOption(ol, "invs", &FNetLayerVariable::invs, OptionBase::learntoption, 
                  "The normalization factors.");

    inherited::declareOptions(ol);
}
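
// Sketch of the exponential moving average update described in the
// 'exp_moving_average_coefficient' option above (an illustrative helper with a
// hypothetical name, not called by this class): the same convex combination is
// applied to mu, mu2 and avg_act_gradient in bprop() below.
static inline real illustrative_moving_average(real old_value, real summand,
                                               real coefficient)
{
    return (1 - coefficient) * old_value + coefficient * summand;
}

// Likewise, a hedged sketch of the normalization factor described in the
// 'normalize_inputs' and 'min_stddev' options, assuming the clamp simply
// enforces a minimum standard deviation: invs = 1/max(min_stddev, sqrt(mu2 - mu*mu)).
static inline real illustrative_inverse_stddev(real mu_ij, real mu2_ij, real min_stddev)
{
    real stddev = sqrt(max(mu2_ij - mu_ij * mu_ij, real(0)));
    return real(1) / max(stddev, min_stddev);
}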


void FNetLayerVariable::recomputeSize(int& l, int& w) const
{
    if (varray.length() >= 2 && varray[0] && varray[1]) {
        l = varray[0]->length();
        w = varray[1]->length();
    } else
        l = w = 0;
}

void FNetLayerVariable::fprop()
{
    real* x = varray[0]->valuedata;
    real* y = valuedata;
    real* b = varray[2]->valuedata;
    real c1 = varray[3]->valuedata[0];
    real c2 = varray[3]->valuedata[1];
    int mx=varray[0]->matValue.mod();
    int my=matValue.mod();
    for (int k=0;k<minibatch_size;k++, x+=mx, y+=my)
    {
        real cum_s = 0;
        Mat u_k = u[k];
        real* inh_k = inh[k];
        real* cum_inh_k = cum_inh[k];
        for (int i=0;i<n_hidden;i++)
        {
            real* Wi = varray[1]->matValue[i];
            real bi = b[i];
            if (inhibit_next_units && i>0)
            {
                if (inhibit_by_sum)
                    cum_inh_k[i] = cum_s;
                else
                    cum_inh_k[i] = cum_s / real(i);
                if (squashed_inhibition)
                    inh_k[i] = sigmoid(c2 * cum_inh_k[i]);
                else
                    inh_k[i] = cum_inh_k[i];
                bi -= c1*inh_k[i];
            }
            if (normalize_inputs)
            {
                real* mu_i = mu[i];
                real* invs_i = invs[i];
                real* u_ki = u_k[i];
                for (int j=0;j<n_inputs;j++)
                    u_ki[j] = (x[j] - mu_i[j])*invs_i[j];
                y[i] = sigmoid(dot_product(bi,u_ki,Wi,n_inputs));
            }
            else
                y[i] = sigmoid(dot_product(bi,x,Wi,n_inputs));
            cum_s += y[i];
        }
    }
}


void FNetLayerVariable::bprop()
{
    real* x = varray[0]->valuedata;
    real* dx = varray[0]->gradientdata;
    real* y = valuedata;
    real* dy = gradientdata;
    real c1 = varray[3]->valuedata[0];
    real c2 = varray[3]->valuedata[1];
    real* db = varray[2]->gradientdata;
    real& dc1 = varray[3]->gradientdata[0];
    real& dc2 = varray[3]->gradientdata[1];
    int mx=varray[0]->matValue.mod();
    int mdx = varray[0]->matGradient.mod();
    int my=matValue.mod();
    int mdy = matGradient.mod();
    for (int k=0;k<minibatch_size;k++, x+=mx, y+=my, dx+=mdx, dy+=mdy)
    {
        Mat u_k = u[k];
        real* inh_k = inh[k];
        real* cum_inh_k = cum_inh[k];
        real dcum_s = 0;
        Vec xk = varray[0]->matValue(k);
        Vec dxk = varray[0]->matGradient(k);
        for (int i=n_hidden-1;i>=0;i--)
        {
            real dai = (dy[i]+dcum_s)*y[i]*(1-y[i]);
            real erri = fabs(dai);
            avg_act_gradient = (1 - exp_moving_average_coefficient)*avg_act_gradient +
                exp_moving_average_coefficient * erri;
            if (erri > gradient_threshold)
            {
                real* dWi = varray[1]->matGradient[i];
                if (normalize_inputs)
                {
                    real* u_ki = u_k[i];
                    for (int j=0;j<n_inputs;j++)
                        dWi[j] += dai * u_ki[j];
                    Vec mu_i = mu(i);
                    Vec mu2_i = mu2(i);
                    exponentialMovingAverageUpdate(mu_i, xk, exp_moving_average_coefficient);
                    exponentialMovingSquareUpdate(mu2_i, xk, exp_moving_average_coefficient);
                } else
                    for (int j=0;j<n_inputs;j++)
                        dWi[j] += dai * x[j];
                db[i] += dai;
                if (inhibit_next_units && i>0)
                {
                    real inh_ki = inh_k[i]; 
                    if (!fast_exact_is_equal(c1_, 0)) // c1 is optimized.
                        dc1 -= dai * inh_ki;
                    if (squashed_inhibition) {
                        real dinh_ki = - dai * c1 * inh_ki * (1 - inh_ki);
                        if (!fast_exact_is_equal(c2_, 0)) // c2 is optimized.
                            dc2 += dinh_ki * cum_inh_k[i];
                        if (inhibit_by_sum)
                            dcum_s += dinh_ki * c2;
                        else
                            dcum_s += dinh_ki * c2 / i;
                    } else {
                        real dinh_ki = - dai * c1;
                        if (inhibit_by_sum)
                            dcum_s += dinh_ki;
                        else
                            dcum_s += dinh_ki / i;
                    }
                }
                if (backprop_to_inputs)
                {
                    Vec Wi = varray[1]->matValue(i);
                    multiplyAcc(dxk,Wi,dai);
                }
            }
        }
    }
    if (normalize_inputs)
        // invs = 1/ sqrt(mu2 - mu*mu)
        computeInverseStandardDeviationFromMeanAndSquareMean(invs,mu,mu2, min_stddev, min_stddev);
    gradient_threshold = average_error_fraction_to_threshold * avg_act_gradient;
}
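
// Illustrative sketch of the pseudo-gradient rule applied in bprop() above (a
// hypothetical helper, not used by the class): the gradient on a[k,i] is kept
// only if its magnitude exceeds gradient_threshold, which tracks the fraction
// average_error_fraction_to_threshold of the moving average of |dC/da[k,i]|.
static inline bool illustrative_gradient_is_kept(real d_a, real avg_abs_gradient,
                                                 real fraction)
{
    return fabs(d_a) > fraction * avg_abs_gradient;
}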

// makeDeepCopyFromShallowCopy //
void FNetLayerVariable::makeDeepCopyFromShallowCopy(CopiesMap& copies) {
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(invs, copies);
    deepCopyField(mu2, copies);
    deepCopyField(u, copies);
    deepCopyField(inh, copies);
    deepCopyField(cum_inh, copies);
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :