// -*- C++ -*-

// UndirectedSoftmaxModule.cc
//
// Copyright (C) 2005 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id$
   ******************************************************* */

// Authors: Yoshua Bengio

#include "UndirectedSoftmaxModule.h"

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    UndirectedSoftmaxModule,
    "Softmax output layer in an undirected multi-layer graphical model, using stochastic gradient to update neuron weights",
    "There is one output unit per class. The model estimates P(Y|X) where Y is the output class and X is the input\n"
    "of the module. The input X can be interpreted as the linear output of binary stochastic neurons H at a previous layer,\n"
    "i.e. these input neurons fire with probability sigmoid(X + weights'*T), where T_i = 1_{Y=i}.\n"
    "Output units fire with probability proportional to exp(biases + weights*H), where H is the vector of binary values of the\n"
    "hidden (or input) neurons whose activations are in X.\n"
    "The output probabilities are computed as follows:\n"
    "      P(Y=i|X) = exp(-biases[i] + sum(softplus(-(X + weights[i])))) / Z\n"
    "where Z normalizes over classes and softplus(a)=log(1+exp(a)).\n"
    "This formula can be derived by considering that X, H, and T are binary random variables\n"
    "following the Boltzmann distribution with energy\n"
    "  energy(H,T,X) = biases'T + T' weights H + H' X.\n"
    "During training, both X and T are observed, so the energy is linear in H, i.e. P(H|X,T) is\n"
    "a product of P(H_i|X,T), i.e. the H_i are conditionally independent given X and T.\n"
    "This corresponds to an undirected graphical model with full connectivity between each H_i\n"
    "and each T_j (and similarly between H_i and the inputs of the previous layer, if there is one),\n"
    "but no connection among the H_i or among the T_j. Because of this factorization we obtain\n"
    "   P(Y|X) = sum_H exp(-energy(H,T,X)) / Z\n"
    "and\n"
    "   sum_H exp(-energy(H,T,X)) = exp(-biases'T) prod_i (exp(-energy_i(1,T,X)) + exp(-energy_i(0,T,X)))\n"
    "where energy_i(h,T,X) = the term in H_i=h in the energy = h(T' weights[:,i] + X_i).\n"
    "Since energy_i(0,T,X) = 0, we obtain that\n"
    "   sum_H exp(-energy(H,T,X)) = exp(-biases'T) exp(sum_i log(1+exp(-energy_i(1,T,X))))\n"
    "                             = exp(-biases'T + sum(softplus(-(T'weights + X'))))\n"
    "which gives the above formula for P(Y|X).\n"
    "\n"
    "Weights and biases are updated by online gradient descent, with a learning rate possibly decreasing\n"
    "in 1/(1 + n_updates_done_up_to_now * decrease_constant).\n"
    "An L1 and/or L2 regularization penalty can be added to push weights towards 0.\n"
    "Weights can be initialized to 0, to a given initial matrix, or randomly\n"
    "from a uniform distribution. Biases can be initialized to 0 or from a user-provided vector.\n"
    );

UndirectedSoftmaxModule::UndirectedSoftmaxModule():
    start_learning_rate( .001 ),
    decrease_constant( 0 ),
    init_weights_random_scale( 1. ),
    L1_penalty_factor( 0. ),
    L2_penalty_factor( 0. ),
    step_number( 0 )
    /* ### Initialize all fields to their default value */
{
}

// Computes the output of the module from the input activations
void UndirectedSoftmaxModule::fprop(const Vec& input, Vec& output) const
{
    int in_size = input.size();

    // size check
    if( in_size != input_size )
    {
        PLERROR("UndirectedSoftmaxModule::fprop: 'input.size()' should be equal\n"
                " to 'input_size' (%i != %i)\n", in_size, input_size);
    }

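    // The size check above is all this revision of fprop performs: 'output' is
    // never filled in. What follows is a minimal sketch of the computation,
    // derived only from the formula documented in PLEARN_IMPLEMENT_OBJECT:
    //     P(Y=i|X) = exp(-biases[i] + sum_j softplus(-(X_j + weights(i,j)))) / Z
    // It assumes 'output' should receive the normalized class probabilities and
    // relies on PLearn's softplus() helper; treat it as an illustration rather
    // than the author's implementation.
    output.resize( output_size );
    for( int i=0 ; i<output_size ; i++ )
    {
        // unnormalized log-probability of class i
        real activation = -biases[i];
        for( int j=0 ; j<input_size ; j++ )
            activation += softplus( -(input[j] + weights(i,j)) );
        output[i] = activation;
    }
    // softmax normalization, subtracting the maximum activation for stability
    real max_act = output[0];
    for( int i=1 ; i<output_size ; i++ )
        if( output[i] > max_act )
            max_act = output[i];
    real Z = 0;
    for( int i=0 ; i<output_size ; i++ )
    {
        output[i] = exp( output[i] - max_act );
        Z += output[i];
    }
    for( int i=0 ; i<output_size ; i++ )
        output[i] /= Z;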
}

void UndirectedSoftmaxModule::bpropUpdate(const Vec& input, const Vec& output,
                                      const Vec& output_gradient)
{
    int in_size = input.size();
    int out_size = output.size();
    int og_size = output_gradient.size();

    // size check
    if( in_size != input_size )
    {
        PLERROR("UndirectedSoftmaxModule::bpropUpdate: 'input.size()' should be"
                " equal\n"
                " to 'input_size' (%i != %i)\n", in_size, input_size);
    }
    if( out_size != output_size )
    {
        PLERROR("UndirectedSoftmaxModule::bpropUpdate: 'output.size()' should be"
                " equal\n"
                " to 'output_size' (%i != %i)\n", out_size, output_size);
    }
    if( og_size != output_size )
    {
        PLERROR("UndirectedSoftmaxModule::bpropUpdate: 'output_gradient.size()'"
                " should\n"
                " be equal to 'output_size' (%i != %i)\n",
                og_size, output_size);
    }

    learning_rate = start_learning_rate / ( 1+decrease_constant*step_number);

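    // Note: only the learning-rate schedule above and the L1 / L2 weight
    // penalties below are applied by this method; the data-dependent gradient
    // step on 'weights' and 'biases' is not performed here.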

    if (L1_penalty_factor!=0)
    {
        real delta = learning_rate * L1_penalty_factor;
        for (int i=0;i<output_size;i++)
        {
            real* Wi = weights[i]; // don't apply penalty on bias
            for (int j=0;j<input_size;j++)
            {
                real Wij = Wi[j];
                if (Wij>delta)
                    Wi[j] -= delta;
                else if (Wij<-delta)
                    Wi[j] += delta;
                else
                    Wi[j] = 0;
            }
        }
    }
    if (L2_penalty_factor!=0)
    {
        real delta = learning_rate*L2_penalty_factor;
        if (delta>1)
            PLWARNING("UndirectedSoftmaxModule::bpropUpdate: learning rate = %f is too large!",learning_rate);
        weights *= 1 - delta;
    }

    step_number++;

}



// Simply updates and propagates back gradient
void UndirectedSoftmaxModule::bpropUpdate(const Vec& input, const Vec& output,
                                      Vec& input_gradient,
                                      const Vec& output_gradient)
{
    // compute input_gradient = weights' * output_gradient, using the weights
    // before they are updated below (the product already has size input_size)
    input_gradient = transposeProduct( weights, output_gradient );

    // do the update (and size check)
    bpropUpdate( input, output, output_gradient );

}

// Update
void UndirectedSoftmaxModule::bbpropUpdate(const Vec& input, const Vec& output,
                                       const Vec& output_gradient,
                                       const Vec& output_diag_hessian)
{
    PLWARNING("UndirectedSoftmaxModule::bbpropUpdate: You're providing\n"
              "'output_diag_hessian', but it will not be used.\n");

    int odh_size = output_diag_hessian.size();
    if( odh_size != output_size )
    {
        PLERROR("UndirectedSoftmaxModule::bbpropUpdate:"
                " 'output_diag_hessian.size()'\n"
                " should be equal to 'output_size' (%i != %i)\n",
                odh_size, output_size);
    }

    bpropUpdate( input, output, output_gradient );

}

// Propagates back output_gradient only; output_diag_hessian is ignored
// and input_diag_hessian is not filled
void UndirectedSoftmaxModule::bbpropUpdate(const Vec& input, const Vec& output,
                              Vec&  input_gradient,
                              const Vec& output_gradient,
                              Vec&  input_diag_hessian,
                              const Vec& output_diag_hessian)
{
    bpropUpdate( input, output, input_gradient, output_gradient );
}

// Reset weights, biases, learning rate and step counter
void UndirectedSoftmaxModule::forget()
{
    resetWeights();

    if( init_weights.size() !=0 )
        weights << init_weights;
    else if (init_weights_random_scale!=0)
    {
        real r = init_weights_random_scale / input_size;
        random_gen->fill_random_uniform(weights,-r,r);
    }
    if( init_biases.size() !=0 )
        biases << init_biases;
    else
        biases.clear();

    learning_rate = start_learning_rate;
    step_number = 0;
}


void UndirectedSoftmaxModule::resetWeights()
{
    weights.resize( output_size, input_size );
    biases.resize(output_size);
    weights.fill( 0 );
    biases.fill( 0 ); // also clear biases so they never stay uninitialized
}


// ### Nothing to add here, simply calls build_
void UndirectedSoftmaxModule::build()
{
    inherited::build();
    build_();
}

void UndirectedSoftmaxModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(init_weights, copies);
    deepCopyField(init_biases, copies);
    deepCopyField(weights, copies);
    deepCopyField(biases, copies);
}

void UndirectedSoftmaxModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "start_learning_rate",
                  &UndirectedSoftmaxModule::start_learning_rate,
                  OptionBase::buildoption,
                  "Learning-rate of stochastic gradient optimization");

    declareOption(ol, "decrease_constant",
                  &UndirectedSoftmaxModule::decrease_constant,
                  OptionBase::buildoption,
                  "Decrease constant of stochastic gradient optimization");

    declareOption(ol, "init_weights", &UndirectedSoftmaxModule::init_weights,
                  OptionBase::buildoption,
                  "Optional initial weights of the neurons (one row per output).\n"
                  "If not provided then weights are initialized according\n"
                  "to a uniform distribution (see init_weights_random_scale)\n"
                  "and biases are initialized to 0.\n");

    declareOption(ol, "init_biases", &UndirectedSoftmaxModule::init_biases,
                  OptionBase::buildoption,
                  "Optional initial biases (one per output neuron). If not provided\n"
                  "then biases are initialized to 0.\n");

    declareOption(ol, "init_weights_random_scale", &UndirectedSoftmaxModule::init_weights_random_scale,
                  OptionBase::buildoption,
                  "If init_weights is not provided, the weights are initialized randomly\n"
                  "from a uniform distribution in [-r,r], with r = init_weights_random_scale/input_size.\n"
                  "To clear the weights initially, just set this option to 0.");

    declareOption(ol, "L1_penalty_factor", &UndirectedSoftmaxModule::L1_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L1 regularization term, i.e.\n"
                  "minimize L1_penalty_factor * sum_{ij} |weights(i,j)| during training.\n");

    declareOption(ol, "L2_penalty_factor", &UndirectedSoftmaxModule::L2_penalty_factor,
                  OptionBase::buildoption,
                  "Optional (default=0) factor of L2 regularization term, i.e.\n"
                  "minimize 0.5 * L2_penalty_factor * sum_{ij} weights(i,j)^2 during training.");


    declareOption(ol, "weights", &UndirectedSoftmaxModule::weights,
                  OptionBase::learntoption,
                  "Input weights of the output neurons (one row per output neuron)." );

    declareOption(ol, "biases", &UndirectedSoftmaxModule::biases,
                  OptionBase::learntoption,
                  "Biases of the output neurons.");

    inherited::declareOptions(ol);
}

void UndirectedSoftmaxModule::build_()
{
    if( input_size < 0 ) // has not been initialized
    {
        PLERROR("UndirectedSoftmaxModule::build_: 'input_size' < 0 (%i).\n"
                "You should set it to a positive integer.\n", input_size);
    }
    else if( output_size < 0 ) // default to 1 neuron
    {
        PLWARNING("UndirectedSoftmaxModule::build_: 'output_size' is < 0 (%i),\n"
                  " you should set it to a positive integer (the number of"
                  " neurons).\n"
                  " Defaulting to 1.\n", output_size);
        output_size = 1;
    }

    if( weights.size() == 0 )
    {
        resetWeights();
    }

    if (init_weights.size()==0 && init_weights_random_scale!=0 && !random_gen)
        random_gen = new PRandom();
}




} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :