// -*- C++ -*-

// RBMMatrixConnectionNatGrad.cc
//
// Copyright (C) 2006 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Yoshua Bengio
#include "RBMMatrixConnectionNatGrad.h"
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMMatrixConnectionNatGrad,
    "Subclass of RBMMatrixConnection which uses a block-diagonal natural gradient.\n",
    "The natural gradient algorithm used to adjust the update direction is the\n"
    "one implemented in NatGradEstimator, a template of which is provided by the\n"
    "user. One such estimator is adapted separately for each neuron (for the input\n"
    "weights of each neuron), i.e. for each row of the weights matrix.\n");

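// A sketch of the per-row update performed below, under the standard
// natural-gradient reading of NatGradEstimator (interpreting the estimator
// as applying an approximate inverse metric C_i^{-1} is an assumption here;
// see NatGradEstimator itself for the actual algorithm):
//
//     g_i  = row i of the raw weight gradient (from CD or from backprop)
//     w_i <- w_i - learning_rate * C_i^{-1} g_i
//
// Running one independent estimator per row makes the overall metric
// block-diagonal, with one (down_size x down_size) block per hidden unit.
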
RBMMatrixConnectionNatGrad::RBMMatrixConnectionNatGrad( real the_learning_rate ) :
    inherited(the_learning_rate)
{
}

void RBMMatrixConnectionNatGrad::declareOptions(OptionList& ol)
{
    declareOption(ol, "natgrad_template", &RBMMatrixConnectionNatGrad::natgrad_template,
                  OptionBase::buildoption,
                  "An object of type NatGradEstimator which will be copied for each row of the\n"
                  "weights matrix; each copy will compute the adjustment to the update direction\n"
                  "based on the natural gradient.\n");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMMatrixConnectionNatGrad::build_()
{
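    // One independent NatGradEstimator per hidden unit (per row of 'weights'):
    // cd_natgrad[i] serves the contrastive-divergence updates in update(),
    // and bp_natgrad[i] the gradient updates coming from bpropUpdate().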
    cd_natgrad.resize(up_size);
    bp_natgrad.resize(up_size);
    for (int i=0;i<up_size;i++)
    {
        cd_natgrad[i] = PLearn::deepCopy(natgrad_template);
        bp_natgrad[i] = PLearn::deepCopy(natgrad_template);
    }
    weights_gradient.resize(up_size,down_size);
    natural_gradient.resize(down_size);
}

void RBMMatrixConnectionNatGrad::build()
{
    inherited::build();
    build_();
}


void RBMMatrixConnectionNatGrad::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(natgrad_template, copies);
    deepCopyField(cd_natgrad, copies);
    deepCopyField(bp_natgrad, copies);
    deepCopyField(weights_gradient, copies);
    deepCopyField(natural_gradient, copies);
}

void RBMMatrixConnectionNatGrad::update( const Mat& pos_down_values, // v_0
                                         const Mat& pos_up_values,   // h_0
                                         const Mat& neg_down_values, // v_1
                                         const Mat& neg_up_values )  // h_1
{
    // weights -= learning_rate * ( h_0 v_0' - h_1 v_1' );
    // or:
    // weights[i][j] += learning_rate * (h_1[i] v_1[j] - h_0[i] v_0[j]);

    PLASSERT( pos_up_values.width() == weights.length() );
    PLASSERT( neg_up_values.width() == weights.length() );
    PLASSERT( pos_down_values.width() == weights.width() );
    PLASSERT( neg_down_values.width() == weights.width() );
    if( momentum == real(0.) )
    {
        // We use the average gradient over a mini-batch.
        real mbnorm = 1. / pos_down_values.length();
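        // weights_gradient = ( h_0' v_0 - h_1' v_1 ) / minibatch_size,
        // where each row of the value matrices holds one example.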
        productScaleAcc(weights_gradient, pos_up_values, true, pos_down_values, false,
                        mbnorm, 0.);
        productScaleAcc(weights_gradient, neg_up_values, true, neg_down_values, false,
                        -mbnorm, 1.);

        for (int i=0;i<up_size;i++)
        {
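            // Let the i-th estimator turn the raw gradient of row i into a
            // natural-gradient direction; pos_count plays the role of the
            // online time step expected by the estimator's operator().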
            (*cd_natgrad[i])(pos_count,weights_gradient(i),natural_gradient);
            multiplyAcc(weights(i),natural_gradient,-learning_rate);
        }
        pos_count++;
    }
    else
        PLERROR("RBMMatrixConnectionNatGrad::update with momentum - Not implemented");
}


void RBMMatrixConnectionNatGrad::bpropUpdate(const Mat& inputs,
                                             const Mat& outputs,
                                             Mat& input_gradients,
                                             const Mat& output_gradients,
                                             bool accumulate)
{
    PLASSERT( inputs.width() == down_size );
    PLASSERT( outputs.width() == up_size );
    PLASSERT( output_gradients.width() == up_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradients.width() == down_size &&
                      input_gradients.length() == inputs.length(),
                      "Cannot resize input_gradients and accumulate into it" );

        // input_gradients += output_gradients * weights
        productAcc(input_gradients, output_gradients, weights);
    }
    else
    {
        input_gradients.resize(inputs.length(), down_size);
        // input_gradients = output_gradients * weights
        product(input_gradients, output_gradients, weights);
    }

    // weights_gradient = 1/n * output_gradients' * inputs
    productScaleAcc(weights_gradient, output_gradients, true, inputs, false,
                    1. / inputs.length(), 0.);
    for (int i=0;i<up_size;i++)
    {
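        // Same per-row natural-gradient correction as in update(), but using
        // the estimators reserved for backprop gradients.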
        (*bp_natgrad[i])(pos_count,weights_gradient(i),natural_gradient);
        multiplyAcc(weights(i),natural_gradient,-learning_rate);
    }
    pos_count++;
}
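
// A minimal usage sketch in PLearn's serialization syntax. The option values
// below are hypothetical, and the exact set of accepted options is defined by
// the declareOptions of this class, of RBMConnection, and of NatGradEstimator:
//
//   RBMMatrixConnectionNatGrad(
//       down_size = 784;
//       up_size = 256;
//       learning_rate = 0.01;
//       natgrad_template = NatGradEstimator();
//   )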

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :