// -*- C++ -*-

// RBMClassificationModule.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#define PL_LOG_MODULE_NAME "RBMClassificationModule"

#include "RBMClassificationModule.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMClassificationModule,
    "Computes the undirected softmax used in deep belief nets",
    "This module contains, from bottom to top:\n"
    "  - an RBMConnection - previous_to_last,\n"
    "  - an RBMBinomialLayer - last_layer,\n"
    "  - an RBMMatrixConnection (transposed) - last_to_target,\n"
    "  - and an RBMMultinomialLayer - target_layer.\n"
    "The two RBMConnections are combined in joint_connection.\n");
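
// Note (added sketch, not part of the original source): with input x from
// the previous layer, binary hidden units h in last_layer and a one-hot
// target y in target_layer, this module is assumed to implement the usual
// classification RBM with energy
//     E(x, h, y) = - h'Vx - h'Wy - c'h - d'y,
// where V holds previous_to_last's weights, W holds last_to_target's
// weights, c is last_layer's bias and d is target_layer's bias. fprop()
// below computes P(y|x) by summing h out analytically.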

RBMClassificationModule::RBMClassificationModule()
{
}

void RBMClassificationModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "previous_to_last",
                  &RBMClassificationModule::previous_to_last,
                  OptionBase::buildoption,
                  "Connection between the previous layer and last_layer");

    declareOption(ol, "last_layer", &RBMClassificationModule::last_layer,
                  OptionBase::buildoption,
                  "Top-level layer (the one in the middle if we unfold)");

    declareOption(ol, "last_to_target",
                  &RBMClassificationModule::last_to_target,
                  OptionBase::buildoption,
                  "Connection between last_layer and target_layer");

    declareOption(ol, "target_layer", &RBMClassificationModule::target_layer,
                  OptionBase::buildoption,
                  "Layer containing the one-hot vector representing the\n"
                  "target (or its prediction).\n");

    declareOption(ol, "joint_connection",
                  &RBMClassificationModule::joint_connection,
                  OptionBase::learntoption,
                  "Connection grouping previous_to_last and last_to_target");

    declareOption(ol, "last_size", &RBMClassificationModule::last_size,
                  OptionBase::learntoption,
                  "Size of last_layer");
    /*
    declareOption(ol, "", &RBMClassificationModule::,
                  OptionBase::buildoption,
                  "");
     */

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMClassificationModule::build_()
{
    MODULE_LOG << "build_() called" << endl;

    if( !previous_to_last || !last_layer || !last_to_target || !target_layer )
    {
        MODULE_LOG << "build_() aborted because layers and connections were"
           " not set" << endl;
        return;
    }

    input_size = previous_to_last->down_size;
    last_size = last_layer->size;
    output_size = target_layer->size;

    PLASSERT( previous_to_last->up_size == last_size );
    PLASSERT( last_to_target->up_size == last_size );
    PLASSERT( last_to_target->down_size == output_size );

    d_last_act.resize( last_size );
    d_target_act.resize( output_size );

    if( !joint_connection )
        joint_connection = new RBMMixedConnection();

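    // Layout note (added comment, assuming RBMMixedConnection's usual
    // convention that rows of sub_connections index blocks of the up layer
    // and columns index blocks of the down layer): one row (last_layer) and
    // two columns (previous layer, target layer), so joint_connection acts
    // on the concatenation [input, target].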
    joint_connection->sub_connections.resize(1,2);
    joint_connection->sub_connections(0,0) = previous_to_last;
    joint_connection->sub_connections(0,1) = last_to_target;
    joint_connection->build();

    // If we have a random_gen, share it with the ones who do not
    if( random_gen )
    {
        if( !(previous_to_last->random_gen) )
        {
            previous_to_last->random_gen = random_gen;
            previous_to_last->forget();
        }
        if( !(last_layer->random_gen) )
        {
            last_layer->random_gen = random_gen;
            last_layer->forget();
        }
        if( !(last_to_target->random_gen) )
        {
            last_to_target->random_gen = random_gen;
            last_to_target->forget();
        }
        if( !(target_layer->random_gen) )
        {
            target_layer->random_gen = random_gen;
            target_layer->forget();
        }
        if( !(joint_connection->random_gen) )
            joint_connection->random_gen = previous_to_last->random_gen;
    }
}

// ### Nothing to add here, simply calls build_
void RBMClassificationModule::build()
{
    inherited::build();
    build_();
}


void RBMClassificationModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(previous_to_last, copies);
    deepCopyField(last_layer, copies);
    deepCopyField(last_to_target, copies);
    deepCopyField(target_layer, copies);
    deepCopyField(joint_connection, copies);
    deepCopyField(out_act, copies);
    deepCopyField(d_target_act, copies);
    deepCopyField(d_last_act, copies);
}

void RBMClassificationModule::fprop(const Vec& input, Vec& output) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    // input is supposed to be an expectation or sample from the previous layer
    previous_to_last->setAsDownInput( input );

    // last_layer->activation = bias + previous_to_last_weights * input
    last_layer->getAllActivations( previous_to_last );

    // target_layer->activation =
    //      bias + sum_j softplus(W_ji + last_layer->activation[j])
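    //
    // Sketch of where this formula comes from (added comment): with binary
    // hidden units h_j, summing them out of the joint RBM gives
    //     P(y=i|x) = softmax_i( d_i + sum_j softplus(c_j + (Vx)_j + W_ji) ),
    // since the sum over h_j in {0,1} of exp(h_j z_j) is 1 + exp(z_j), and
    // log(1 + exp(z)) = softplus(z). last_layer->activation already holds
    // c + Vx, so each softplus term below only needs the last_to_target
    // weight W_ji added to it; the final softmax is computed further down
    // by target_layer->computeExpectation().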
    Vec target_act = target_layer->activation;
    Vec last_act = last_layer->activation;
    // step from one row to the next in the weights matrix
    int m = last_to_target->weights.mod();
    for( int i=0 ; i<output_size ; i++ )
    {
        target_act[i] = target_layer->bias[i];
        real *w = &(last_to_target->weights(0,i));
        for( int j=0 ; j<last_size ; j++, w+=m )
        {
            // *w = weights(j,i)
            target_act[i] += softplus(*w + last_act[j]);
        }
    }

    target_layer->expectation_is_up_to_date = false;
    target_layer->computeExpectation();
    output << target_layer->expectation;
}

void RBMClassificationModule::fprop(const Mat& inputs, Mat& outputs)
{
    int batch_size = inputs.length();
    outputs.resize(batch_size, output_size);

    for (int k=0; k<batch_size; k++)
    {
        Vec tmp_out = outputs(k);
        fprop(inputs(k), tmp_out);
    }
}

/* THIS METHOD IS OPTIONAL
void RBMClassificationModule::bpropUpdate(const Vec& input, const Vec& output,
                               const Vec& output_gradient)
{
}
*/

void RBMClassificationModule::bpropUpdate(const Vec& input, const Vec& output,
                                          Vec& input_gradient,
                                          const Vec& output_gradient,
                                          bool accumulate)
{
    // size checks
    PLASSERT( input.size() == input_size );
    PLASSERT( output.size() == output_size );
    PLASSERT( output_gradient.size() == output_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }

    // bpropUpdate in target_layer, assuming target_layer->activation is
    // up-to-date, which is the case if fprop() has just been called.
    target_layer->bpropUpdate( target_layer->activation, output,
                               d_target_act, output_gradient );
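    // (Added note, assuming RBMLayer's usual bpropUpdate contract:) after
    // this call, d_target_act holds dC/d(target activations), and
    // target_layer's bias has already been updated with its own learning
    // rate.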

    // the tricky part is the backpropagation through last_to_target
    Vec last_act = last_layer->activation;
    for( int i=0 ; i<last_size ; i++ )
    {
        real* w = last_to_target->weights[i];
        d_last_act[i] = 0;
        for( int k=0 ; k<output_size ; k++ )
        {
            // dC/d( w_ik + last_act_i ), using d/dz softplus(z) = sigmoid(z)
            real d_z = d_target_act[k]*(sigmoid(w[k] + last_act[i]));
            w[k] -= last_to_target->learning_rate * d_z;

            d_last_act[i] += d_z;
        }
    }

    // don't use last_layer->bpropUpdate() here: the gradient does not flow
    // through the layer's usual transfer function, but directly through the
    // softplus terms above
    // last_layer->bias -= learning_rate * d_last_act;
    multiplyAcc( last_layer->bias, d_last_act, -(last_layer->learning_rate) );
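    // (Added note:) i.e., for each hidden unit j,
    //     bias_j -= lr * sum_k dC/d(target_act_k) * sigmoid(w_jk + act_j),
    // which is exactly the d_last_act accumulated in the loop above.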

    // at this point, the gradient can be backpropagated through
    // previous_to_last the usual way (even though last_act is not this
    // connection's own output; the 'output' argument is presumably unused
    // by the update)
    previous_to_last->bpropUpdate( input, last_act,
                                   input_gradient, d_last_act, accumulate );
}

void RBMClassificationModule::forget()
{
    if( !random_gen )
    {
        PLWARNING("RBMClassificationModule: cannot forget() without"
                  " random_gen");
        return;
    }

    if( !(previous_to_last->random_gen) )
        previous_to_last->random_gen = random_gen;
    previous_to_last->forget();
    // (added: last_layer was absent from the original reset sequence,
    // presumably an oversight, since build_() treats it like the others)
    if( !(last_layer->random_gen) )
        last_layer->random_gen = random_gen;
    last_layer->forget();
    if( !(last_to_target->random_gen) )
        last_to_target->random_gen = random_gen;
    last_to_target->forget();
    if( !(joint_connection->random_gen) )
        joint_connection->random_gen = random_gen;
    joint_connection->forget();
    if( !(target_layer->random_gen) )
        target_layer->random_gen = random_gen;
    target_layer->forget();
}

/* THIS METHOD IS OPTIONAL
void RBMClassificationModule::bbpropUpdate(const Vec& input, const Vec& output,
                                           const Vec& output_gradient,
                                           const Vec& output_diag_hessian)
{
}
*/

/* THIS METHOD IS OPTIONAL
void RBMClassificationModule::bbpropUpdate(const Vec& input, const Vec& output,
                                           Vec& input_gradient,
                                           const Vec& output_gradient,
                                           Vec& input_diag_hessian,
                                           const Vec& output_diag_hessian,
                                           bool accumulate)
{
}
*/


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :