PLearn 0.1
RBMMultitaskClassificationModule.cc
// -*- C++ -*-

// RBMMultitaskClassificationModule.cc
//
// Copyright (C) 2006 Pascal Lamblin
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Pascal Lamblin

#define PL_LOG_MODULE_NAME "RBMMultitaskClassificationModule"

#include "RBMMultitaskClassificationModule.h"
#include <plearn/io/pl_log.h>
#include <plearn/math/TMat_maths.h>

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    RBMMultitaskClassificationModule,
00051     "Computes a mean-field approximate of p(y|x), with y a binary vector.",
00052     "This module contains, from bottom to top:\n"
00053     "  - an RBMConnection - previous_to_last,\n"
00054     "  - an RBMBinomialLayer - last_layer,\n"
00055     "  - an RBMMatrixConnection (transposed) - last_to_target,\n"
00056     "  - and an RBMBinomialLayer - target_layer.\n"
00057     "The two RBMConnections are combined in joint_connection.\n");

RBMMultitaskClassificationModule::RBMMultitaskClassificationModule():
    n_mean_field_iterations( 1 ),
    fprop_outputs_activation( false )
{
}

void RBMMultitaskClassificationModule::declareOptions(OptionList& ol)
{
    declareOption(ol, "previous_to_last",
                  &RBMMultitaskClassificationModule::previous_to_last,
                  OptionBase::buildoption,
                  "Connection between the previous layer and last_layer.\n");

    declareOption(ol, "last_layer", &RBMMultitaskClassificationModule::last_layer,
                  OptionBase::buildoption,
                  "Top-level layer (the one in the middle if we unfold).\n");

    declareOption(ol, "last_to_target",
                  &RBMMultitaskClassificationModule::last_to_target,
                  OptionBase::buildoption,
                  "Connection between last_layer and target_layer.\n");

    declareOption(ol, "target_layer", &RBMMultitaskClassificationModule::target_layer,
                  OptionBase::buildoption,
                  "Layer containing the binary target vector\n"
                  "(or its prediction).\n");

    declareOption(ol, "joint_connection",
                  &RBMMultitaskClassificationModule::joint_connection,
                  OptionBase::learntoption,
                  "Connection grouping previous_to_last and last_to_target.\n");

    declareOption(ol, "n_mean_field_iterations",
                  &RBMMultitaskClassificationModule::n_mean_field_iterations,
                  OptionBase::buildoption,
                  "Number of mean-field iterations.\n");

    declareOption(ol, "fprop_outputs_activation",
                  &RBMMultitaskClassificationModule::fprop_outputs_activation,
                  OptionBase::buildoption,
                  "Indication that fprop should output the value of the "
                  "activation\n"
                  "before the squashing function and the application of the bias,\n"
                  "instead of the mean-field approximation.\n");

    declareOption(ol, "last_size", &RBMMultitaskClassificationModule::last_size,
                  OptionBase::learntoption,
                  "Size of last_layer.\n");
    /*
    declareOption(ol, "", &RBMMultitaskClassificationModule::,
                  OptionBase::buildoption,
                  "");
     */

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void RBMMultitaskClassificationModule::build_()
{
    MODULE_LOG << "build_() called" << endl;

    if( !previous_to_last || !last_layer || !last_to_target || !target_layer )
    {
        MODULE_LOG << "build_() aborted because layers and connections were"
           " not set" << endl;
        return;
    }

    input_size = previous_to_last->down_size;
    last_size = last_layer->size;
    output_size = target_layer->size;

    PLASSERT( previous_to_last->up_size == last_size );
    PLASSERT( last_to_target->up_size == last_size );
    PLASSERT( last_to_target->down_size == output_size );

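    // Group the two connections side by side (one row, two columns) in an
    // RBMMixedConnection, so they can be manipulated as a single connection.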
    if( !joint_connection )
        joint_connection = new RBMMixedConnection();

    joint_connection->sub_connections.resize(1,2);
    joint_connection->sub_connections(0,0) = previous_to_last;
    joint_connection->sub_connections(0,1) = last_to_target;
    joint_connection->build();

    if( n_mean_field_iterations > 0 )
    {
        mean_field_activations_target.resize( n_mean_field_iterations );
        mean_field_approximations_target.resize( n_mean_field_iterations );
        mean_field_activations_hidden.resize( n_mean_field_iterations );
        mean_field_approximations_hidden.resize( n_mean_field_iterations );
        for( int i=0; i<n_mean_field_iterations; i++ )
        {
            mean_field_activations_target[i].resize( output_size );
            mean_field_approximations_target[i].resize( output_size );
            mean_field_activations_hidden[i].resize( last_size );
            mean_field_approximations_hidden[i].resize( last_size );
        }
        mean_field_activations_gradient_target.resize( output_size );
        mean_field_approximations_gradient_target.resize( output_size );
        mean_field_activations_gradient_hidden.resize( last_size );
        mean_field_approximations_gradient_hidden.resize( last_size );
    }
    else
        PLERROR("In RBMMultitaskClassificationModule::build_(): "
                "n_mean_field_iterations should be > 0\n");

    last_to_target_gradient.resize( last_to_target->up_size,
                                    last_to_target->down_size );

    // If we have a random_gen, share it with the sub-modules that lack one
    if( random_gen )
    {
        if( !(previous_to_last->random_gen) )
        {
            previous_to_last->random_gen = random_gen;
            previous_to_last->forget();
        }
        if( !(last_layer->random_gen) )
        {
            last_layer->random_gen = random_gen;
            last_layer->forget();
        }
        if( !(last_to_target->random_gen) )
        {
            last_to_target->random_gen = random_gen;
            last_to_target->forget();
        }
        if( !(target_layer->random_gen) )
        {
            target_layer->random_gen = random_gen;
            target_layer->forget();
        }
        if( !(joint_connection->random_gen) )
        {
            joint_connection->random_gen = random_gen;
            joint_connection->forget();
        }
    }
}

// ### Nothing to add here, simply calls build_
void RBMMultitaskClassificationModule::build()
{
    inherited::build();
    build_();
}


void RBMMultitaskClassificationModule::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(previous_to_last, copies);
    deepCopyField(last_layer, copies);
    deepCopyField(last_to_target, copies);
    deepCopyField(target_layer, copies);
    deepCopyField(joint_connection, copies);
    deepCopyField(mean_field_activations_target, copies);
    deepCopyField(mean_field_approximations_target, copies);
    deepCopyField(mean_field_activations_hidden, copies);
    deepCopyField(mean_field_approximations_hidden, copies);
    deepCopyField(last_to_target_gradient, copies);
    deepCopyField(mean_field_activations_gradient_target, copies);
    deepCopyField(mean_field_approximations_gradient_target, copies);
    deepCopyField(mean_field_activations_gradient_hidden, copies);
    deepCopyField(mean_field_approximations_gradient_hidden, copies);
}

void RBMMultitaskClassificationModule::fprop(const Vec& input, Vec& output) const
{
    PLASSERT( input.size() == input_size );
    output.resize( output_size );

    previous_to_last->fprop( input, mean_field_activations_hidden[0] );
    last_layer->fprop( mean_field_activations_hidden[0],
                       mean_field_approximations_hidden[0] );

    Mat weights = last_to_target->weights;
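
    // Alternating mean-field updates: at each iteration, the posterior over
    // the target units is recomputed from the current hidden posterior
    // through the (transposed) last_to_target weights, then the hidden
    // posterior is recomputed from the target posterior plus the fixed
    // contribution of the input.  The layers' fprop calls add their own bias
    // and apply the squashing function (a sigmoid for RBMBinomialLayer).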
    for( int t=0; t<n_mean_field_iterations; t++ )
    {
        transposeProduct( mean_field_activations_target[t], weights,
                          mean_field_approximations_hidden[t] );
        target_layer->fprop( mean_field_activations_target[t],
                             mean_field_approximations_target[t] );

        if( t != n_mean_field_iterations -1 )
        {
            product( mean_field_activations_hidden[t+1], weights,
                     mean_field_approximations_target[t] );
            mean_field_activations_hidden[t+1] += mean_field_activations_hidden[0];
            last_layer->fprop( mean_field_activations_hidden[t+1],
                               mean_field_approximations_hidden[t+1] );
        }
    }

    if( fprop_outputs_activation )
    {
        output << mean_field_activations_target.last();
        //output += target_layer->bias;
    }
    else
        output << mean_field_approximations_target.last();
}

/* THIS METHOD IS OPTIONAL
void RBMMultitaskClassificationModule::bpropUpdate(const Vec& input, const Vec& output,
                               const Vec& output_gradient)
{
}
*/

void RBMMultitaskClassificationModule::bpropUpdate(const Vec& input, const Vec& output,
                                          Vec& input_gradient,
                                          const Vec& output_gradient,
                                          bool accumulate)
{
    // size checks
    PLASSERT( input.size() == input_size );
    PLASSERT( output.size() == output_size );
    PLASSERT( output_gradient.size() == output_size );

    if( accumulate )
    {
        PLASSERT_MSG( input_gradient.size() == input_size,
                      "Cannot resize input_gradient AND accumulate into it" );
    }

    last_to_target_gradient.clear();
    Mat weights = last_to_target->weights;
    if( fprop_outputs_activation )
        mean_field_activations_gradient_target << output_gradient;
    else
        mean_field_approximations_gradient_target << output_gradient;

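    // Backpropagate through the unrolled mean-field iterations in reverse
    // order.  The gradient with respect to the shared last_to_target weights
    // is accumulated in last_to_target_gradient (from both directions in
    // which the weights are used), and a single update is applied at the end.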
    for( int t=n_mean_field_iterations-1; t>=0; t-- )
    {
        if( t != n_mean_field_iterations-1 || !fprop_outputs_activation )
            target_layer->bpropUpdate( mean_field_activations_target[t],
                                       mean_field_approximations_target[t],
                                       mean_field_activations_gradient_target,
                                       mean_field_approximations_gradient_target
                );

        externalProductAcc( last_to_target_gradient,
                            mean_field_approximations_hidden[t],
                            mean_field_activations_gradient_target);

        product( mean_field_approximations_gradient_hidden, weights,
                 mean_field_activations_gradient_target);

        if( t != 0 )
        {
            last_layer->bpropUpdate( mean_field_activations_hidden[t],
                                     mean_field_approximations_hidden[t],
                                     mean_field_activations_gradient_hidden,
                                     mean_field_approximations_gradient_hidden
                );

            externalProductAcc( last_to_target_gradient,
                                mean_field_activations_gradient_hidden,
                                mean_field_approximations_target[t-1]
                                );

            transposeProduct( mean_field_approximations_gradient_target, weights,
                              mean_field_activations_gradient_hidden);
        }
    }

    last_layer->bpropUpdate( mean_field_activations_hidden[0],
                             mean_field_approximations_hidden[0],
                             mean_field_activations_gradient_hidden,
                             mean_field_approximations_gradient_hidden
        );

    previous_to_last->bpropUpdate( input, mean_field_activations_hidden[0],
                                   input_gradient,
                                   mean_field_activations_gradient_hidden,
                                   accumulate);

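    // Single gradient step on the shared last_to_target weights, using the
    // gradient accumulated over all mean-field iterations.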
    multiplyAcc( weights, last_to_target_gradient,
                 - (last_to_target->learning_rate) );
}

void RBMMultitaskClassificationModule::forget()
{
    if( !random_gen )
    {
        PLWARNING("RBMMultitaskClassificationModule: cannot forget() without"
                  " random_gen");
        return;
    }

    if( !(previous_to_last->random_gen) )
        previous_to_last->random_gen = random_gen;
    previous_to_last->forget();
    if( !(last_to_target->random_gen) )
        last_to_target->random_gen = random_gen;
    last_to_target->forget();
    if( !(joint_connection->random_gen) )
        joint_connection->random_gen = random_gen;
    joint_connection->forget();
}

/* THIS METHOD IS OPTIONAL
void RBMMultitaskClassificationModule::bbpropUpdate(const Vec& input, const Vec& output,
                                           const Vec& output_gradient,
                                           const Vec& output_diag_hessian)
{
}
*/

/* THIS METHOD IS OPTIONAL
void RBMMultitaskClassificationModule::bbpropUpdate(const Vec& input, const Vec& output,
                                           Vec& input_gradient,
                                           const Vec& output_gradient,
                                           Vec& input_diag_hessian,
                                           const Vec& output_diag_hessian,
                                           bool accumulate)
{
}
*/


} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :