00001 // -*- C++ -*-
00002 
00003 // UnfrozenDeepBeliefNet.cc
00004 //
00005 // Copyright (C) 2006 Pascal Lamblin
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Pascal Lamblin
00036 
00039 #define PL_LOG_MODULE_NAME "UnfrozenDeepBeliefNet"
00040 #include <plearn/io/pl_log.h>
00041 
00042 #include "UnfrozenDeepBeliefNet.h"
00043 #include "RBMLayer.h"
00044 #include "RBMMixedLayer.h"
00045 #include "RBMMultinomialLayer.h"
00046 #include "RBMParameters.h"
00047 #include "RBMLLParameters.h"
00048 #include "RBMJointLLParameters.h"
00049 
00050 namespace PLearn {
00051 using namespace std;
00052 
00053 PLEARN_IMPLEMENT_OBJECT(
00054     UnfrozenDeepBeliefNet,
00055     "HintonDeepBeliefNet without freezing weights of earlier layers",
00056     ""
00057 );
00058 
00060 // UnfrozenDeepBeliefNet //
00062 UnfrozenDeepBeliefNet::UnfrozenDeepBeliefNet() :
00063     inherited()
00064 {
00065 }
00066 
00068 // declareOptions //
00070 void UnfrozenDeepBeliefNet::declareOptions(OptionList& ol)
00071 {
00072     declareOption(ol, "learning_rates", &UnfrozenDeepBeliefNet::learning_rates,
00073                   OptionBase::buildoption,
00074                   "Learning rate of each layer");
00075 
00076     // Now call the parent class' declareOptions().
00077     inherited::declareOptions(ol);
00078 
00079     redeclareOption(ol, "learning_rate", &UnfrozenDeepBeliefNet::learning_rate,
00080                     OptionBase::buildoption,
00081                     "Global learning rate; it will not be used if"
00082                     " learning_rates is provided.");
00083 
00084     redeclareOption(ol, "training_schedule",
00085                   &UnfrozenDeepBeliefNet::training_schedule,
00086                   OptionBase::buildoption,
00087                   "No training_schedule: all layers are always trained.");
00088 
00089     redeclareOption(ol, "fine_tuning_method",
00090                     &UnfrozenDeepBeliefNet::fine_tuning_method,
00091                     OptionBase::nosave,
00092                     "No fine-tuning");
00093 }
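// Hedged usage sketch (not part of the original file): how this learner might
// be configured and trained from C++ using the options declared above. The
// option names come from declareOptions(); the surrounding calls only assume
// the generic PLearner interface (setOption / build / setTrainingSet / train),
// and the learning-rate values are made up for illustration.
//
//     PP<UnfrozenDeepBeliefNet> dbn = new UnfrozenDeepBeliefNet();
//     dbn->setOption("learning_rates", "[ 0.05 0.05 0.02 ]");
//     dbn->setOption("nstages", "100000");
//     dbn->build();
//     dbn->setTrainingSet(train_vmat); // train_vmat: a VMat of (input, target)
//     dbn->train();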
00094 
00096 // build //
00098 void UnfrozenDeepBeliefNet::build()
00099 {
00100     // ### Nothing to add here, simply calls build_().
00101     inherited::build();
00102     build_();
00103 }
00104 
00106 // build_ //
00108 void UnfrozenDeepBeliefNet::build_()
00109 {
00110     MODULE_LOG << "build_() called" << endl;
00111     MODULE_LOG << "stage = " << stage << endl;
00112 
00113     // check value of fine_tuning_method
00114     string ftm = lowerstring( fine_tuning_method );
00115     if( ftm == "" || ftm == "none" )
00116         fine_tuning_method = "";
00117     else
00118         PLERROR( "UnfrozenDeepBeliefNet::build_ - fine_tuning_method \"%s\"\n"
00119                  "is unknown.\n", fine_tuning_method.c_str() );
00120     MODULE_LOG << "  fine_tuning_method = \"" << fine_tuning_method << "\""
00121         <<  endl;
00122 
00123     if( learning_rates.length() != n_layers-1 )
00124         learning_rates = Vec( n_layers-1, learning_rate );
00125 
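    // One learning rate per weight layer: params[i] connects layers[i] and
    // layers[i+1]; the last rate goes to the joint (top) parameters.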
00126     for( int i=0 ; i<n_layers-2 ; i++ )
00127         params[i]->learning_rate = learning_rates[i];
00128     joint_params->learning_rate = learning_rates[n_layers-2];
00129 
00130     MODULE_LOG << "end of build_()" << endl;
00131 }
00132 
00134 // makeDeepCopyFromShallowCopy //
00136 void UnfrozenDeepBeliefNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00137 {
00138     inherited::makeDeepCopyFromShallowCopy(copies);
00139 }
00140 
00141 
00143 // train //
00145 void UnfrozenDeepBeliefNet::train()
00146 {
00147     MODULE_LOG << "train() called" << endl;
00148     // The role of the train method is to bring the learner up to
00149     // stage==nstages, updating train_stats with training costs measured
00150     // on-line in the process.
00151 
00152     /* TYPICAL CODE:
00153 
00154     static Vec input;  // static so we don't reallocate memory each time...
00155     static Vec target; // (but be careful that static means shared!)
00156     input.resize(inputsize());    // the train_set's inputsize()
00157     target.resize(targetsize());  // the train_set's targetsize()
00158     real weight;
00159 
00160     // This generic PLearner method does a number of standard things useful for
00161     // (almost) any learner, and returns 'false' if no training should take
00162     // place. See PLearner.h for more details.
00163     if (!initTrain())
00164         return;
00165 
00166     while(stage<nstages)
00167     {
00168         // clear statistics of previous epoch
00169         train_stats->forget();
00170 
00171         //... train for 1 stage, and update train_stats,
00172         // using train_set->getExample(input, target, weight)
00173         // and train_stats->update(train_costs)
00174 
00175         ++stage;
00176         train_stats->finalize(); // finalize statistics for this epoch
00177     }
00178     */
00179 
00180     Vec input( inputsize() );
00181     Vec target( targetsize() ); // unused
00182     real weight; // unused
00183     Vec train_costs( 2 );
00184 
00185     if( !initTrain() )
00186     {
00187         MODULE_LOG << "train() aborted" << endl;
00188         return;
00189     }
00190 
00191     int nsamples = train_set->length();
00192     MODULE_LOG << "nsamples = " << nsamples << endl;
00193 
00194     MODULE_LOG << "initial stage = " << stage << endl;
00195     MODULE_LOG << "objective: nstages = " << nstages << endl;
00196 
00197     for( ; stage < nstages ; stage++ )
00198     {
00199         // sample is the index in the training set
00200         int sample = stage % nsamples;
00201         if( sample == 0 )
00202         {
00203             MODULE_LOG << "train_stats->forget() called" << endl;
00204             train_stats->forget();
00205         }
00206 /*
00207         MODULE_LOG << "stage = " << stage << endl;
00208         MODULE_LOG << "sample = " << sample << endl;
00209 // */
00210         if( (100*stage) % nsamples == 0 )
00211             MODULE_LOG << "stage = " << stage << endl;
00212 
00213         train_set->getExample(sample, input, target, weight);
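        // splitCond() (inherited from the parent class) separates the input
        // vector into predictor_part (the conditioning inputs fed to
        // layers[0]) and predicted_part (the target, used below as a one-hot
        // class vector).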
00214         splitCond( input );
00215 
00216         // deterministic forward propagation
00217         layers[0]->expectation << predictor_part;
00218         for( int i=0 ; i<n_layers-2 ; i++ )
00219         {
00220             params[i]->setAsDownInput( layers[i]->expectation );
00221             layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00222             layers[i+1]->computeExpectation();
00223             layers[i+1]->generateSample();
00224             params[i]->accumulatePosStats( layers[i]->expectation,
00225                                            layers[i+1]->expectation );
00226         }
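        // At this point every hidden layer holds its expectation given the
        // input, and positive-phase statistics have been accumulated for
        // params[0] .. params[n_layers-3].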
00227 
00228         // compute output and cost at this point, even though it is not the
00229         // criterion that will be directly optimized
00230         joint_params->setAsCondInput( layers[n_layers-2]->expectation );
00231         target_layer->getAllActivations( (RBMLLParameters*) joint_params );
00232         target_layer->computeExpectation();
00233         // get costs: NLL of the observed class and 0-1 classification error
00234         int actual_index = argmax(predicted_part);
00235         train_costs[0] = -pl_log( target_layer->expectation[actual_index] );
00236         if( argmax( target_layer->expectation ) == actual_index )
00237             train_costs[1] = 0;
00238         else
00239             train_costs[1] = 1;
00240 
00241         // end of the forward propagation: clamp the true target in target_layer
00242         target_layer->expectation << predicted_part;
00243         joint_params->setAsDownInput( joint_layer->expectation );
00244         last_layer->getAllActivations( (RBMLLParameters*) joint_params );
00245         last_layer->computeExpectation();
00246         last_layer->generateSample();
00247         joint_params->accumulatePosStats( joint_layer->expectation,
00248                                           last_layer->expectation );
00249 
00250 
00251         // one step of contrastive divergence (CD-1) for each lower weight layer
00252         for( int i=0 ; i<n_layers-2 ; i++ )
00253         {
00254             // down propagation
00255             params[i]->setAsUpInput( layers[i+1]->sample );
00256             layers[i]->getAllActivations( (RBMLLParameters*) params[i] );
00257 
00258             // negative phase
00259             layers[i]->generateSample();
00260             params[i]->setAsDownInput( layers[i]->sample );
00261             layers[i+1]->getAllActivations( (RBMLLParameters*) params[i] );
00262             layers[i+1]->computeExpectation();
00263             params[i]->accumulateNegStats( layers[i]->sample,
00264                                            layers[i+1]->expectation );
00265             params[i]->update();
00266         }
00267         // down propagation
00268         joint_params->setAsUpInput( last_layer->sample );
00269         joint_layer->getAllActivations( (RBMLLParameters*) joint_params );
00270 
00271         // negative phase
00272         joint_layer->generateSample();
00273         joint_params->setAsDownInput( joint_layer->sample );
00274         last_layer->getAllActivations( (RBMLLParameters*) joint_params );
00275         last_layer->computeExpectation();
00276         joint_params->accumulateNegStats( joint_layer->sample,
00277                                           last_layer->expectation );
00278 
00279         // update the joint (top-level) parameters from the accumulated statistics
00280         joint_params->update();
00281 
00282         train_stats->update( train_costs );
00283     }
00284     train_stats->finalize();
00285     MODULE_LOG << endl;
00286 }
00287 
00288 } // end of namespace PLearn
00289 
00290 
00291 /*
00292   Local Variables:
00293   mode:c++
00294   c-basic-offset:4
00295   c-file-style:"stroustrup"
00296   c-file-offsets:((innamespace . 0)(inline-open . 0))
00297   indent-tabs-mode:nil
00298   fill-column:79
00299   End:
00300 */
00301 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :