StackedSVDNet.cc
00001 // -*- C++ -*-
00002 
00003 // StackedSVDNet.cc
00004 //
00005 // Copyright (C) 2007 Hugo Larochelle
00006 //
00007 // Redistribution and use in source and binary forms, with or without
00008 // modification, are permitted provided that the following conditions are met:
00009 //
00010 //  1. Redistributions of source code must retain the above copyright
00011 //     notice, this list of conditions and the following disclaimer.
00012 //
00013 //  2. Redistributions in binary form must reproduce the above copyright
00014 //     notice, this list of conditions and the following disclaimer in the
00015 //     documentation and/or other materials provided with the distribution.
00016 //
00017 //  3. The name of the authors may not be used to endorse or promote
00018 //     products derived from this software without specific prior written
00019 //     permission.
00020 //
00021 // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
00022 // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00023 // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
00024 // NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00025 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
00026 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00027 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00028 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00029 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00030 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00031 //
00032 // This file is part of the PLearn library. For more information on the PLearn
00033 // library, go to the PLearn Web site at www.plearn.org
00034 
00035 // Authors: Hugo Larochelle
00036 
00039 #include "StackedSVDNet.h"
00040 
00041 #define PL_LOG_MODULE_NAME "StackedSVDNet"
00042 #include <plearn/io/pl_log.h>
00043 #include <plearn/math/plapack.h>
00044 
00045 
00046 namespace PLearn {
00047 using namespace std;
00048 
00049 PLEARN_IMPLEMENT_OBJECT(
00050     StackedSVDNet,
00051     "Neural net, initialized with SVDs of logistic auto-regressions.",
00052     ""
00053     );
00054 
00055 StackedSVDNet::StackedSVDNet() :
00056     greedy_learning_rate( 0. ),
00057     greedy_decrease_ct( 0. ),
00058     fine_tuning_learning_rate( 0. ),
00059     fine_tuning_decrease_ct( 0. ),
00060     minibatch_size(50),
00061     global_output_layer(false),
00062     fill_in_null_diagonal(false),
00063     n_layers( 0 )
00064 {
00065     // random_gen will be initialized in PLearner::build_()
00066     random_gen = new PRandom();
00067     nstages = 0;
00068 }
00069 
00070 void StackedSVDNet::declareOptions(OptionList& ol)
00071 {
00072     declareOption(ol, "greedy_learning_rate", 
00073                   &StackedSVDNet::greedy_learning_rate,
00074                   OptionBase::buildoption,
00075                   "The learning rate used during the logistic auto-regression "
00076                   "gradient descent training"
00077         );
00078     
00079     declareOption(ol, "greedy_decrease_ct", 
00080                   &StackedSVDNet::greedy_decrease_ct,
00081                   OptionBase::buildoption,
00082                   "The decrease constant of the learning rate used during the "
00083                   "logistic auto-regression gradient descent training. "
00084         );
00085 
00086     declareOption(ol, "fine_tuning_learning_rate", 
00087                   &StackedSVDNet::fine_tuning_learning_rate,
00088                   OptionBase::buildoption,
00089                   "The learning rate used during the fine tuning gradient descent");
00090 
00091     declareOption(ol, "fine_tuning_decrease_ct", 
00092                   &StackedSVDNet::fine_tuning_decrease_ct,
00093                   OptionBase::buildoption,
00094                   "The decrease constant of the learning rate used during "
00095                   "fine tuning\n"
00096                   "gradient descent.\n");
00097 
00098     declareOption(ol, "minibatch_size", 
00099                   &StackedSVDNet::minibatch_size,
00100                   OptionBase::buildoption,
00101                   "Size of mini-batch for gradient descent");
00102 
00103     declareOption(ol, "training_schedule", &StackedSVDNet::training_schedule,
00104                   OptionBase::buildoption,
00105                   "Number of examples to use during each phase of learning:\n"
00106                   "first the greedy phases, and then the fine-tuning phase.\n"
00107                   "However, the learning will stop as soon as we reach nstages.\n"
00108                   "For example for 2 hidden layers, with 1000 examples in each\n"
00109                   "greedy phase, and 500 in the fine-tuning phase, this option\n"
00110                   "should be [1000 1000 500], and nstages should be at least 2500.\n"
00111         );
00112     
00113     declareOption(ol, "global_output_layer", 
00114                   &StackedSVDNet::global_output_layer,
00115                   OptionBase::buildoption,
00116                   "Indication that the output layer (given by the final module)\n"
00117                   "should have as input all units of the network (including the\n"
00118                   "input units).\n");
00119 
00120     declareOption(ol, "fill_in_null_diagonal", 
00121                   &StackedSVDNet::fill_in_null_diagonal,
00122                   OptionBase::buildoption,
00123                   "Indication that the zero diagonal of the weight matrix after\n"
00124                   "logistic auto-regression should be filled with the\n"
00125                   "maximum absolute value of each corresponding row.\n");
00126 
00127     declareOption(ol, "layers", &StackedSVDNet::layers,
00128                   OptionBase::buildoption,
00129                   "The layers of units in the network. The first element\n"
00130                   "of this vector should be the input layer and the\n"
00131                   "subsequent elements should be the hidden layers. The\n"
00132                   "output layer should not be included in layers.\n");
00133 
00134     declareOption(ol, "final_module", &StackedSVDNet::final_module,
00135                   OptionBase::buildoption,
00136                   "Module that takes as input the output of the last layer\n"
00137                   "(layers[n_layers-1]), and feeds its output to final_cost\n"
00138                   "which defines the fine-tuning criteria.\n"
00139                  );
00140 
00141     declareOption(ol, "final_cost", &StackedSVDNet::final_cost,
00142                   OptionBase::buildoption,
00143                   "The cost function to be applied on top of the neural network\n"
00144                   "(i.e. at the output of final_module). Its gradients will be \n"
00145                   "backpropagated to final_module and then backpropagated to\n"
00146                   "the layers.\n"
00147                   );
00148 
00149     declareOption(ol, "connections", &StackedSVDNet::connections,
00150                   OptionBase::learntoption,
00151                   "The weights of the connections between the layers");
00152 
00153     declareOption(ol, "n_layers", &StackedSVDNet::n_layers,
00154                   OptionBase::learntoption,
00155                   "Number of layers");
00156 
00157     // Now call the parent class' declareOptions
00158     inherited::declareOptions(ol);
00159 }
00160 
00161 void StackedSVDNet::build_()
00162 {
00163 
00164     MODULE_LOG << "build_() called" << endl;
00165 
00166     if(inputsize_ > 0 && targetsize_ > 0)
00167     {
00168         // Initialize some learnt variables
00169         n_layers = layers.length();
00170 
00171         if( training_schedule.length() != n_layers )
00172             PLERROR("StackedSVDNet::build_() - \n"
00173                     "training_schedule should have %d elements.\n",
00174                     n_layers);
00175 
00176         cumulative_schedule.resize( n_layers+1 );
00177         cumulative_schedule[0] = 0;
00178         for( int i=0 ; i<n_layers ; i++ )
00179         {
00180             cumulative_schedule[i+1] = cumulative_schedule[i] +
00181                 training_schedule[i];
00182         }
00183 
00184         reconstruction_test_costs.resize( n_layers-1 );
00185         reconstruction_test_costs.fill( MISSING_VALUE );
00186 
00187         if( weightsize_ > 0 )
00188             PLERROR("StackedSVDNet::build_() - \n"
00189                     "usage of weighted samples (weight size > 0) is not\n"
00190                     "implemented yet.\n");
00191         
00192         if(layers[0]->size != inputsize_)
00193             PLERROR("StackedSVDNet::build_() - \n"
00194                     "layers[0] should have a size of %d.\n",
00195                     inputsize_);
00196 
00197         reconstruction_costs.resize(minibatch_size,1);    
00198 
00199         activation_gradients.resize( n_layers );
00200         expectation_gradients.resize( n_layers );
00201 
00202         for( int i=0 ; i<n_layers ; i++ )
00203         {
00204             if( !(layers[i]->random_gen) )
00205             {
00206                 layers[i]->random_gen = random_gen;
00207                 layers[i]->forget();
00208             }
00209 
00210             if(i>0 && layers[i]->size > layers[i-1]->size)
00211                 PLERROR("In StackedSVDNet::build_(): "
00212                     "layers must have non-increasing sizes from bottom to top.");
00213                 
00214             activation_gradients[i].resize( minibatch_size, layers[i]->size );
00215             expectation_gradients[i].resize( minibatch_size, layers[i]->size );
00216         }
00217 
00218         if( !final_cost )
00219             PLERROR("StackedSVDNet::build_() - \n"
00220                     "final_cost should be provided.\n");
00221 
00222         final_cost_inputs.resize( minibatch_size, final_cost->input_size );
00223         final_cost_value.resize( final_cost->output_size );
00224         final_cost_values.resize( minibatch_size, final_cost->output_size );
00225         final_cost_gradients.resize( minibatch_size, final_cost->input_size );
00226         final_cost->setLearningRate( fine_tuning_learning_rate );
00227 
00228         if( !(final_cost->random_gen) )
00229         {
00230             final_cost->random_gen = random_gen;
00231             final_cost->forget();
00232         }
00233 
00234         if( !final_module )
00235             PLERROR("StackedSVDNet::build_() - \n"
00236                     "final_module should be provided.\n");
00237     
00238         if(global_output_layer)
00239         {
00240             int sum = 0;
00241             for(int i=0; i<layers.length(); i++)
00242                 sum += layers[i]->size;
00243             if( sum != final_module->input_size )
00244                 PLERROR("StackedSVDNet::build_() - \n"
00245                         "final_module should have an input_size of %d.\n", 
00246                         sum);
00247 
00248             global_output_layer_input.resize(sum);
00249             global_output_layer_inputs.resize(minibatch_size,sum);
00250             global_output_layer_input_gradients.resize(minibatch_size,sum);
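            // Alias the top layer's expectation gradients to the last columns
            // of the concatenated gradient buffer, so that the gradient coming
            // back from final_module is written directly into
            // expectation_gradients[n_layers-1] during fine-tuning.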
00251             expectation_gradients[n_layers-1] = 
00252                 global_output_layer_input_gradients.subMat(
00253                     0, sum-layers[n_layers-1]->size, 
00254                     minibatch_size, layers[n_layers-1]->size);
00255         }
00256         else
00257         {
00258             if( layers[n_layers-1]->size != final_module->input_size )
00259                 PLERROR("StackedSVDNet::build_() - \n"
00260                         "final_module should have an input_size of %d.\n", 
00261                         layers[n_layers-1]->size);
00262         }
00263 
00264         if( final_module->output_size != final_cost->input_size )
00265             PLERROR("StackedSVDNet::build_() - \n"
00266                     "final_module should have an output_size of %d.\n", 
00267                     final_cost->input_size);
00268 
00269         final_module->setLearningRate( fine_tuning_learning_rate );
00270 
00271         if( !(final_module->random_gen) )
00272         {
00273             final_module->random_gen = random_gen;
00274             final_module->forget();
00275         }
00276 
00277 
00278         if(targetsize_ != 1)
00279             PLERROR("StackedSVDNet::build_() - \n"
00280                     "target size of %d is not supported.\n", targetsize_);    
00281     }
00282 }
00283 
00284 // ### Nothing to add here, simply calls build_
00285 void StackedSVDNet::build()
00286 {
00287     inherited::build();
00288     build_();
00289 }
00290 
00291 
00292 void StackedSVDNet::makeDeepCopyFromShallowCopy(CopiesMap& copies)
00293 {
00294     inherited::makeDeepCopyFromShallowCopy(copies);
00295 
00296     // deepCopyField(, copies);
00297 
00298     deepCopyField(training_schedule, copies);
00299     deepCopyField(layers, copies);
00300     deepCopyField(final_module, copies);
00301     deepCopyField(final_cost, copies);
00302     deepCopyField(connections, copies);
00303     deepCopyField(rbm_connections, copies);
00304     deepCopyField(activation_gradients, copies);
00305     deepCopyField(expectation_gradients, copies);
00306     deepCopyField(reconstruction_layer, copies);
00307     deepCopyField(reconstruction_targets, copies);
00308     deepCopyField(reconstruction_costs, copies);
00309     deepCopyField(reconstruction_test_costs, copies);
00310     deepCopyField(reconstruction_activation_gradient, copies);
00311     deepCopyField(reconstruction_activation_gradients, copies);
00312     deepCopyField(reconstruction_input_gradients, copies);
00313     deepCopyField(global_output_layer_input, copies);
00314     deepCopyField(global_output_layer_inputs, copies);
00315     deepCopyField(global_output_layer_input_gradients, copies);
00316     deepCopyField(final_cost_inputs, copies);
00317     deepCopyField(final_cost_value, copies);
00318     deepCopyField(final_cost_values, copies);
00319     deepCopyField(final_cost_gradients, copies);
00320     deepCopyField(cumulative_schedule, copies);
00321     
00322     //PLERROR("In StackedSVDNet::makeDeepCopyFromShallowCopy(): "
00323     //        "not implemented yet.");
00324 }
00325 
00326 
00327 int StackedSVDNet::outputsize() const
00328 {
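    // The reported output size depends on the training phase: while a greedy
    // phase is running, computeOutput() returns the reconstruction of the
    // layer currently being auto-regressed, so the output has that layer's
    // size; once fine-tuning has started, the output comes from final_module.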
00329     if( stage == 0 )
00330         return layers[0]->size;
00331     for( int i=1; i<n_layers; i++ )
00332         if( stage <= cumulative_schedule[i] )
00333             return layers[i-1]->size;
00334     return final_module->output_size;
00335 }
00336 
00337 void StackedSVDNet::forget()
00338 {
00339     inherited::forget();
00340 
00341     connections.resize(0);
00342     rbm_connections.resize(0);
00343     
00344     for(int i=0; i<layers.length(); i++)
00345         layers[i]->forget();
00346 
00347     final_module->forget();
00348     final_cost->forget();
00349 
00350     stage = 0;
00351 }
00352 
00353 void StackedSVDNet::train()
00354 {
00355     MODULE_LOG << "train() called " << endl;
00356 
00357     // Enforce value of cumulative_schedule because build_() might
00358     // not be called if we change training_schedule inside a HyperLearner
00359     for( int i=0 ; i<n_layers ; i++ )
00360         cumulative_schedule[i+1] = cumulative_schedule[i] +
00361             training_schedule[i];
00362 
00363     Vec input( inputsize() );
00364     Vec target( targetsize() );
00365     Mat inputs( minibatch_size, inputsize() );
00366     Mat targets( minibatch_size, targetsize() );
00367     Vec weights( minibatch_size );
00368 
00369     TVec<string> train_cost_names = getTrainCostNames() ;
00370     Vec train_costs( train_cost_names.length() );
00371     train_costs.fill(MISSING_VALUE) ;
00372 
00373     PP<ProgressBar> pb;
00374 
00375     // clear stats of previous epoch
00376     train_stats->forget();
00377 
00378     real lr = 0;
00379     int init_stage;
00380     int end_stage;
00381 
00382     /***** initial greedy training *****/
00383     connections.resize(n_layers-1);
00384     rbm_connections.resize(n_layers-1);
00385     TVec< Vec > biases(n_layers-1);
00386     for( int i=0 ; i<n_layers-1 ; i++ )
00387     {
00388 
00389         end_stage = min(cumulative_schedule[i+1], nstages);
00390         if( stage >= end_stage )
00391             continue;
00392 
00393         MODULE_LOG << "Training connection weights between layers " << i
00394                    << " and " << i+1 << endl;
00395         MODULE_LOG << "  stage = " << stage << endl;
00396         MODULE_LOG << "  end_stage = " << end_stage << endl;
00397         MODULE_LOG << "  greedy_learning_rate = " 
00398                    << greedy_learning_rate << endl;
00399 
00400         if( report_progress )
00401             pb = new ProgressBar( "Training layer "+tostring(i)
00402                                   +" of "+classname(),
00403                                   end_stage - stage );
00404 
00405 
00406         // Finalize training of last layer (if any)
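        // The logistic auto-regression trained in the previous greedy phase
        // left a square weight matrix W and a bias vector b for layers[i-1]
        // (W's diagonal is zero unless fill_in_null_diagonal filled it above).
        // The matrix A = [b | W] is decomposed as A = U S Vt, and the first
        // layers[i]->size rows of Vt, each scaled by its singular value,
        // become the weights into layers[i]; the first column of Vt (scaled
        // the same way) provides the biases.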
00407         if( i>0 && stage < end_stage && stage == cumulative_schedule[i] )
00408         {
00409             if(fill_in_null_diagonal)
00410             {
00411                 // Fill in the empty diagonal
00412                 for(int j=0; j<layers[i]->size; j++)
00413                 {
00414                     connections[i-1]->weights(j,j) = 
00415                         maxabs(connections[i-1]->weights(j));
00416                 }
00417             }
00418             
00419             if(layers[i-1]->size != layers[i]->size)
00420             {
00421                 Mat A,U,Vt;
00422                 Vec S;
00423                 A.resize( reconstruction_layer->size, 
00424                           reconstruction_layer->size+1);
00425                 A.column( 0 ) << reconstruction_layer->bias;
00426                 A.subMat( 0, 1, reconstruction_layer->size, 
00427                           reconstruction_layer->size ) << 
00428                     connections[i-1]->weights;
00429                 SVD( A, U, S, Vt );
00430                 connections[ i-1 ]->up_size = layers[ i ]->size;
00431                 connections[ i-1 ]->down_size = layers[ i-1 ]->size;
00432                 connections[ i-1 ]->build();
00433                 connections[ i-1 ]->weights << Vt.subMat( 
00434                     0, 1, layers[ i ]->size, Vt.width()-1 );
00435                 biases[ i-1 ].resize( layers[i]->size );
00436                 for(int j=0; j<biases[ i-1 ].length(); j++)
00437                     biases[ i-1 ][ j ] = Vt(j,0);
00438                 
00439                 for(int j=0; j<connections[ i-1 ]->up_size; j++)
00440                 {
00441                     connections[ i-1 ]->weights( j ) *= S[ j ];
00442                     biases[ i-1 ][ j ] *= S[ j ];
00443                 }
00444             }
00445             else
00446             {
00447                 biases[ i-1 ].resize( layers[ i ]->size );
00448                 biases[ i-1 ] << reconstruction_layer->bias;
00449             }
00450             layers[ i ]->bias << biases[ i-1 ];
00451         }
00452 
00453         // Create connections
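        // Start a new greedy phase: connections[i] is a square matrix over
        // layers[i] with its diagonal forced to zero, so that in the logistic
        // auto-regression each unit is predicted only from the other units;
        // reconstruction_layer is a deep copy of layers[i] serving as the
        // output layer of that auto-regression.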
00454         if(stage == cumulative_schedule[i])
00455         {
00456             connections[i] = new RBMMatrixConnection();
00457             connections[i]->up_size = layers[i]->size;
00458             connections[i]->down_size = layers[i]->size;
00459             connections[i]->random_gen = random_gen;
00460             connections[i]->build();
00461             for(int j=0; j < layers[i]->size; j++)
00462                 connections[i]->weights(j,j) = 0;
00463 
00464             rbm_connections[i] = (RBMMatrixConnection *) connections[i];
00465 
00466             CopiesMap map;
00467             reconstruction_layer = layers[ i ]->deepCopy( map );
00468             reconstruction_targets.resize( minibatch_size, layers[ i ]->size );
00469             reconstruction_activation_gradient.resize( layers[ i ]->size );
00470             reconstruction_activation_gradients.resize( 
00471                 minibatch_size, layers[ i ]->size );
00472             reconstruction_input_gradients.resize( 
00473                 minibatch_size, layers[ i ]->size );
00474 
00475             lr = greedy_learning_rate;
00476             connections[i]->setLearningRate( lr );
00477             reconstruction_layer->setLearningRate( lr );
00478         }
00479 
00480         for( ; stage<end_stage ; stage++)
00481         {
00482             train_stats->forget();
00483             
00484             if( !fast_exact_is_equal( greedy_decrease_ct , 0 ) )
00485             {
00486                 lr = greedy_learning_rate/(1 + greedy_decrease_ct 
00487                                            * (stage - cumulative_schedule[i]) );
00488                 connections[i]->setLearningRate( lr );
00489                 reconstruction_layer->setLearningRate( lr );                
00490             }
00491             
00492             train_set->getExamples((stage*minibatch_size)%train_set->length(),
00493                                    minibatch_size, inputs, targets, weights,
00494                                    NULL, true);
00495                                    
00496             greedyStep( inputs, targets, i, train_costs );
00497             train_stats->update( train_costs );
00498 
00499             if( pb )
00500                 pb->update( stage - cumulative_schedule[i] + 1 );
00501         }
00502         train_stats->finalize();
00503     }
00504 
00505     /***** fine-tuning by gradient descent *****/
00506 
00507     end_stage = min(cumulative_schedule[n_layers], nstages);
00508     if( stage >= end_stage )
00509         return;
00510 
00511     // Finalize training of last layer (if any)
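    // This duplicates the SVD-based finalization performed at the start of
    // each greedy phase, for the top-most auto-regression, which has no
    // subsequent greedy phase to finalize it before fine-tuning.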
00512     if( n_layers>1 && stage < end_stage && stage == cumulative_schedule[n_layers-1] )
00513     {
00514         if(fill_in_null_diagonal)
00515         {
00516             // Fill in the empty diagonal
00517             for(int j=0; j<layers[n_layers-1]->size; j++)
00518             {
00519                 connections[n_layers-2]->weights(j,j) = 
00520                     maxabs(connections[n_layers-2]->weights(j));
00521             }
00522         }
00523         
00524         if(layers[n_layers-2]->size != layers[n_layers-1]->size)
00525         {
00526             Mat A,U,Vt;
00527             Vec S;
00528             A.resize( reconstruction_layer->size, 
00529                       reconstruction_layer->size+1);
00530             A.column( 0 ) << reconstruction_layer->bias;
00531             A.subMat( 0, 1, reconstruction_layer->size, 
00532                       reconstruction_layer->size ) << 
00533                 connections[n_layers-2]->weights;
00534             SVD( A, U, S, Vt );
00535             connections[ n_layers-2 ]->up_size = layers[ n_layers-1 ]->size;
00536             connections[ n_layers-2 ]->down_size = layers[ n_layers-2 ]->size;
00537             connections[ n_layers-2 ]->build();
00538             connections[ n_layers-2 ]->weights << Vt.subMat( 
00539                 0, 1, layers[ n_layers-1 ]->size, Vt.width()-1 );
00540             biases[ n_layers-2 ].resize( layers[n_layers-1]->size );
00541             for(int j=0; j<biases[ n_layers-2 ].length(); j++)
00542                 biases[ n_layers-2 ][ j ] = Vt(j,0);
00543             
00544             for(int j=0; j<connections[ n_layers-2 ]->up_size; j++)
00545             {
00546                 connections[ n_layers-2 ]->weights( j ) *= S[ j ];
00547                 biases[ n_layers-2 ][ j ] *= S[ j ];
00548             }
00549         }
00550         else
00551         {
00552             biases[ n_layers-2 ].resize( layers[ n_layers-1 ]->size );
00553             biases[ n_layers-2 ] << reconstruction_layer->bias;
00554         }
00555         layers[ n_layers-1 ]->bias << biases[ n_layers-2 ];
00556     }
00557 
00558     MODULE_LOG << "Fine-tuning all parameters, by gradient descent" << endl;
00559     MODULE_LOG << "  stage = " << stage << endl;
00560     MODULE_LOG << "  end_stage = " << end_stage << endl;
00561     MODULE_LOG << "  fine_tuning_learning_rate = " 
00562                << fine_tuning_learning_rate << endl;
00563     
00564     init_stage = stage;
00565     if( report_progress && stage < end_stage )
00566         pb = new ProgressBar( "Fine-tuning parameters of all layers of "
00567                               + classname(),
00568                               end_stage - init_stage );
00569     
00570     setLearningRate( fine_tuning_learning_rate );
00571     train_costs.fill(MISSING_VALUE);
00572     
00573     for( ; stage<end_stage ; stage++ )
00574     {
00575         if( !fast_exact_is_equal( fine_tuning_decrease_ct, 0. ) )
00576             setLearningRate( fine_tuning_learning_rate
00577                              / (1. + fine_tuning_decrease_ct * 
00578                                 (stage - cumulative_schedule[n_layers]) ) );
00579             
00580         train_set->getExamples((stage*minibatch_size)%train_set->length(),
00581                                minibatch_size, inputs, targets, weights,
00582                                NULL, true);
00583         
00584         fineTuningStep( inputs, targets, train_costs );
00585         train_stats->update( train_costs );
00586         
00587         if( pb )
00588             pb->update( stage - init_stage + 1 );
00589     }
00590 
00591     if(verbosity > 2)
00592         cout << "error at stage " << stage << ": " << 
00593             train_stats->getMean() << endl;
00594     train_stats->finalize();
00595 }
00596 
00597 void StackedSVDNet::greedyStep( const Mat& inputs, const Mat& targets, int index, Vec train_costs )
00598 {
00599     PLASSERT( index < n_layers );
00600 
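    // One mini-batch of logistic auto-regression training for layer `index`:
    // propagate the inputs up to layers[index], reconstruct that
    // representation through connections[index] and reconstruction_layer,
    // update both from the reconstruction NLL gradient, then reset the
    // diagonal of the weight matrix to zero.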
00601     layers[ 0 ]->setExpectations( inputs );
00602     
00603     for( int i=0 ; i<index ; i++ )
00604     {
00605         connections[ i ]->setAsDownInputs( layers[i]->getExpectations() );
00606         layers[ i+1 ]->getAllActivations( rbm_connections[i], 0, true );
00607         layers[ i+1 ]->computeExpectations();
00608     }
00609     reconstruction_targets << layers[ index ]->getExpectations();
00610     
00611     connections[ index ]->setAsDownInputs( layers[ index ]->getExpectations() );
00612     reconstruction_layer->getAllActivations( rbm_connections[ index ], 0, true );
00613     reconstruction_layer->computeExpectations();
00614     
00615     reconstruction_layer->fpropNLL( layers[ index ]->getExpectations(), 
00616                                     reconstruction_costs);
00617     train_costs[index] = sum( reconstruction_costs )/minibatch_size;
00618 
00619     reconstruction_layer->bpropNLL( 
00620         layers[ index ]->getExpectations(), reconstruction_costs,
00621         reconstruction_activation_gradients );
00622 
00623     columnMean( reconstruction_activation_gradients, 
00624                 reconstruction_activation_gradient );
00625     reconstruction_layer->update( reconstruction_activation_gradient );
00626 
00627     connections[ index ]->bpropUpdate( 
00628         layers[ index ]->getExpectations(), 
00629         layers[ index ]->activations, 
00630         reconstruction_input_gradients, 
00631         reconstruction_activation_gradients);
00632 
00633     // Set diagonal to zero
00634     for(int i=0; i<connections[ index ]->up_size; i++)
00635         connections[ index ]->weights(i,i) = 0;
00636 }
00637 
00638 void StackedSVDNet::fineTuningStep( const Mat& inputs, const Mat& targets,
00639                                     Vec& train_costs )
00640 {
00641     // fprop
00642     layers[ 0 ]->setExpectations( inputs );
00643     
00644     for( int i=0 ; i<n_layers-1 ; i++ )
00645     {
00646         connections[ i ]->setAsDownInputs( layers[i]->getExpectations() );
00647         layers[ i+1 ]->getAllActivations( rbm_connections[i], 0, true );
00648         layers[ i+1 ]->computeExpectations();
00649     }
00650 
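    // With global_output_layer, final_module takes as input the concatenation
    // of every layer's expectations (input layer included); otherwise it only
    // sees the top layer's expectations.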
00651     if( global_output_layer )
00652     {
00653         int offset = 0;
00654         for(int i=0; i<layers.length(); i++)
00655         {
00656             global_output_layer_inputs.subMat(0, offset, 
00657                                               minibatch_size, layers[i]->size)
00658                 << layers[i]->getExpectations();
00659             offset += layers[i]->size;
00660         }
00661         final_module->fprop( global_output_layer_inputs, final_cost_inputs );
00662     }
00663     else
00664     {
00665         final_module->fprop( layers[ n_layers-1 ]->getExpectations(),
00666                              final_cost_inputs );
00667     }
00668     final_cost->fprop( final_cost_inputs, targets, final_cost_values );
00669 
00670     columnMean( final_cost_values, 
00671                 final_cost_value );
00672     train_costs.subVec(train_costs.length()-final_cost_value.length(),
00673                        final_cost_value.length()) << final_cost_value;
00674 
00675     final_cost->bpropUpdate( final_cost_inputs, targets,
00676                              final_cost_value,
00677                              final_cost_gradients );
00678     
00679     if( global_output_layer )
00680     {
00681         final_module->bpropUpdate( global_output_layer_inputs,
00682                                    final_cost_inputs,
00683                                    global_output_layer_input_gradients,
00684                                    final_cost_gradients );     
00685     }
00686     else
00687     {
00688         final_module->bpropUpdate( layers[ n_layers-1 ]->getExpectations(),
00689                                    final_cost_inputs,
00690                                    expectation_gradients[ n_layers-1 ],
00691                                    final_cost_gradients );
00692     }
00693 
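    // Backpropagate through the stack. With global_output_layer, `sum` tracks
    // where the current layer's block ends inside the concatenated input, so
    // the matching slice of the gradient on final_module's input is added to
    // that layer's expectation gradient before propagating further down.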
00694     int sum = final_module->input_size - layers[ n_layers-1 ]->size;
00695     for( int i=n_layers-1 ; i>0 ; i-- )
00696     {
00697         if( global_output_layer && i != n_layers-1 )
00698         {
00699             expectation_gradients[ i ] +=  
00700                 global_output_layer_input_gradients.subMat(
00701                     0, sum - layers[i]->size,
00702                     minibatch_size, layers[i]->size);
00703             sum -= layers[i]->size;
00704         }
00705                 
00706 
00707         layers[ i ]->bpropUpdate( layers[ i ]->activations,
00708                                   layers[ i ]->getExpectations(),
00709                                   activation_gradients[ i ],
00710                                   expectation_gradients[ i ] );
00711 
00712         connections[ i-1 ]->bpropUpdate( layers[ i-1 ]->getExpectations(),
00713                                          layers[ i ]->activations,
00714                                          expectation_gradients[ i-1 ],
00715                                          activation_gradients[ i ] );
00716     }
00717 }
00718 
00719 void StackedSVDNet::computeOutput(const Vec& input, Vec& output) const
00720 {
00721     // fprop
00722     layers[ 0 ]->expectation <<  input ;
00723     layers[ 0 ]->expectation_is_up_to_date = true;
00724     
00725     if( stage == 0 )
00726     {
00727         output << input;
00728         return;
00729     }
00730 
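    // While a greedy phase is still in progress, the output is the
    // reconstruction of the representation currently being trained; the
    // reconstruction NLL is cached in reconstruction_test_costs for use in
    // computeCostsFromOutputs().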
00731     for( int i=0 ; i<n_layers-1 ; i++ )
00732     {
00733         connections[ i ]->setAsDownInput( layers[i]->expectation );
00734         if( stage <= cumulative_schedule[i+1] )
00735         {
00736             reconstruction_layer->getAllActivations( rbm_connections[i], 0, false );
00737             reconstruction_layer->computeExpectation();
00738             reconstruction_test_costs[i] = 
00739                 reconstruction_layer->fpropNLL( layers[i]->expectation );
00740             output << reconstruction_layer->expectation;
00741             return;
00742         }
00743         layers[ i+1 ]->getAllActivations( rbm_connections[i], 0, false );
00744         layers[ i+1 ]->computeExpectation();
00745     }
00746 
00747     if(global_output_layer)
00748     {
00749         int offset = 0;
00750         for(int i=0; i<layers.length(); i++)
00751         {
00752             global_output_layer_input.subVec(offset, layers[i]->size)
00753                 << layers[i]->expectation;
00754             offset += layers[i]->size;
00755         }
00756         final_module->fprop( global_output_layer_input, output );
00757     }
00758     else
00759     {
00760         final_module->fprop( layers[ n_layers-1 ]->expectation,
00761                              output );
00762     }
00763 }
00764 
00765 void StackedSVDNet::computeCostsFromOutputs(const Vec& input, const Vec& output,
00766                                            const Vec& target, Vec& costs) const
00767 {
00768     //Assumes that computeOutput has been called
00769 
00770     costs.resize( getTestCostNames().length() );
00771     costs.fill( MISSING_VALUE );
00772 
00773     if( stage == 0 )
00774         return;
00775 
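    // During a greedy phase, only the reconstruction error of the layer being
    // trained is reported; the remaining costs stay MISSING_VALUE.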
00776     for( int i=0 ; i<n_layers-1 ; i++ )
00777     {
00778         if( stage <= cumulative_schedule[i+1] )
00779         {
00780             costs[i] = reconstruction_test_costs[i];
00781             return;
00782         }
00783     }
00784     
00785     final_cost->fprop( output, target, final_cost_value );
00786     costs.subVec(0, reconstruction_test_costs.length()) << reconstruction_test_costs;
00787     costs.subVec(costs.length()-final_cost_value.length(),
00788                  final_cost_value.length()) <<
00789         final_cost_value;
00790 }
00791 
00792 TVec<string> StackedSVDNet::getTestCostNames() const
00793 {
00794     // Return the names of the costs computed by computeCostsFromOutputs
00795     // (these may or may not be exactly the same as what's returned by
00796     // getTrainCostNames).
00797 
00798     TVec<string> cost_names(0);
00799 
00800     for( int i=0; i<layers.size()-1; i++)
00801         cost_names.push_back("layer"+tostring(i)+".reconstruction_error");
00802     
00803     cost_names.append( final_cost->name() );
00804 
00805     return cost_names;
00806 }
00807 
00808 TVec<string> StackedSVDNet::getTrainCostNames() const
00809 {
00810     return getTestCostNames() ;    
00811 }
00812 
00813 
00814 //#####  Helper functions  ##################################################
00815 
00816 void StackedSVDNet::setLearningRate( real the_learning_rate )
00817 {
00818     for( int i=0 ; i<n_layers-1 ; i++ )
00819     {
00820         layers[i]->setLearningRate( the_learning_rate );
00821         connections[i]->setLearningRate( the_learning_rate );
00822     }
00823     layers[n_layers-1]->setLearningRate( the_learning_rate );
00824 
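    // Note that final_cost and final_module always use
    // fine_tuning_learning_rate, regardless of the_learning_rate passed in.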
00825     final_cost->setLearningRate( fine_tuning_learning_rate );
00826     final_module->setLearningRate( fine_tuning_learning_rate );
00827 }
00828 
00829 
00830 } // end of namespace PLearn
00831 
00832 
00833 /*
00834   Local Variables:
00835   mode:c++
00836   c-basic-offset:4
00837   c-file-style:"stroustrup"
00838   c-file-offsets:((innamespace . 0)(inline-open . 0))
00839   indent-tabs-mode:nil
00840   fill-column:79
00841   End:
00842 */
00843 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :