DiscriminativeRBM.cc
// -*- C++ -*-

// DiscriminativeRBM.cc
//
// Copyright (C) 2008 Hugo Larochelle
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

// Authors: Hugo Larochelle

#define PL_LOG_MODULE_NAME "DiscriminativeRBM"
#include "DiscriminativeRBM.h"
#include <plearn/io/pl_log.h>

#define minibatch_hack 0 // Do we force the minibatch setting? (debug hack)

namespace PLearn {
using namespace std;

PLEARN_IMPLEMENT_OBJECT(
    DiscriminativeRBM,
    "Discriminative Restricted Boltzmann Machine classifier.",
    "This classifier supports semi-supervised learning, as well as\n"
    "hybrid generative/discriminative learning. It is based on a\n"
    "Restricted Boltzmann Machine where the visible units contain\n"
    "the input and the class target.");
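
// Note: as a sketch of the underlying model (following Larochelle & Bengio,
// 2008, "Classification using Discriminative Restricted Boltzmann Machines"),
// the RBM's visible layer is the concatenation of the input x and a one-hot
// encoding of the class y, and the conditional used for discriminative
// training can be computed exactly as
//
//   p(y|x) = exp( d_y + sum_j softplus(c_j + U_jy + sum_i W_ji x_i) )
//            / sum_y' exp( d_y' + sum_j softplus(c_j + U_jy' + sum_i W_ji x_i) )
//
// where W are the input-to-hidden weights ('connection'), U the
// target-to-hidden weights ('last_to_target'), and c, d the hidden- and
// target-layer biases.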

///////////////////////
// DiscriminativeRBM //
///////////////////////
DiscriminativeRBM::DiscriminativeRBM() :
    disc_learning_rate( 0. ),
    disc_decrease_ct( 0. ),
    use_exact_disc_gradient( false ),
    gen_learning_weight( 0. ),
    use_multi_conditional_learning( false ),
    semi_sup_learning_weight( 0. ),
    n_classes( -1 ),
    target_weights_L1_penalty_factor( 0. ),
    target_weights_L2_penalty_factor( 0. ),
    do_not_use_discriminative_learning( false ),
    unlabeled_class_index_begin( 0 ),
    n_classes_at_test_time( -1 ),
    n_mean_field_iterations( 1 ),
    gen_learning_every_n_samples( 1 )
{
    random_gen = new PRandom();
}

////////////////////
// declareOptions //
////////////////////
void DiscriminativeRBM::declareOptions(OptionList& ol)
{
    declareOption(ol, "disc_learning_rate", &DiscriminativeRBM::disc_learning_rate,
                  OptionBase::buildoption,
                  "The learning rate used for discriminative learning.\n");

    declareOption(ol, "disc_decrease_ct", &DiscriminativeRBM::disc_decrease_ct,
                  OptionBase::buildoption,
                  "The decrease constant of the discriminative learning rate.\n");

    declareOption(ol, "use_exact_disc_gradient",
                  &DiscriminativeRBM::use_exact_disc_gradient,
                  OptionBase::buildoption,
                  "Indication that the exact gradient should be used for\n"
                  "discriminative learning (instead of the CD gradient).\n");

    declareOption(ol, "gen_learning_weight", &DiscriminativeRBM::gen_learning_weight,
                  OptionBase::buildoption,
                  "The weight of the generative learning term, for\n"
                  "hybrid discriminative/generative learning.\n");

    declareOption(ol, "use_multi_conditional_learning",
                  &DiscriminativeRBM::use_multi_conditional_learning,
                  OptionBase::buildoption,
                  "Indication that multi-conditional learning should\n"
                  "be used instead of generative learning.\n");

    declareOption(ol, "semi_sup_learning_weight",
                  &DiscriminativeRBM::semi_sup_learning_weight,
                  OptionBase::buildoption,
                  "The weight of the semi-supervised learning term, for\n"
                  "unsupervised learning on unlabeled data.\n");

    declareOption(ol, "n_classes", &DiscriminativeRBM::n_classes,
                  OptionBase::buildoption,
                  "Number of classes in the training set.\n"
                  );

    declareOption(ol, "input_layer", &DiscriminativeRBM::input_layer,
                  OptionBase::buildoption,
                  "The input layer of the RBM.\n");

    declareOption(ol, "hidden_layer", &DiscriminativeRBM::hidden_layer,
                  OptionBase::buildoption,
                  "The hidden layer of the RBM.\n");

    declareOption(ol, "connection", &DiscriminativeRBM::connection,
                  OptionBase::buildoption,
                  "The connection weights between the input and hidden layer.\n");

    declareOption(ol, "target_weights_L1_penalty_factor",
                  &DiscriminativeRBM::target_weights_L1_penalty_factor,
                  OptionBase::buildoption,
                  "Target weights' L1_penalty_factor.\n");

    declareOption(ol, "target_weights_L2_penalty_factor",
                  &DiscriminativeRBM::target_weights_L2_penalty_factor,
                  OptionBase::buildoption,
                  "Target weights' L2_penalty_factor.\n");

    declareOption(ol, "do_not_use_discriminative_learning",
                  &DiscriminativeRBM::do_not_use_discriminative_learning,
                  OptionBase::buildoption,
                  "Indication that discriminative learning should not be used.\n");

    declareOption(ol, "unlabeled_class_index_begin",
                  &DiscriminativeRBM::unlabeled_class_index_begin,
                  OptionBase::buildoption,
                  "The smallest index for the classes of the unlabeled data.\n");

    declareOption(ol, "n_classes_at_test_time",
                  &DiscriminativeRBM::n_classes_at_test_time,
                  OptionBase::buildoption,
                  "The number of classes to discriminate from during test.\n"
                  "The classes that will be discriminated are indexed\n"
                  "from 0 to n_classes_at_test_time - 1.\n");

    declareOption(ol, "n_mean_field_iterations",
                  &DiscriminativeRBM::n_mean_field_iterations,
                  OptionBase::buildoption,
                  "Number of mean field iterations for the approximate computation of p(y|x)\n"
                  "for multitask learning.\n");

    declareOption(ol, "gen_learning_every_n_samples",
                  &DiscriminativeRBM::gen_learning_every_n_samples,
                  OptionBase::buildoption,
                  "Determines the frequency of a generative learning update.\n"
                  "For example, set this option to 100 in order to do an\n"
                  "update every 100 samples. The gen_learning_weight will\n"
                  "then be multiplied by 100.");

    declareOption(ol, "classification_module",
                  &DiscriminativeRBM::classification_module,
                  OptionBase::learntoption,
                  "The module computing the class probabilities.\n"
                  );

    declareOption(ol, "multitask_classification_module",
                  &DiscriminativeRBM::multitask_classification_module,
                  OptionBase::learntoption,
                  "The module approximating the multitask class probabilities.\n"
                  );

    declareOption(ol, "classification_cost",
                  &DiscriminativeRBM::classification_cost,
                  OptionBase::nosave,
                  "The module computing the classification cost function (NLL) on top\n"
                  "of classification_module.\n"
                  );

    declareOption(ol, "joint_layer", &DiscriminativeRBM::joint_layer,
                  OptionBase::nosave,
                  "Concatenation of input_layer and the target layer\n"
                  "(that is inside classification_module).\n"
                 );

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

////////////
// build_ //
////////////
void DiscriminativeRBM::build_()
{
    MODULE_LOG << "build_() called" << endl;

    if( inputsize_ > 0 && targetsize_ > 0)
    {
        PLASSERT( n_classes >= 2 );
        PLASSERT( gen_learning_weight >= 0 );
        PLASSERT( semi_sup_learning_weight >= 0 );

        build_layers_and_connections();
        build_costs();
    }
}

/////////////////
// build_costs //
/////////////////
void DiscriminativeRBM::build_costs()
{
    cost_names.resize(0);

    // build the classification module, its cost and the joint layer
    build_classification_cost();

    int current_index = 0;
    cost_names.append("NLL");
    nll_cost_index = current_index;
    current_index++;

    cost_names.append("class_error");
    class_cost_index = current_index;
    current_index++;

    if( targetsize() > 1 )
    {
        cost_names.append("hamming_loss");
        hamming_loss_index = current_index;
        current_index++;
    }

    for( int i=0; i<targetsize(); i++ )
    {
        cost_names.append("class_error_" + tostring(i));
        current_index++;
    }

    PLASSERT( current_index == cost_names.length() );
}

//////////////////////////////////
// build_layers_and_connections //
//////////////////////////////////
void DiscriminativeRBM::build_layers_and_connections()
{
    MODULE_LOG << "build_layers_and_connections() called" << endl;

    if( !input_layer )
        PLERROR("In DiscriminativeRBM::build_layers_and_connections(): "
                "input_layer must be provided");
    if( !hidden_layer )
        PLERROR("In DiscriminativeRBM::build_layers_and_connections(): "
                "hidden_layer must be provided");

    if( !connection )
        PLERROR("DiscriminativeRBM::build_layers_and_connections(): \n"
                "connection must be provided");

    if( connection->up_size != hidden_layer->size ||
        connection->down_size != input_layer->size )
        PLERROR("DiscriminativeRBM::build_layers_and_connections(): \n"
                "connection's size (%d x %d) should be %d x %d",
                connection->up_size, connection->down_size,
                hidden_layer->size, input_layer->size);

    if( inputsize_ >= 0 )
        PLASSERT( input_layer->size == inputsize() );

    input_gradient.resize( inputsize() );
    class_output.resize( n_classes );
    before_class_output.resize( n_classes );
    class_gradient.resize( n_classes );

    target_one_hot.resize( n_classes );

    disc_pos_down_val.resize( inputsize() + n_classes );
    disc_pos_up_val.resize( hidden_layer->size );
    disc_neg_down_val.resize( inputsize() + n_classes );
    disc_neg_up_val.resize( hidden_layer->size );

    gen_pos_down_val.resize( inputsize() + n_classes );
    gen_pos_up_val.resize( hidden_layer->size );
    gen_neg_down_val.resize( inputsize() + n_classes );
    gen_neg_up_val.resize( hidden_layer->size );

    semi_sup_pos_down_val.resize( inputsize() + n_classes );
    semi_sup_pos_up_val.resize( hidden_layer->size );
    semi_sup_neg_down_val.resize( inputsize() + n_classes );
    semi_sup_neg_up_val.resize( hidden_layer->size );
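
    // Note: the *_down_val vectors have size inputsize() + n_classes because
    // the CD statistics are collected over the RBM's joint visible layer,
    // i.e. the concatenation [ input x ; one-hot target y ]; the *_up_val
    // vectors hold the corresponding hidden-layer statistics.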

    if( !input_layer->random_gen )
    {
        input_layer->random_gen = random_gen;
        input_layer->forget();
    }

    if( !hidden_layer->random_gen )
    {
        hidden_layer->random_gen = random_gen;
        hidden_layer->forget();
    }

    if( !connection->random_gen )
    {
        connection->random_gen = random_gen;
        connection->forget();
    }
}

///////////////////////////////
// build_classification_cost //
///////////////////////////////
void DiscriminativeRBM::build_classification_cost()
{
    MODULE_LOG << "build_classification_cost() called" << endl;

    if( targetsize() == 1 )
    {
        if (!classification_module ||
            classification_module->target_layer->size != n_classes ||
            classification_module->last_layer != hidden_layer ||
            classification_module->previous_to_last != connection )
        {
            // We need to (re-)create 'last_to_target', and thus the classification
            // module too.
            // This is not systematically done so that the learner can be
            // saved and loaded without losing learned parameters.
            last_to_target = new RBMMatrixConnection();
            last_to_target->up_size = hidden_layer->size;
            last_to_target->down_size = n_classes;
            last_to_target->L1_penalty_factor = target_weights_L1_penalty_factor;
            last_to_target->L2_penalty_factor = target_weights_L2_penalty_factor;
            last_to_target->random_gen = random_gen;
            last_to_target->build();

            target_layer = new RBMMultinomialLayer();
            target_layer->size = n_classes;
            target_layer->random_gen = random_gen;
            target_layer->build();

            classification_module = new RBMClassificationModule();
            classification_module->previous_to_last = connection;
            classification_module->last_layer = hidden_layer;
            classification_module->last_to_target = last_to_target;
            classification_module->target_layer =
                dynamic_cast<RBMMultinomialLayer*>((RBMLayer*) target_layer);
            classification_module->random_gen = random_gen;
            classification_module->build();
        }

        classification_cost = new NLLCostModule();
        classification_cost->input_size = n_classes;
        classification_cost->target_size = 1;
        classification_cost->build();

        last_to_target = classification_module->last_to_target;
        last_to_target_connection =
            (RBMMatrixConnection*) classification_module->last_to_target;
        target_layer = classification_module->target_layer;
        joint_connection = classification_module->joint_connection;

        joint_layer = new RBMMixedLayer();
        joint_layer->sub_layers.resize( 2 );
        joint_layer->sub_layers[0] = input_layer;
        joint_layer->sub_layers[1] = target_layer;
        joint_layer->random_gen = random_gen;
        joint_layer->build();

        if( unlabeled_class_index_begin != 0 )
        {
            unlabeled_class_output.resize( n_classes - unlabeled_class_index_begin );
            PP<RBMMultinomialLayer> sub_layer = new RBMMultinomialLayer();
            sub_layer->bias = target_layer->bias.subVec(
                unlabeled_class_index_begin,
                n_classes - unlabeled_class_index_begin);
            sub_layer->size = n_classes - unlabeled_class_index_begin;
            sub_layer->random_gen = random_gen;
            sub_layer->build();

            PP<RBMMatrixConnection> sub_connection = new RBMMatrixConnection();
            sub_connection->weights = last_to_target->weights.subMatColumns(
                unlabeled_class_index_begin,
                n_classes - unlabeled_class_index_begin);
            sub_connection->up_size = hidden_layer->size;
            sub_connection->down_size = n_classes - unlabeled_class_index_begin;
            sub_connection->random_gen = random_gen;
            sub_connection->build();

            unlabeled_classification_module = new RBMClassificationModule();
            unlabeled_classification_module->previous_to_last = connection;
            unlabeled_classification_module->last_layer = hidden_layer;
            unlabeled_classification_module->last_to_target = sub_connection;
            unlabeled_classification_module->target_layer = sub_layer;
            unlabeled_classification_module->random_gen = random_gen;
            unlabeled_classification_module->build();
        }

        if( n_classes_at_test_time > 0 && n_classes_at_test_time != n_classes )
        {
            test_time_class_output.resize( n_classes_at_test_time );
            PP<RBMMultinomialLayer> sub_layer = new RBMMultinomialLayer();
            sub_layer->bias = target_layer->bias.subVec(
                0, n_classes_at_test_time );
            sub_layer->size = n_classes_at_test_time;
            sub_layer->random_gen = random_gen;
            sub_layer->build();

            PP<RBMMatrixConnection> sub_connection = new RBMMatrixConnection();
            sub_connection->weights = last_to_target->weights.subMatColumns(
                0, n_classes_at_test_time );
            sub_connection->up_size = hidden_layer->size;
            sub_connection->down_size = n_classes_at_test_time;
            sub_connection->random_gen = random_gen;
            sub_connection->build();

            test_time_classification_module = new RBMClassificationModule();
            test_time_classification_module->previous_to_last = connection;
            test_time_classification_module->last_layer = hidden_layer;
            test_time_classification_module->last_to_target = sub_connection;
            test_time_classification_module->target_layer = sub_layer;
            test_time_classification_module->random_gen = random_gen;
            test_time_classification_module->build();
        }
        else
        {
            test_time_classification_module = 0;
        }
    }
    else
    {
        if( n_classes != targetsize() )
            PLERROR("In DiscriminativeRBM::build_classification_cost(): "
                    "n_classes should be equal to targetsize()");

        // Multitask setting
        if (!multitask_classification_module ||
            multitask_classification_module->target_layer->size != n_classes ||
            multitask_classification_module->last_layer != hidden_layer ||
            multitask_classification_module->previous_to_last != connection )
        {
            // We need to (re-)create 'last_to_target', and thus the
            // multitask_classification module too.
            // This is not systematically done so that the learner can be
            // saved and loaded without losing learned parameters.
            last_to_target = new RBMMatrixConnection();
            last_to_target->up_size = hidden_layer->size;
            last_to_target->down_size = n_classes;
            last_to_target->L1_penalty_factor = target_weights_L1_penalty_factor;
            last_to_target->L2_penalty_factor = target_weights_L2_penalty_factor;
            last_to_target->random_gen = random_gen;
            last_to_target->build();

            target_layer = new RBMBinomialLayer();
            target_layer->size = n_classes;
            target_layer->random_gen = random_gen;
            target_layer->build();

            multitask_classification_module =
                new RBMMultitaskClassificationModule();
            multitask_classification_module->previous_to_last = connection;
            multitask_classification_module->last_layer = hidden_layer;
            multitask_classification_module->last_to_target = last_to_target;
            multitask_classification_module->target_layer =
                dynamic_cast<RBMBinomialLayer*>((RBMLayer*) target_layer);
            multitask_classification_module->fprop_outputs_activation = true;
            multitask_classification_module->n_mean_field_iterations = n_mean_field_iterations;
            multitask_classification_module->random_gen = random_gen;
            multitask_classification_module->build();
        }

        last_to_target = multitask_classification_module->last_to_target;
        last_to_target_connection =
            (RBMMatrixConnection*) multitask_classification_module->last_to_target;
        target_layer = multitask_classification_module->target_layer;
        joint_connection = multitask_classification_module->joint_connection;

        joint_layer = new RBMMixedLayer();
        joint_layer->sub_layers.resize( 2 );
        joint_layer->sub_layers[0] = input_layer;
        joint_layer->sub_layers[1] = target_layer;
        joint_layer->random_gen = random_gen;
        joint_layer->build();

        if( unlabeled_class_index_begin != 0 )
            PLERROR("In DiscriminativeRBM::build_classification_cost(): "
                "can't use unlabeled_class_index_begin != 0 in multitask setting");

        if( n_classes_at_test_time > 0 && n_classes_at_test_time != n_classes )
            PLERROR("In DiscriminativeRBM::build_classification_cost(): "
                "can't use n_classes_at_test_time in multitask setting");
    }
}

///////////
// build //
///////////
void DiscriminativeRBM::build()
{
    inherited::build();
    build_();
}

//////////////////////////////////
// makeDeepCopyFromShallowCopy //
//////////////////////////////////
void DiscriminativeRBM::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(input_layer, copies);
    deepCopyField(hidden_layer, copies);
    deepCopyField(connection, copies);
    deepCopyField(classification_module, copies);
    deepCopyField(multitask_classification_module, copies);
    deepCopyField(cost_names, copies);
    deepCopyField(classification_cost, copies);
    deepCopyField(joint_layer, copies);
    deepCopyField(last_to_target, copies);
    deepCopyField(last_to_target_connection, copies);
    deepCopyField(joint_connection, copies);
    deepCopyField(target_layer, copies);
    deepCopyField(unlabeled_classification_module, copies);
    deepCopyField(test_time_classification_module, copies);
    deepCopyField(target_one_hot, copies);
    deepCopyField(disc_pos_down_val, copies);
    deepCopyField(disc_pos_up_val, copies);
    deepCopyField(disc_neg_down_val, copies);
    deepCopyField(disc_neg_up_val, copies);
    deepCopyField(gen_pos_down_val, copies);
    deepCopyField(gen_pos_up_val, copies);
    deepCopyField(gen_neg_down_val, copies);
    deepCopyField(gen_neg_up_val, copies);
    deepCopyField(semi_sup_pos_down_val, copies);
    deepCopyField(semi_sup_pos_up_val, copies);
    deepCopyField(semi_sup_neg_down_val, copies);
    deepCopyField(semi_sup_neg_up_val, copies);
    deepCopyField(input_gradient, copies);
    deepCopyField(class_output, copies);
    deepCopyField(before_class_output, copies);
    deepCopyField(unlabeled_class_output, copies);
    deepCopyField(test_time_class_output, copies);
    deepCopyField(class_gradient, copies);
}


////////////////
// outputsize //
////////////////
int DiscriminativeRBM::outputsize() const
{
    return n_classes_at_test_time > 0 ? n_classes_at_test_time : n_classes;
}

////////////
// forget //
////////////
void DiscriminativeRBM::forget()
{
    inherited::forget();

    input_layer->forget();
    hidden_layer->forget();
    connection->forget();
    if( targetsize() > 1 )
    {
        multitask_classification_module->forget();
    }
    else
    {
        classification_cost->forget();
        classification_module->forget();
    }
}

///////////
// train //
///////////
void DiscriminativeRBM::train()
{
    MODULE_LOG << "train() called " << endl;

    MODULE_LOG << "stage = " << stage
        << ", target nstages = " << nstages << endl;

    PLASSERT( train_set );

    Vec input( inputsize() );
    Vec target( targetsize() );
    int target_index = -1;
    real weight;

    real nll_cost;
    real class_error;
    TVec<string> train_cost_names = getTrainCostNames();
    Vec train_costs( train_cost_names.length() );
    train_costs.fill(MISSING_VALUE);

    int nsamples = train_set->length();
    int init_stage = stage;
    if( !initTrain() )
    {
        MODULE_LOG << "train() aborted" << endl;
        return;
    }

    PP<ProgressBar> pb;

    // clear stats of previous epoch
    train_stats->forget();

    if( report_progress )
        pb = new ProgressBar( "Training "
                              + classname(),
                              nstages - stage );

    int offset = (int)round(stage/nstages) % gen_learning_every_n_samples;

    for( ; stage<nstages ; stage++ )
    {
        train_set->getExample(stage%nsamples, input, target, weight);
        if( pb )
            pb->update( stage - init_stage + 1 );

        if( targetsize() == 1 )
        {
            target_one_hot.clear();
            if( !is_missing(target[0]) && (target[0] >= 0) )
            {
                target_index = (int)round( target[0] );
                target_one_hot[ target_index ] = 1;
            }
        }
        else
        {
            target_one_hot << target;
        }

        // Get CD stats...

        // ... for discriminative learning
        if( !do_not_use_discriminative_learning &&
            !use_exact_disc_gradient &&
            ( ( !is_missing(target[0]) && (target[0] >= 0) ) || targetsize() > 1 ) )
        {
            // Positive phase

            // Clamp visible units
            target_layer->sample << target_one_hot;
            input_layer->sample << input;

            // Up pass
            joint_connection->setAsDownInput( joint_layer->sample );
            hidden_layer->getAllActivations( joint_connection );
            hidden_layer->computeExpectation();
            hidden_layer->generateSample();

            disc_pos_down_val << joint_layer->sample;
            disc_pos_up_val << hidden_layer->expectation;

            // Negative phase

            // Down pass
            last_to_target_connection->setAsUpInput( hidden_layer->sample );
            target_layer->getAllActivations( last_to_target_connection );
            target_layer->computeExpectation();
            target_layer->generateSample();

            // Up pass
            joint_connection->setAsDownInput( joint_layer->sample );
            hidden_layer->getAllActivations( joint_connection );
            hidden_layer->computeExpectation();

            disc_neg_down_val << joint_layer->sample;
            disc_neg_up_val << hidden_layer->expectation;
        }

        // ... for generative learning
        if( (stage + offset) % gen_learning_every_n_samples == 0 )
        {
            if( ( ( !is_missing(target[0]) && (target[0] >= 0) ) || targetsize() > 1 ) &&
                gen_learning_weight > 0 )
            {
                // Positive phase
                if( !use_exact_disc_gradient && !do_not_use_discriminative_learning )
                {
                    // Use previous computations
                    gen_pos_down_val << disc_pos_down_val;
                    gen_pos_up_val << disc_pos_up_val;

                    hidden_layer->setExpectation( gen_pos_up_val );
                    hidden_layer->generateSample();
                }
                else
                {
                    // Clamp visible units
                    target_layer->sample << target_one_hot;
                    input_layer->sample << input;

                    // Up pass
                    joint_connection->setAsDownInput( joint_layer->sample );
                    hidden_layer->getAllActivations( joint_connection );
                    hidden_layer->computeExpectation();
                    hidden_layer->generateSample();

                    gen_pos_down_val << joint_layer->sample;
                    gen_pos_up_val << hidden_layer->expectation;
                }

                // Negative phase

                if( !use_multi_conditional_learning )
                {
                    // Down pass
                    joint_connection->setAsUpInput( hidden_layer->sample );
                    joint_layer->getAllActivations( joint_connection );
                    joint_layer->computeExpectation();
                    joint_layer->generateSample();

                    // Up pass
                    joint_connection->setAsDownInput( joint_layer->sample );
                    hidden_layer->getAllActivations( joint_connection );
                    hidden_layer->computeExpectation();
                }
                else
                {
                    target_layer->sample << target_one_hot;

                    // Down pass
                    connection->setAsUpInput( hidden_layer->sample );
                    input_layer->getAllActivations( connection );
                    input_layer->computeExpectation();
                    input_layer->generateSample();

                    // Up pass
                    joint_connection->setAsDownInput( joint_layer->sample );
                    hidden_layer->getAllActivations( joint_connection );
                    hidden_layer->computeExpectation();
                }

                gen_neg_down_val << joint_layer->sample;
                gen_neg_up_val << hidden_layer->expectation;
            }
        }

        // ... and for semi-supervised learning
        if( targetsize() > 1 && semi_sup_learning_weight > 0 )
            PLERROR("DiscriminativeRBM::train(): semi-supervised learning "
                "is not implemented yet for multi-task learning.");

        if( ( is_missing(target[0]) || target[0] < 0 ) && semi_sup_learning_weight > 0 )
        {
            // Positive phase

            // Clamp visible units and sample from p(y|x)
            if( unlabeled_classification_module )
            {
                unlabeled_classification_module->fprop( input,
                                                        unlabeled_class_output );
                class_output.clear();
                class_output.subVec( unlabeled_class_index_begin,
                                     n_classes - unlabeled_class_index_begin )
                    << unlabeled_class_output;
            }
            else
            {
                classification_module->fprop( input,
                                              class_output );
            }
            target_layer->setExpectation( class_output );
            target_layer->generateSample();
            input_layer->sample << input;

            // Up pass
            joint_connection->setAsDownInput( joint_layer->sample );
            hidden_layer->getAllActivations( joint_connection );
            hidden_layer->computeExpectation();
            hidden_layer->generateSample();

            semi_sup_pos_down_val << joint_layer->sample;
            semi_sup_pos_up_val << hidden_layer->expectation;

            // Negative phase

            // Down pass
            joint_connection->setAsUpInput( hidden_layer->sample );
            joint_layer->getAllActivations( joint_connection );
            joint_layer->computeExpectation();
            joint_layer->generateSample();

            // Up pass
            joint_connection->setAsDownInput( joint_layer->sample );
            hidden_layer->getAllActivations( joint_connection );
            hidden_layer->computeExpectation();

            semi_sup_neg_down_val << joint_layer->sample;
            semi_sup_neg_up_val << hidden_layer->expectation;
        }

        if( train_set->weightsize() == 0 )
            setLearningRate( disc_learning_rate / (1. + disc_decrease_ct * stage ));
        else
            setLearningRate( weight * disc_learning_rate / (1. + disc_decrease_ct * stage ));
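
        // For reference, the effective learning rate above decays as
        //   lr(t) = disc_learning_rate / (1 + disc_decrease_ct * t),
        // optionally scaled by the example's weight when the training set
        // carries sample weights.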
        // Get gradient and update

        if( !do_not_use_discriminative_learning &&
            use_exact_disc_gradient &&
            ( ( !is_missing(target[0]) && (target[0] >= 0) ) || targetsize() > 1 ) )
        {
            if( targetsize() == 1)
            {
                PLASSERT( target_index >= 0 );
                classification_module->fprop( input, class_output );
                // This doesn't work. gcc bug?
                //classification_cost->fprop( class_output, target, nll_cost );
                classification_cost->CostModule::fprop( class_output, target,
                                                        nll_cost );

                class_error = ( argmax(class_output) == target_index ) ? 0: 1;
                train_costs[nll_cost_index] = nll_cost;
                train_costs[class_cost_index] = class_error;

                classification_cost->bpropUpdate( class_output, target, nll_cost,
                                                  class_gradient );

                classification_module->bpropUpdate( input, class_output,
                                                    input_gradient, class_gradient );

                train_stats->update( train_costs );
            }
            else
            {
                multitask_classification_module->fprop( input, before_class_output );
                // This doesn't work. gcc bug?
                //multitask_classification_cost->fprop( class_output, target,
                //                                      nll_cost );
                //multitask_classification_cost->CostModule::fprop( class_output,
                //                                                  target,
                //                                                  nll_cost );

                target_layer->fprop( before_class_output, class_output );
                target_layer->activation << before_class_output;
                target_layer->activation += target_layer->bias;
                target_layer->setExpectation( class_output );
                nll_cost = target_layer->fpropNLL( target );
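
                // In this multitask branch the NLL is the binomial
                // cross-entropy summed over tasks, i.e.
                //   NLL = - sum_t [ y_t log(p_t) + (1 - y_t) log(1 - p_t) ],
                // where p_t is the mean-field output probability for task t.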

                train_costs.clear();
                train_costs[nll_cost_index] = nll_cost;

                for( int task=0; task<targetsize(); task++)
                {
                    if( class_output[task] > 0.5 && target[task] != 1)
                    {
                        train_costs[ hamming_loss_index ]++;
                        train_costs[ hamming_loss_index + task + 1 ] = 1;
                    }

                    if( class_output[task] <= 0.5 && target[task] != 0)
                    {
                        train_costs[ hamming_loss_index ]++;
                        train_costs[ hamming_loss_index + task + 1 ] = 1;
                    }
                }

                if( train_costs[ hamming_loss_index ] > 0 )
                    train_costs[ class_cost_index ] = 1;

                train_costs[ hamming_loss_index ] /= targetsize();

                //multitask_classification_cost->bpropUpdate(
                //    class_output, target, nll_cost,
                //    class_gradient );

                class_gradient.clear();
                target_layer->bpropNLL( target, nll_cost, class_gradient );
                target_layer->update( class_gradient );

                multitask_classification_module->bpropUpdate(
                    input, before_class_output,
                    input_gradient, class_gradient );

                train_stats->update( train_costs );
            }
        }

        // CD Updates
        if( !do_not_use_discriminative_learning &&
            !use_exact_disc_gradient && ( !is_missing(target[0]) && (target[0] >= 0) ) )
        {
            joint_layer->update( disc_pos_down_val, disc_neg_down_val );
            hidden_layer->update( disc_pos_up_val, disc_neg_up_val );
            joint_connection->update( disc_pos_down_val, disc_pos_up_val,
                                disc_neg_down_val, disc_neg_up_val);
        }
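
        // As a sketch, each of the contrastive divergence updates above and
        // below follows the usual CD-1 rule: with learning rate lr,
        //   W += lr * ( pos_up * pos_down' - neg_up * neg_down' )
        //   b += lr * ( pos_down - neg_down )   // visible biases
        //   c += lr * ( pos_up - neg_up )       // hidden biases
        // where the 'pos_*' statistics come from clamping the visible units
        // on the data and the 'neg_*' statistics from one Gibbs step.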

        if( (stage + offset) % gen_learning_every_n_samples == 0 )
        {
            if( ( !is_missing(target[0]) && (target[0] >= 0) ) && gen_learning_weight > 0 )
            {
                if( train_set->weightsize() == 0 )
                    setLearningRate( gen_learning_every_n_samples * gen_learning_weight * disc_learning_rate /
                                     (1. + disc_decrease_ct * stage ));
                else
                    setLearningRate( weight * gen_learning_every_n_samples * gen_learning_weight * disc_learning_rate /
                                     (1. + disc_decrease_ct * stage ));

                joint_layer->update( gen_pos_down_val, gen_neg_down_val );
                hidden_layer->update( gen_pos_up_val, gen_neg_up_val );
                joint_connection->update( gen_pos_down_val, gen_pos_up_val,
                                          gen_neg_down_val, gen_neg_up_val);
            }
        }

        if( ( is_missing(target[0]) || (target[0] < 0) ) && semi_sup_learning_weight > 0 )
        {
            if( train_set->weightsize() == 0 )
                setLearningRate( semi_sup_learning_weight * disc_learning_rate /
                                 (1. + disc_decrease_ct * stage ));
            else
                setLearningRate( weight * semi_sup_learning_weight * disc_learning_rate /
                                 (1. + disc_decrease_ct * stage ));

            joint_layer->update( semi_sup_pos_down_val, semi_sup_neg_down_val );
            hidden_layer->update( semi_sup_pos_up_val, semi_sup_neg_up_val );
            joint_connection->update( semi_sup_pos_down_val, semi_sup_pos_up_val,
                                semi_sup_neg_down_val, semi_sup_neg_up_val);
        }

    }

    train_stats->finalize();
}


///////////////////
// computeOutput //
///////////////////
void DiscriminativeRBM::computeOutput(const Vec& input, Vec& output) const
{
    // Compute the output from the input.
    output.resize(0);
    if( targetsize() == 1 )
    {
        if( test_time_classification_module )
        {
            test_time_classification_module->fprop( input,
                                                    output );
        }
        else
        {
            classification_module->fprop( input,
                                          output );
        }
    }
    else
    {
        multitask_classification_module->fprop( input,
                                                output );
    }
}


void DiscriminativeRBM::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                           const Vec& target, Vec& costs) const
{

    // Compute the costs from *already* computed output.
    costs.resize( cost_names.length() );
    costs.fill( MISSING_VALUE );

    if( targetsize() == 1 )
    {
        if( !is_missing(target[0]) && (target[0] >= 0) )
        {
            //classification_cost->fprop( output, target, costs[nll_cost_index] );
            //classification_cost->CostModule::fprop( output, target, costs[nll_cost_index] );
            costs[nll_cost_index] = -pl_log(output[(int) round(target[0])]);
            costs[class_cost_index] =
                (argmax(output) == (int) round(target[0]))? 0 : 1;
        }
    }
    else
    {
        costs.clear();

        // This doesn't work. gcc bug?
        //multitask_classification_cost->fprop( output, target,
        //                                      costs[nll_cost_index] );
        //multitask_classification_cost->CostModule::fprop( output,
        //                                                  target,
        //                                                  nll_cost );

        target_layer->fprop( output, class_output );
        target_layer->activation << output;
        target_layer->activation += target_layer->bias;
        target_layer->setExpectation( class_output );
        costs[ nll_cost_index ] = target_layer->fpropNLL( target );

        for( int task=0; task<targetsize(); task++)
        {
            if( class_output[task] > 0.5 && target[task] != 1)
            {
                costs[ hamming_loss_index ]++;
                costs[ hamming_loss_index + task + 1 ] = 1;
            }

            if( class_output[task] <= 0.5 && target[task] != 0)
            {
                costs[ hamming_loss_index ]++;
                costs[ hamming_loss_index + task + 1 ] = 1;
            }
        }

        if( costs[ hamming_loss_index ] > 0 )
            costs[ class_cost_index ] = 1;

        costs[ hamming_loss_index ] /= targetsize();
    }
}

TVec<string> DiscriminativeRBM::getTestCostNames() const
{
    // Return the names of the costs computed by computeCostsFromOutputs
    // (these may or may not be exactly the same as what's returned by
    // getTrainCostNames).

    return cost_names;
}

TVec<string> DiscriminativeRBM::getTrainCostNames() const
{
    return cost_names;
}


//#####  Helper functions  ##################################################

void DiscriminativeRBM::setLearningRate( real the_learning_rate )
{
    input_layer->setLearningRate( the_learning_rate );
    hidden_layer->setLearningRate( the_learning_rate );
    connection->setLearningRate( the_learning_rate );
    target_layer->setLearningRate( the_learning_rate );
    last_to_target->setLearningRate( the_learning_rate );
    if( targetsize() == 1)
        classification_cost->setLearningRate( the_learning_rate );
    //else
    //    multitask_classification_cost->setLearningRate( the_learning_rate );
    //classification_module->setLearningRate( the_learning_rate );
}

} // end of namespace PLearn


/*
  Local Variables:
  mode:c++
  c-basic-offset:4
  c-file-style:"stroustrup"
  c-file-offsets:((innamespace . 0)(inline-open . 0))
  indent-tabs-mode:nil
  fill-column:79
  End:
*/
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:encoding=utf-8:textwidth=79 :